[
  {
    "path": ".github/CODEOWNERS",
    "content": "* @livekit/cs-devs\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/bug_report.md",
    "content": "---\nname: Bug report\nabout: Report an egress issue\ntitle: \"[BUG]\"\nlabels: bug\nassignees: frostbyte73\n\n---\n\n**Describe the bug**\nA clear and concise description of what the bug is.\n\n**Egress Version**\nWhat version are you running?\n\n**Egress Request**\nPost the request here (be sure to remove any PII).\n\n**Additional context**\nAdd any other context about the problem here.\n\n**Logs**\nPost any relevant logs from the egress service here.\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/feature_request.md",
    "content": "---\nname: Feature request\nabout: Suggest an idea for this project\ntitle: \"[FEATURE]\"\nlabels: enhancement, help wanted\nassignees: ''\n\n---\n\n**Is your feature request related to a problem? Please describe.**\nA clear and concise description of what the problem is. Ex. I'm always frustrated when [...]\n\n**Describe the solution you'd like**\nA clear and concise description of what you want to happen.\n\n**Describe alternatives you've considered**\nA clear and concise description of any alternative solutions or features you've considered.\n\n**Additional context**\nAdd any other context or screenshots about the feature request here.\n"
  },
  {
    "path": ".github/workflows/publish-chrome.yaml",
    "content": "name: Publish Chrome\n\non:\n  workflow_dispatch:\n    inputs:\n      chrome_version:\n        description: \"Version of Chrome to build\"\n        required: true\n        type: string\n      image_tag:\n        description: \"Docker image tag (defaults to chrome_version if empty)\"\n        required: false\n        type: string\n\njobs:\n  build:\n    runs-on: ubuntu-latest\n\n    steps:\n      - name: Checkout code\n        uses: actions/checkout@v6\n\n      - name: Install the Linode CLI\n        uses: linode/action-linode-cli@v1\n        with:\n          token: ${{ secrets.LINODE_PAT }}\n\n      - name: Get firewall id\n        id: firewall\n        shell: bash\n        env:\n          LINODE_CLI_TOKEN: ${{ secrets.LINODE_PAT }}\n        run: |\n          set -euo pipefail\n\n          firewall_id=\"$(linode-cli firewalls list --label chrome-builder --json | jq -r '.[0].id // empty')\"\n\n          if [ -z \"$firewall_id\" ]; then\n            echo \"Firewall with label chrome-builder not found\"\n            exit 1\n          fi\n\n          echo \"firewall_id=$firewall_id\" >> \"$GITHUB_OUTPUT\"\n          echo \"Using firewall_id=$firewall_id\"\n\n      - name: Build cloud-init user-data\n        id: userdata\n        shell: bash\n        run: |\n          set -euo pipefail\n\n          cat > cloud-init.yaml <<'EOF'\n          #cloud-config\n          package_update: true\n          package_upgrade: false\n\n          packages:\n            - sudo\n            - zip\n            - unzip\n            - curl\n            - git\n            - netcat-openbsd\n\n          users:\n            - name: chrome\n              gecos: Chrome Builder\n              shell: /bin/bash\n              groups: [sudo]\n              sudo: ALL=(ALL) NOPASSWD:ALL\n              lock_passwd: true\n              ssh_authorized_keys:\n                - ${LINODE_SSH_PUBLIC_KEY}\n\n          write_files:\n            - path: 
/etc/ssh/sshd_config.d/99-github-actions.conf\n              permissions: '0644'\n              content: |\n                PasswordAuthentication no\n                ClientAliveInterval 60\n                ClientAliveCountMax 3\n\n          runcmd:\n            - mkdir -p /home/chrome/.ssh\n            - chmod 700 /home/chrome/.ssh\n            - printf '%s\\n' \"${LINODE_SSH_PUBLIC_KEY}\" > /home/chrome/.ssh/authorized_keys\n            - chmod 600 /home/chrome/.ssh/authorized_keys\n            - chown -R chrome:chrome /home/chrome/.ssh\n            - systemctl restart ssh || systemctl restart sshd || true\n          EOF\n\n          sed \"s|\\${LINODE_SSH_PUBLIC_KEY}|${{ secrets.LINODE_SSH_PUBLIC_KEY }}|g\" cloud-init.yaml > cloud-init.rendered.yaml\n\n          user_data_b64=\"$(base64 -w 0 cloud-init.rendered.yaml)\"\n          echo \"user_data_b64=$user_data_b64\" >> \"$GITHUB_OUTPUT\"\n\n      - name: Get or create builder\n        id: builder\n        shell: bash\n        env:\n          LINODE_CLI_TOKEN: ${{ secrets.LINODE_PAT }}\n        run: |\n          set -euo pipefail\n\n          builder_json=\"$(linode-cli linodes list --label chrome-builder --json)\"\n          builder_id=\"$(echo \"$builder_json\" | jq -r '.[0].id // empty')\"\n\n          if [ -n \"$builder_id\" ]; then\n            echo \"Reusing existing builder: $builder_id\"\n\n            builder_ip=\"$(echo \"$builder_json\" | jq -r '.[0].ipv4[0] // empty')\"\n            builder_status=\"$(echo \"$builder_json\" | jq -r '.[0].status // empty')\"\n\n            if [ \"$builder_status\" = \"offline\" ]; then\n              echo \"Booting existing builder\"\n              linode-cli linodes boot \"$builder_id\"\n            fi\n\n            echo \"builder_created=false\" >> \"$GITHUB_OUTPUT\"\n            echo \"builder_id=$builder_id\" >> \"$GITHUB_OUTPUT\"\n            echo \"builder_ip=$builder_ip\" >> \"$GITHUB_OUTPUT\"\n            exit 0\n          fi\n\n          echo \"No existing 
builder found, creating a new one\"\n\n          builder_info=\"$(linode-cli linodes create \\\n            --backups_enabled false \\\n            --booted true \\\n            --image linode/ubuntu22.04 \\\n            --label chrome-builder \\\n            --private_ip false \\\n            --region us-west \\\n            --root_pass \"${{ secrets.LINODE_ROOT_PASS }}\" \\\n            --type g6-dedicated-56 \\\n            --authorized_keys \"${{ secrets.LINODE_SSH_PUBLIC_KEY }}\" \\\n            --firewall_id \"${{ steps.firewall.outputs.firewall_id }}\" \\\n            --metadata.user_data \"${{ steps.userdata.outputs.user_data_b64 }}\" \\\n            --json)\"\n\n          echo \"$builder_info\"\n\n          builder_id=\"$(echo \"$builder_info\" | jq -r '.[0].id')\"\n          builder_ip=\"$(echo \"$builder_info\" | jq -r '.[0].ipv4[0]')\"\n\n          echo \"builder_created=true\" >> \"$GITHUB_OUTPUT\"\n          echo \"builder_id=$builder_id\" >> \"$GITHUB_OUTPUT\"\n          echo \"builder_ip=$builder_ip\" >> \"$GITHUB_OUTPUT\"\n\n      - name: Wait for Builder status\n        shell: bash\n        env:\n          LINODE_CLI_TOKEN: ${{ secrets.LINODE_PAT }}\n        run: |\n          set -euo pipefail\n\n          status=\"$(linode-cli linodes view \"${{ steps.builder.outputs.builder_id }}\" --json | jq -r '.[0].status')\"\n\n          while [ \"$status\" = \"provisioning\" ] || [ \"$status\" = \"booting\" ]; do\n            echo \"Builder status: $status\"\n            sleep 5\n            status=\"$(linode-cli linodes view \"${{ steps.builder.outputs.builder_id }}\" --json | jq -r '.[0].status')\"\n          done\n\n          echo \"Builder status: $status\"\n\n          if [ \"$status\" != \"running\" ]; then\n            echo \"Builder failed to reach running state\"\n            exit 1\n          fi\n\n      - name: Write SSH keys\n        shell: bash\n        run: |\n          set -euo pipefail\n\n          mkdir -p ~/.ssh\n          chmod 700 
~/.ssh\n          printf '%s\\n' \"${{ secrets.LINODE_SSH_PRIVATE_KEY }}\" > ~/.ssh/linode_ed25519\n          printf '%s\\n' \"${{ secrets.LINODE_SSH_PUBLIC_KEY }}\" > ~/.ssh/linode_ed25519.pub\n          chmod 600 ~/.ssh/linode_ed25519 ~/.ssh/linode_ed25519.pub\n\n      - name: Wait for SSH and cloud-init if needed\n        shell: bash\n        run: |\n          set -euo pipefail\n\n          ip=\"${{ steps.builder.outputs.builder_ip }}\"\n\n          for i in $(seq 1 180); do\n            if [ \"${{ steps.builder.outputs.builder_created }}\" = \"true\" ]; then\n              remote_cmd='cloud-init status --wait >/dev/null 2>&1 || true; echo ready'\n            else\n              remote_cmd='echo ready'\n            fi\n\n            if ssh -i ~/.ssh/linode_ed25519 \\\n              -o BatchMode=yes \\\n              -o PasswordAuthentication=no \\\n              -o StrictHostKeyChecking=accept-new \\\n              -o ConnectTimeout=5 \\\n              root@\"$ip\" \\\n              \"$remote_cmd\" >/tmp/ssh-ready.txt 2>/tmp/ssh-ready.err; then\n              echo \"SSH is ready\"\n              break\n            fi\n\n            echo \"Waiting for SSH on $ip...\"\n            cat /tmp/ssh-ready.err || true\n            sleep 2\n          done\n\n          grep -q ready /tmp/ssh-ready.txt\n\n      - name: Verify chrome user\n        shell: bash\n        run: |\n          set -euo pipefail\n\n          ssh -i ~/.ssh/linode_ed25519 \\\n            -o BatchMode=yes \\\n            -o PasswordAuthentication=no \\\n            -o StrictHostKeyChecking=yes \\\n            root@${{ steps.builder.outputs.builder_ip }} \\\n            'id chrome && sudo -iu chrome whoami'\n\n      - name: Amd64\n        shell: bash\n        run: |\n          set -euo pipefail\n\n          ssh -i ~/.ssh/linode_ed25519 \\\n            -o BatchMode=yes \\\n            -o PasswordAuthentication=no \\\n            -o StrictHostKeyChecking=yes \\\n            -o ServerAliveInterval=60 \\\n   
         root@${{ steps.builder.outputs.builder_ip }} \\\n            \"sudo -iu chrome bash -s -- '${{ inputs.chrome_version }}'\" \\\n            < ./build/chrome/scripts/amd64.sh\n\n      - name: Arm64\n        shell: bash\n        run: |\n          set -euo pipefail\n\n          ssh -i ~/.ssh/linode_ed25519 \\\n            -o BatchMode=yes \\\n            -o PasswordAuthentication=no \\\n            -o StrictHostKeyChecking=yes \\\n            -o ServerAliveInterval=60 \\\n            root@${{ steps.builder.outputs.builder_ip }} \\\n            \"sudo -iu chrome bash -s -- '${{ inputs.chrome_version }}'\" \\\n            < ./build/chrome/scripts/arm64.sh\n\n      - name: Drivers\n        shell: bash\n        run: |\n          set -euo pipefail\n\n          ssh -i ~/.ssh/linode_ed25519 \\\n            -o BatchMode=yes \\\n            -o PasswordAuthentication=no \\\n            -o StrictHostKeyChecking=yes \\\n            -o ServerAliveInterval=60 \\\n            root@${{ steps.builder.outputs.builder_ip }} \\\n            \"sudo -iu chrome bash -s -- '${{ inputs.chrome_version }}'\" \\\n            < ./build/chrome/scripts/driver.sh\n\n      - name: Prepare artifacts\n        shell: bash\n        run: |\n          set -euo pipefail\n\n          ssh -i ~/.ssh/linode_ed25519 \\\n            -o BatchMode=yes \\\n            -o PasswordAuthentication=no \\\n            -o StrictHostKeyChecking=yes \\\n            root@${{ steps.builder.outputs.builder_ip }} \\\n            'sudo -iu chrome bash -lc \"cd /home/chrome && rm -f output.zip && zip -r output.zip ./output\"'\n\n      - name: Download artifacts\n        shell: bash\n        run: |\n          set -euo pipefail\n\n          rm -rf \"${{ github.workspace }}/build/chrome/output\"\n          mkdir -p \"${{ github.workspace }}/build/chrome\"\n\n          scp -i ~/.ssh/linode_ed25519 \\\n            -o BatchMode=yes \\\n            -o PasswordAuthentication=no \\\n            -o StrictHostKeyChecking=yes \\\n     
       root@${{ steps.builder.outputs.builder_ip }}:/home/chrome/output.zip \\\n            \"${{ github.workspace }}/build/chrome/output.zip\"\n\n          unzip -o \"${{ github.workspace }}/build/chrome/output.zip\" -d \"${{ github.workspace }}/build/chrome\"\n\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@v4\n\n      - name: Login to DockerHub\n        uses: docker/login-action@v4\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n\n      - name: Build and push\n        uses: docker/build-push-action@v7\n        with:\n          context: ./build/chrome\n          file: ./build/chrome/Dockerfile\n          push: true\n          platforms: linux/amd64,linux/arm64\n          tags: livekit/chrome-installer:${{ inputs.image_tag || inputs.chrome_version }}\n\n      - name: Delete created builder on success\n        if: success()\n        shell: bash\n        env:\n          LINODE_CLI_TOKEN: ${{ secrets.LINODE_PAT }}\n        run: |\n          set -euo pipefail\n          linode-cli linodes delete \"${{ steps.builder.outputs.builder_id }}\"\n"
  },
  {
    "path": ".github/workflows/publish-egress.yaml",
    "content": "# Copyright 2023 LiveKit, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nname: Publish Egress\n\n# Controls when the action will run.\non:\n  workflow_dispatch:\n  push:\n    # only publish on version tags\n    tags:\n      - 'v*.*.*'\njobs:\n  docker:\n    runs-on: namespace-profile-8vcpu-cache\n    steps:\n      - uses: actions/checkout@v6\n\n      - uses: actions/cache@v5\n        with:\n          path: |\n            ~/go/pkg/mod\n            ~/go/bin\n            ~/.cache\n          key: \"${{ runner.os }}-egress-${{ hashFiles('**/go.sum') }}\"\n          restore-keys: ${{ runner.os }}-egress\n\n      - name: Docker metadata\n        id: docker-md\n        uses: docker/metadata-action@v6\n        with:\n          images: livekit/egress\n          tags: |\n            type=semver,pattern=v{{version}}\n            type=semver,pattern=v{{major}}.{{minor}}\n\n      - name: Set up Go\n        uses: actions/setup-go@v6\n        with:\n          go-version: 1.26.1\n\n      - name: Download Go modules\n        run: go mod download\n\n      - name: Get template image\n        id: template-tag\n        run: |\n          TEMPLATE_TAG=`go run github.com/livekit/egress/cmd/template_version`\n          echo \"template_tag=$TEMPLATE_TAG\" > \"$GITHUB_OUTPUT\"\n\n      - name: Set up QEMU\n        uses: docker/setup-qemu-action@v4\n\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@v4\n\n      - name: Login to 
DockerHub\n        uses: docker/login-action@v4\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n\n      - name: Build and push\n        uses: docker/build-push-action@v7\n        with:\n          context: .\n          file: ./build/egress/Dockerfile\n          push: true\n          platforms: linux/amd64,linux/arm64\n          tags: ${{ steps.docker-md.outputs.tags }}\n          labels: ${{ steps.docker-md.outputs.labels }}\n          build-args: |\n            TEMPLATE_TAG=${{ steps.template-tag.outputs.template_tag }}\n"
  },
  {
    "path": ".github/workflows/publish-gstreamer-base.yaml",
    "content": "on:\n  workflow_call:\n    inputs:\n      version:\n        required: true\n        type: string\n      buildjet-runs-on:\n        required: true\n        type: string\n      arch:\n        required: true\n        type: string\n    secrets:\n      DOCKERHUB_USERNAME:\n        required: true\n      DOCKERHUB_TOKEN:\n        required: true\nenv:\n  GST_VERSION: \"${{ inputs.version }}\"\n  LIBNICE_VERSION: \"0.1.21\"\n\njobs:\n  base-gstreamer-build:\n    runs-on: ${{ inputs.buildjet-runs-on }}\n\n    steps:\n      - name: Checkout code\n        uses: actions/checkout@v6\n\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@v4\n\n      - name: Login to Docker Hub\n        uses: docker/login-action@v4\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n\n      - name: Build and push base\n        uses: docker/build-push-action@v7\n        with:\n          context: ./build/gstreamer\n          push: true\n          build-args: |\n            GSTREAMER_VERSION=${{ env.GST_VERSION }}\n            LIBNICE_VERSION=${{ env.LIBNICE_VERSION }}\n          file: ./build/gstreamer/Dockerfile-base\n          tags: livekit/gstreamer:${{ env.GST_VERSION }}-base-${{ inputs.arch }}\n\n      - name: Build and push dev\n        uses: docker/build-push-action@v7\n        with:\n          context: ./build/gstreamer\n          push: true\n          build-args: |\n            GSTREAMER_VERSION=${{ env.GST_VERSION }}\n            LIBNICE_VERSION=${{ env.LIBNICE_VERSION }}\n          file: ./build/gstreamer/Dockerfile-dev\n          tags: livekit/gstreamer:${{ env.GST_VERSION }}-dev-${{ inputs.arch }}\n\n      - name: Build and push prod\n        uses: docker/build-push-action@v7\n        with:\n          context: ./build/gstreamer\n          push: true\n          build-args: |\n            GSTREAMER_VERSION=${{ env.GST_VERSION }}\n            LIBNICE_VERSION=${{ 
env.LIBNICE_VERSION }}\n          file: ./build/gstreamer/Dockerfile-prod\n          tags: livekit/gstreamer:${{ env.GST_VERSION }}-prod-${{ inputs.arch }}\n\n      - name: Build and push prod RS\n        uses: docker/build-push-action@v7\n        with:\n          context: ./build/gstreamer\n          push: true\n          build-args: |\n            GSTREAMER_VERSION=${{ env.GST_VERSION }}\n            LIBNICE_VERSION=${{ env.LIBNICE_VERSION }}\n          file: ./build/gstreamer/Dockerfile-prod-rs\n          tags: livekit/gstreamer:${{ env.GST_VERSION }}-prod-rs-${{ inputs.arch }}\n"
  },
  {
    "path": ".github/workflows/publish-gstreamer.yaml",
    "content": "name: Publish GStreamer\n\non:\n  workflow_dispatch:\n    inputs:\n      version:\n        description: \"GStreamer version to publish (e.g. 1.24.4)\"\n        required: true\n        type: string\n\njobs:\n  gstreamer-build-amd64:\n    uses: ./.github/workflows/publish-gstreamer-base.yaml\n    with:\n      version: ${{ inputs.version }}\n      buildjet-runs-on: namespace-profile-8vcpu-cache\n      arch: amd64\n    secrets:\n      DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}\n      DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}\n\n  gstreamer-build-arm64:\n    uses: ./.github/workflows/publish-gstreamer-base.yaml\n    with:\n      version: ${{ inputs.version }}\n      buildjet-runs-on: namespace-profile-arm-16\n      arch: arm64\n    secrets:\n      DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}\n      DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}\n\n  tag-gstreamer-build:\n    needs: [gstreamer-build-amd64, gstreamer-build-arm64]\n    runs-on: ubuntu-latest\n    steps:\n      - name: Checkout code\n        uses: actions/checkout@v6\n\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@v4\n\n      - name: Login to Docker Hub\n        uses: docker/login-action@v4\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n\n      - name: Run tag script\n        run: ./build/gstreamer/tag.sh ${{ inputs.version }}\n"
  },
  {
    "path": ".github/workflows/publish-template-sdk.yaml",
    "content": "# Copyright 2023 LiveKit, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nname: Publish Template SDK\non:\n  push:\n    tags:\n      - \"template*\"\n\njobs:\n  deploy:\n    runs-on: ubuntu-latest\n    defaults:\n      run:\n        working-directory: ./template-sdk\n    steps:\n      - uses: actions/checkout@v6\n      - uses: pnpm/action-setup@v5\n        with:\n          version: 10\n      - name: Use Node.js 18\n        uses: actions/setup-node@v6\n        with:\n          node-version: 24\n          cache: \"pnpm\"\n          cache-dependency-path: ./template-sdk/pnpm-lock.yaml\n\n      - name: Install Dependencies\n        run: pnpm install\n\n      - name: Build\n        run: pnpm build\n\n      - name: Publish to npm\n        run: |\n          npm config set '//registry.npmjs.org/:_authToken' $NPM_TOKEN\n          npm publish\n        env:\n          NPM_TOKEN: ${{ secrets.NPM_TOKEN }}\n"
  },
  {
    "path": ".github/workflows/publish-template.yaml",
    "content": "# Copyright 2023 LiveKit, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nname: Publish Templates\non:\n  workflow_dispatch:\n  pull_request:\n    branches: [main]\n    paths:\n      - build/template/Dockerfile\n      - template-default/**\n      - template-sdk/**\n\njobs:\n  docker:\n    runs-on: namespace-profile-8vcpu-cache\n    steps:\n      - uses: actions/checkout@v6\n        # for pull requests, need to checkout head for EndBug/add-and-commit to work\n        if: github.event_name == 'pull_request'\n        with:\n          repository: ${{ github.event.pull_request.head.repo.full_name }}\n          ref: ${{ github.event.pull_request.head.ref }}\n\n      - uses: actions/checkout@v6\n        if: github.event_name != 'pull_request'\n\n      - name: Docker metadata\n        id: docker-md\n        uses: docker/metadata-action@v6\n        with:\n          images: livekit/egress-templates\n          tags: |\n            type=sha\n            type=raw,value=latest,enable={{is_default_branch}}\n\n      - name: Set up Docker Buildx\n        uses: docker/setup-buildx-action@v4\n\n      - name: Login to DockerHub\n        uses: docker/login-action@v4\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n\n      - name: Build and push\n        uses: docker/build-push-action@v7\n        with:\n          context: .\n          file: ./build/template/Dockerfile\n          push: 
true\n          platforms: linux/amd64,linux/arm64\n          tags: ${{ steps.docker-md.outputs.tags }}\n          labels: ${{ steps.docker-md.outputs.labels }}\n\n      - name: Update template version\n        run: |\n          SHORT_SHA=`echo ${GITHUB_SHA} | cut -c 1-7`\n          sed \"s/TemplateVersion.*= \\\"[-a-z0-9]*\\\"/TemplateVersion = \\\"sha-${SHORT_SHA}\\\"/\" < version/version.go  > version.go\n          mv -f version.go version/version.go\n\n      - name: Commit version changes\n        uses: EndBug/add-and-commit@v9\n        with:\n          default_author: github_actions\n          message: |\n            Commit: https://github.com/${{ github.repository }}/commit/${{ github.sha }}\n            Ref: https://github.com/${{ github.repository }}/tree/${{ github.ref_name }}\n            By: ${{ github.actor }}\n          push: true\n\n"
  },
  {
    "path": ".github/workflows/slack-notifier.yaml",
    "content": "name: PR Slack Notifier\n\non:\n  pull_request:\n    types: [review_requested, reopened, closed, synchronize]\n  pull_request_review:\n    types: [submitted]\n\npermissions:\n  contents: read\n  pull-requests: write\n  issues: write\n\nconcurrency:\n  group: pr-slack-${{ github.event.pull_request.number }}-${{ github.workflow }}\n  cancel-in-progress: false\n\njobs:\n  notify-devs:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: livekit/slack-notifier-action@main\n        with:\n          config_json: ${{ secrets.SLACK_NOTIFY_CONFIG_JSON }}\n          slack_token: ${{ secrets.SLACK_PR_NOTIFIER_TOKEN }}\n"
  },
  {
    "path": ".github/workflows/test-cleanup.yaml",
    "content": "name: Cleanup Integration Images\n\non:\n  schedule:\n    - cron: '0 6 * * *'\n  workflow_dispatch:\n\npermissions: {}\n\njobs:\n  cleanup:\n    runs-on: ubuntu-latest\n    steps:\n      - name: Delete old integration image tags\n        env:\n          DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_CLEANUP_USERNAME }}\n          DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_CLEANUP_TOKEN }}\n          REPO: livekit/egress-integration\n          RETENTION_DAYS: 3\n        run: |\n          set -euo pipefail\n\n          # Authenticate with Docker Hub\n          TOKEN=$(curl -sf \"https://hub.docker.com/v2/users/login\" \\\n            -H \"Content-Type: application/json\" \\\n            -d \"{\\\"username\\\":\\\"${DOCKERHUB_USERNAME}\\\",\\\"password\\\":\\\"${DOCKERHUB_TOKEN}\\\"}\" \\\n            | jq -r '.token')\n\n          if [ -z \"$TOKEN\" ] || [ \"$TOKEN\" = \"null\" ]; then\n            echo \"::error::Failed to authenticate with Docker Hub\"\n            exit 1\n          fi\n\n          CUTOFF=$(date -u -d \"${RETENTION_DAYS} days ago\" +%Y-%m-%dT%H:%M:%S.%NZ)\n          echo \"Deleting tags last updated before ${CUTOFF}\"\n\n          DELETED=0\n          RETAINED=0\n          PAGE=1\n\n          while true; do\n            RESPONSE=$(curl -sf \"https://hub.docker.com/v2/repositories/${REPO}/tags?page_size=100&page=${PAGE}\" \\\n              -H \"Authorization: Bearer ${TOKEN}\")\n\n            TAGS=$(echo \"$RESPONSE\" | jq -r '.results // empty')\n            if [ -z \"$TAGS\" ] || [ \"$TAGS\" = \"[]\" ]; then\n              break\n            fi\n\n            for TAG_INFO in $(echo \"$TAGS\" | jq -c '.[]'); do\n              TAG_NAME=$(echo \"$TAG_INFO\" | jq -r '.name')\n              LAST_UPDATED=$(echo \"$TAG_INFO\" | jq -r '.last_updated')\n\n              if [[ \"$LAST_UPDATED\" < \"$CUTOFF\" ]]; then\n                STATUS=$(curl -s -o /dev/null -w \"%{http_code}\" -X DELETE \\\n                  
\"https://hub.docker.com/v2/repositories/${REPO}/tags/${TAG_NAME}\" \\\n                  -H \"Authorization: Bearer ${TOKEN}\")\n\n                if [ \"$STATUS\" = \"204\" ]; then\n                  echo \"Deleted: ${TAG_NAME} (last updated: ${LAST_UPDATED})\"\n                  DELETED=$((DELETED + 1))\n                else\n                  echo \"::warning::Failed to delete ${TAG_NAME}: HTTP ${STATUS}\"\n                fi\n\n                sleep 0.5\n              else\n                RETAINED=$((RETAINED + 1))\n              fi\n            done\n\n            NEXT=$(echo \"$RESPONSE\" | jq -r '.next // empty')\n            if [ -z \"$NEXT\" ] || [ \"$NEXT\" = \"null\" ]; then\n              break\n            fi\n            PAGE=$((PAGE + 1))\n          done\n\n          echo \"\"\n          echo \"Summary: deleted=${DELETED} retained=${RETAINED}\"\n"
  },
  {
    "path": ".github/workflows/test-integration.yaml",
    "content": "# Copyright 2023 LiveKit, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nname: Integration Test\non:\n  workflow_dispatch:\n  pull_request:\n    branches: [ main ]\n    paths:\n      - build/chrome/**\n      - build/egress/**\n      - build/gstreamer/**\n      - build/test/**\n      - cmd/**\n      - pkg/**\n      - test/**\n      - go.mod\n\njobs:\n  build:\n    runs-on: namespace-profile-8vcpu-cache\n    outputs:\n      image: ${{ steps.docker-md.outputs.tags }}\n    steps:\n      - uses: actions/checkout@v6\n        with:\n          lfs: true\n      - name: Fetch media-samples (with LFS)\n        env:\n          GITHUB_TOKEN: ${{ github.token }}\n        run: build/test/fetch-media-samples.sh\n\n      - uses: actions/cache@v5\n        with:\n          path: |\n            ~/go/pkg/mod\n            ~/go/bin\n            ~/.cache\n          key: egress-integration-${{ hashFiles('**/go.sum') }}\n          restore-keys: egress-integration\n\n      - name: Docker metadata\n        id: docker-md\n        uses: docker/metadata-action@v6\n        with:\n          images: livekit/egress-integration\n          tags: |\n            type=sha\n\n      - name: Set up Go\n        uses: actions/setup-go@v6\n        with:\n          go-version: 1.26.1\n\n      - name: Download Go modules\n        run: go mod download\n\n      - name: Get template image\n        id: template-tag\n        run: |\n          TEMPLATE_TAG=`go run 
github.com/livekit/egress/cmd/template_version`\n          echo \"template_tag=$TEMPLATE_TAG\" > \"$GITHUB_OUTPUT\"\n\n      - name: Login to DockerHub\n        uses: docker/login-action@v4\n        with:\n          username: ${{ secrets.DOCKERHUB_USERNAME }}\n          password: ${{ secrets.DOCKERHUB_TOKEN }}\n\n      - name: Set up Buildx\n        id: buildx\n        uses: docker/setup-buildx-action@v4\n        with:\n          driver: docker-container\n          install: true\n\n      - name: Build and push\n        uses: docker/build-push-action@v7\n        with:\n          builder: ${{ steps.buildx.outputs.name }}\n          context: .\n          file: ./build/test/Dockerfile\n          push: true\n          platforms: linux/amd64\n          tags: ${{ steps.docker-md.outputs.tags }}\n          labels: ${{ steps.docker-md.outputs.labels }}\n          build-args: |\n            DEADLOCK=1\n            TEMPLATE_TAG=${{ steps.template-tag.outputs.template_tag }}\n          # TODO: Enable caching once registry periodic cleanup is implemented\n          #cache-from: type=registry,ref=livekit/egress-integration:buildcache\n          #cache-to:   type=registry,ref=livekit/egress-integration:buildcache,mode=max,ignore-error=true\n\n  test:\n    needs: build\n    strategy:\n      fail-fast: false\n      matrix:\n        integration_type: [file-room, file-track, file-media, stream, segments, images, multi, edge]\n    runs-on: namespace-profile-8vcpu-cache\n    steps:\n      - uses: shogo82148/actions-setup-redis@v1\n        with:\n          redis-version: '6.x'\n          auto-start: true\n      - run: redis-cli ping\n\n      - name: Run tests\n        env:\n          IMAGE: ${{needs.build.outputs.image}}\n        run: |\n          docker run --rm \\\n            --network host \\\n            -e GITHUB_WORKFLOW=1 \\\n            -e EGRESS_CONFIG_STRING=\"$(echo ${{ secrets.EGRESS_CONFIG_STRING }} | base64 -d)\" \\\n            -e INTEGRATION_TYPE=\"${{ 
matrix.integration_type }}\" \\\n            -e S3_UPLOAD=\"$(echo ${{ secrets.S3_UPLOAD }} | base64 -d)\" \\\n            -e GCP_UPLOAD=\"$(echo ${{ secrets.GCP_UPLOAD }} | base64 -d)\" \\\n            ${{ env.IMAGE }}\n"
  },
  {
    "path": ".github/workflows/test-template.yaml",
    "content": "# Copyright 2023 LiveKit, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nname: Template Test\non:\n  workflow_dispatch:\n  pull_request:\n    branches: [main]\n    paths:\n      - build/template/Dockerfile\n      - template-default/**\n      - template-sdk/**\n\ndefaults:\n  run:\n    working-directory: template-default\n\njobs:\n  test:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v6\n      - uses: pnpm/action-setup@v5\n        with:\n          version: 10\n      - name: Use Node.js 22\n        uses: actions/setup-node@v6\n        with:\n          node-version: 24\n          cache: \"pnpm\"\n          cache-dependency-path: ./template-default/pnpm-lock.yaml\n\n      - run: pnpm install\n      - run: pnpm build\n"
  },
  {
    "path": ".gitignore",
    "content": ".idea/\n.DS_Store\n\n.github/workflows/config.yaml\nbuild/plugins/\nmedia-samples/\ntest/output/*\ntest/*.yaml\n!test/config-sample.yaml\n"
  },
  {
    "path": ".golangci.yaml",
    "content": "version: \"2\"\nrun:\n  build-tags:\n    - deadlock\n    - integration\n  tests: true\nlinters:\n  default: none\n  enable:\n    - asasalint\n    - dupl\n    - errname\n    - fatcontext\n    - forbidigo\n    - goconst\n    - govet\n    - misspell\n    - nilerr\n    - revive\n    - staticcheck\n  settings:\n    forbidigo:\n      forbid:\n        - pattern: sync\\.Mutex\n        - pattern: sync\\.RWMutex\n      analyze-types: true\n    staticcheck:\n      checks:\n        - \"all\"\n        - \"-ST1000\" # package comments — not useful for internal packages\n        - \"-ST1003\" # naming conventions — would break exported API\n    misspell:\n      mode: default\n      locale: US\n    revive:\n      confidence: 0.8\n      severity: warning\n      rules:\n        - name: argument-limit\n        - name: atomic\n        - name: blank-imports\n        - name: context-as-argument\n        - name: context-keys-type\n        - name: deep-exit\n        - name: defer\n        - name: dot-imports\n        - name: early-return\n        - name: errorf\n        - name: error-strings\n        - name: if-return\n        - name: increment-decrement\n        - name: indent-error-flow\n        - name: range\n        - name: range-val-address\n        - name: receiver-naming\n        - name: superfluous-else\n        - name: unexported-return\n        - name: unused-parameter\n        - name: var-declaration\n        - name: waitgroup-by-value\n        - name: datarace\n        - name: identical-branches\n        - name: identical-switch-branches\n        - name: unconditional-recursion\n        - name: unreachable-code \n        - name: empty-block\n  exclusions:\n    generated: lax\n    rules:\n      - path: cmd/server/main.go\n        text: 'pattern templates: no matching files found'\n    paths:\n      - third_party$\n      - builtin$\n      - examples$\nissues:\n  max-issues-per-linter: 0\n  max-same-issues: 0\n"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "NOTICE",
    "content": "Copyright 2023 LiveKit, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n   http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n"
  },
  {
    "path": "README.md",
    "content": "<!--BEGIN_BANNER_IMAGE-->\n\n<picture>\n  <source media=\"(prefers-color-scheme: dark)\" srcset=\"/.github/banner_dark.png\">\n  <source media=\"(prefers-color-scheme: light)\" srcset=\"/.github/banner_light.png\">\n  <img style=\"width:100%;\" alt=\"The LiveKit icon, the name of the repository and some sample code in the background.\" src=\"https://raw.githubusercontent.com/livekit/egress/main/.github/banner_light.png\">\n</picture>\n\n<!--END_BANNER_IMAGE-->\n\n# LiveKit Egress\n\n<!--BEGIN_DESCRIPTION-->\nWebRTC is fantastic for last-mile media delivery, but interoperability with other services can be challenging.\nAn application may want to do things like store a session for future playback, relay a stream to a CDN, or process a track through a transcription service – workflows where media travels through a different system or protocol.\nLiveKit Egress is the solution to these interoperability challenges. It provides a consistent set of APIs that gives you\nuniversal export of your LiveKit sessions and tracks.\n<!--END_DESCRIPTION-->\n\n## Capabilities\n\n1. **Room composite** for exporting an entire room.\n2. **Web egress** for recordings that aren't attached to a single LiveKit room.\n3. **Track composite** for exporting synchronized tracks of a single participant.\n4. 
**Track egress** for exporting individual tracks.\n\nDepending on your request type, the egress service will either launch Chrome using a web template\n(room composite requests) or a supplied url (web requests), or it will use the Go SDK directly (track and track composite requests).\nIrrespective of method used, when moving between protocols, containers or encodings, LiveKit's egress service will automatically transcode streams for you using GStreamer.\n\n## Supported Output\n\n| Egress Type     | MP4 File | OGG File | WebM File | HLS (TS Segments) | RTMP(s) Stream | SRT Stream | WebSocket Stream | Thumbnails (JPEGs) |\n|-----------------|----------|----------|-----------|-------------------|----------------|------------------|------------------|--------------------|\n| Room Composite  | ✅        | ✅        |           | ✅                 | ✅              | ✅              |                  | ✅                  |\n| Web             | ✅        | ✅        |           | ✅                 | ✅              | ✅              |                  | ✅                  |\n| Track Composite | ✅        | ✅        |           | ✅                 | ✅              | ✅              |                  | ✅                  |\n| Track           | ✅        | ✅        | ✅         |                   |                |               | ✅                |                    |\n\nFiles can be uploaded to any S3 compatible storage, Azure, or GCP.\n\n## Documentation\n\nFull docs available [here](https://docs.livekit.io/guides/egress/)\n\n### Config\n\nThe Egress service takes a yaml config file:\n\n```yaml\n# required fields\napi_key: livekit server api key. LIVEKIT_API_KEY env can be used instead\napi_secret: livekit server api secret. LIVEKIT_API_SECRET env can be used instead\nws_url: livekit server websocket url. 
LIVEKIT_WS_URL can be used instead\nredis:\n  address: must be the same redis address used by your livekit server\n  username: redis username\n  password: redis password\n  db: redis db\n\n# optional fields\nhealth_port: port used for http health checks (default 0)\ntemplate_port: port used to host default templates (default 7980)\nprometheus_port: port used to collect prometheus metrics (default 0)\ndebug_handler_port: port used to host http debug handlers (default 0)\nlogging:\n  level: debug, info, warn, or error (default info)\n  json: true\ntemplate_base: can be used to host custom templates (default http://localhost:<template_port>/)\nbackup_storage: files will be moved here when uploads fail. location must have write access granted for all users\nenable_chrome_sandbox: if true, egress will run Chrome with sandboxing enabled. This requires a specific Docker setup, see below.\ncpu_cost: # optionally override cpu cost estimation, used when accepting or denying requests\n  room_composite_cpu_cost: 3.0\n  web_cpu_cost: 3.0\n  track_composite_cpu_cost: 2.0\n  track_cpu_cost: 1.0\nsession_limits: # optional egress duration limits - once hit, egress will end with status EGRESS_LIMIT_REACHED\n  file_output_max_duration: 1h\n  stream_output_max_duration: 90m\n  segment_output_max_duration: 3h\n\n# file upload config - only one of the following. 
Can be overridden per request\nstorage:\n  s3:\n    access_key: AWS_ACCESS_KEY_ID env or EMPTY if using IAM Role or instance profile\n    secret: AWS_SECRET_ACCESS_KEY env or EMPTY if using IAM Role or instance profile\n    session_token: AWS_SESSION_TOKEN env or EMPTY if using IAM Role or instance profile\n    region: AWS_DEFAULT_REGION env or EMPTY if using IAM Role or instance profile\n    endpoint: (optional) custom endpoint\n    bucket: bucket to upload files to\n    # the following s3 options can only be set in config, *not* per request, they will be added to any per-request options\n    proxy_config:\n      url: (optional) proxy url\n      username: (optional) proxy username\n      password: (optional) proxy password\n    max_retries: (optional, default=3) number of retries to attempt\n    max_retry_delay: (optional, default=5s) max delay between retries (e.g. 5s, 100ms, 1m...)\n    min_retry_delay: (optional, default=500ms) min delay between retries (e.g. 100ms, 1s...)\n    aws_log_level: (optional, default=LogOff) log level for aws sdk (LogDebugWithRequestRetries, LogDebug, ...) 
\n  azure:\n    account_name: AZURE_STORAGE_ACCOUNT env can be used instead\n    account_key: AZURE_STORAGE_KEY env can be used instead\n    container_name: container to upload files to\n  gcp:\n    credentials_json: GOOGLE_APPLICATION_CREDENTIALS env can be used instead\n    bucket: bucket to upload files to\n    proxy_config:\n      url: (optional) proxy url\n      username: (optional) proxy username\n      password: (optional) proxy password\n  alioss:\n    access_key: Ali OSS AccessKeyId\n    secret: Ali OSS AccessKeySecret\n    region: Ali OSS region\n    endpoint: (optional) custom endpoint\n    bucket: bucket to upload files to\n\n# dev/debugging fields\ninsecure: can be used to connect to an insecure websocket (default false)\ndebug:\n  enable_profiling: create and upload pipeline dot file and pprof file on pipeline failure\n  s3: upload config for dotfiles (see above)\n  azure: upload config for dotfiles (see above)\n  gcp: upload config for dotfiles (see above)\n  alioss: upload config for dotfiles (see above)\n```\n\nThe config file can be added to a mounted volume with its location passed in the EGRESS_CONFIG_FILE env var, or its body can be passed in the EGRESS_CONFIG_BODY env var.\n\n### Filenames\n\nThe below templates can also be used in filename/filepath parameters:\n\n| Egress Type     | {room_id} | {room_name} | {time} | {utc} | {publisher_identity} | {track_id} | {track_type} | {track_source} |\n|-----------------|-----------|-------------|--------|-------|----------------------|------------|--------------|----------------|\n| Room Composite  | ✅         | ✅           | ✅      | ✅     |                      |            |              |                |\n| Web             |           |             | ✅      | ✅     |                      |            |              |                |\n| Track Composite | ✅         | ✅           | ✅      | ✅     | ✅                    |            |              |                |\n| Track           | ✅         | 
✅           | ✅      | ✅     | ✅                    | ✅          | ✅            | ✅              |\n\n* If no filename is provided with a request, one will be generated in the form of `\"{room_name}-{time}\"`.\n* If your filename ends with a `/`, a file will be generated in that directory.\n* For 1/2/2006, 3:04:05.789 PM, {time} format would display \"2006-01-02T150405\", and {utc} format \"20060102150405789\"\n\nExamples:\n\n| Request filename                         | Resulting filename                                |\n|------------------------------------------|---------------------------------------------------|\n| \"\"                                       | testroom-2022-10-04T011306.mp4                    |\n| \"livekit-recordings/\"                    | livekit-recordings/testroom-2022-10-04T011306.mp4 |\n| \"{room_name}/{time}\"                     | testroom/2022-10-04T011306.mp4                    |\n| \"{room_id}-{publisher_identity}.mp4\"     | 10719607-f7b0-4d82-afe1-06b77e91fe12-david.mp4    |\n| \"{track_type}-{track_source}-{track_id}\" | audio-microphone-TR_SKasdXCVgHsei.ogg             |\n\n### Running locally\n\nThese changes are **not** recommended for a production setup.\n\nTo run against a local livekit server, you'll need to do the following:\n\n- open `/usr/local/etc/redis.conf` and comment out the line that says `bind 127.0.0.1`\n- change `protected-mode yes` to `protected-mode no` in the same file\n- find your IP as seen by docker\n  - `ws_url` needs to be set using the IP as Docker sees it\n  - on linux, this should be `172.17.0.1`\n  - on mac or windows, run `docker run -it --rm alpine nslookup host.docker.internal` and you should see something like\n    `Name: host.docker.internal Address: 192.168.65.2`\n\nThese changes allow the service to connect to your local redis instance from inside the docker container.\n\nCreate a directory to mount. 
In this example, we will use `~/egress-test`.\n\nCreate a config.yaml in the above directory.\n\n- `redis` and `ws_url` should use the above IP instead of `localhost`\n- `insecure` should be set to true\n\n```yaml\nlog_level: debug\napi_key: your-api-key\napi_secret: your-api-secret\nws_url: ws://192.168.65.2:7880\ninsecure: true\nredis:\n  address: 192.168.65.2:6379\n```\n\nThen to run the service:\n\n```shell\ndocker run --rm \\\n    -e EGRESS_CONFIG_FILE=/out/config.yaml \\\n    -v ~/egress-test:/out \\\n    livekit/egress\n```\n\nYou can then use our [cli](https://github.com/livekit/livekit-cli) to submit egress requests to your server.\n\n### Chrome sandboxing\n\nBy default, Room Composite and Web egresses run with Chrome sandboxing disabled. This is because the default docker security settings prevent Chrome from\nswitching to a different kernel namespace, which is needed by Chrome to set up its sandbox.\n\nChrome sandboxing within Egress can be reenabled by setting the `enable_chrome_sandbox` option to `true` in the egress configuration, and launching docker using the [provided\nseccomp security profile](https://github.com/livekit/egress/blob/main/chrome-sandboxing-seccomp-profile.json):\n\n```shell\ndocker run --rm \\\n    -e EGRESS_CONFIG_FILE=/out/config.yaml \\\n    -v ~/egress-test:/out \\\n    --security-opt seccomp=chrome-sandboxing-seccomp-profile.json \\\n    livekit/egress\n```\n\nThis profile is based on the [default docker seccomp security profile](https://github.com/moby/moby/blob/master/profiles/seccomp/default.json) and allows\nthe 2 extra system calls (`clone` and `unshare`) that Chrome needs to set up the sandbox.\n\nNote that kubernetes disables seccomp entirely by default, which means that running with Chrome sandboxing enabled is possible on a kubernetes cluster with\nthe default security settings.\n\n## FAQ\n\n### Can I store the files locally instead of uploading to cloud storage?\n- Yes, you can mount a volume with your `docker run` 
command (e.g. `-v ~/livekit-egress:/out/`), and use the mounted\ndirectory in your filenames (e.g. `/out/my-recording.mp4`). Since egress is not run as the root user, write permissions\nwill need to be enabled for all users.\n\n### I get a `\"no response from egress service\"` error when sending a request\n\n- Your livekit server cannot connect to an egress instance through redis. Make sure they are both able to reach the same redis db.\n- If all of your egress instances are full, you'll need to deploy more instances or set up autoscaling.\n\n### I get a different error when sending a request\n\n- Make sure your egress, livekit, server sdk, and livekit-cli are all up to date.\n\n### Can I run this without docker?\n\n- It's possible, but not recommended. To do so, you would need to install gstreamer along with its plugins, chrome, xvfb,\n  and have a pulseaudio server running.\n\n## Testing and Development\n\nTo run the test against your own LiveKit rooms, a deployed LiveKit server with a secure websocket url is required.\nFirst, create `egress/test/config.yaml`:\n\n```yaml\nlog_level: debug\napi_key: your-api-key\napi_secret: your-api-secret\nws_url: wss://your-livekit-url.com\nredis:\n  address: 192.168.65.2:6379\nroom_only: false\nweb_only: false\ntrack_composite_only: false\ntrack_only: false\nfile_only: false\nstream_only: false\nsegments_only: false\nmuting: false\ndot_files: false\nshort: false\n```\n\nJoin a room using https://example.livekit.io or your own client, then run `mage integration test/config.yaml`.\nThis will test recording different file types, output settings, and streams against your room.\n\n<!--BEGIN_REPO_NAV-->\n<br/><table>\n<thead><tr><th colspan=\"2\">LiveKit Ecosystem</th></tr></thead>\n<tbody>\n<tr><td>Agents SDKs</td><td><a href=\"https://github.com/livekit/agents\">Python</a> · <a href=\"https://github.com/livekit/agents-js\">Node.js</a></td></tr><tr></tr>\n<tr><td>LiveKit SDKs</td><td><a 
href=\"https://github.com/livekit/client-sdk-js\">Browser</a> · <a href=\"https://github.com/livekit/client-sdk-swift\">Swift</a> · <a href=\"https://github.com/livekit/client-sdk-android\">Android</a> · <a href=\"https://github.com/livekit/client-sdk-flutter\">Flutter</a> · <a href=\"https://github.com/livekit/client-sdk-react-native\">React Native</a> · <a href=\"https://github.com/livekit/rust-sdks\">Rust</a> · <a href=\"https://github.com/livekit/node-sdks\">Node.js</a> · <a href=\"https://github.com/livekit/python-sdks\">Python</a> · <a href=\"https://github.com/livekit/client-sdk-unity\">Unity</a> · <a href=\"https://github.com/livekit/client-sdk-unity-web\">Unity (WebGL)</a> · <a href=\"https://github.com/livekit/client-sdk-esp32\">ESP32</a> · <a href=\"https://github.com/livekit/client-sdk-cpp\">C++</a></td></tr><tr></tr>\n<tr><td>Starter Apps</td><td><a href=\"https://github.com/livekit-examples/agent-starter-python\">Python Agent</a> · <a href=\"https://github.com/livekit-examples/agent-starter-node\">TypeScript Agent</a> · <a href=\"https://github.com/livekit-examples/agent-starter-react\">React App</a> · <a href=\"https://github.com/livekit-examples/agent-starter-swift\">SwiftUI App</a> · <a href=\"https://github.com/livekit-examples/agent-starter-android\">Android App</a> · <a href=\"https://github.com/livekit-examples/agent-starter-flutter\">Flutter App</a> · <a href=\"https://github.com/livekit-examples/agent-starter-react-native\">React Native App</a> · <a href=\"https://github.com/livekit-examples/agent-starter-embed\">Web Embed</a></td></tr><tr></tr>\n<tr><td>UI Components</td><td><a href=\"https://github.com/livekit/components-js\">React</a> · <a href=\"https://github.com/livekit/components-android\">Android Compose</a> · <a href=\"https://github.com/livekit/components-swift\">SwiftUI</a> · <a href=\"https://github.com/livekit/components-flutter\">Flutter</a></td></tr><tr></tr>\n<tr><td>Server APIs</td><td><a 
href=\"https://github.com/livekit/node-sdks\">Node.js</a> · <a href=\"https://github.com/livekit/server-sdk-go\">Golang</a> · <a href=\"https://github.com/livekit/server-sdk-ruby\">Ruby</a> · <a href=\"https://github.com/livekit/server-sdk-kotlin\">Java/Kotlin</a> · <a href=\"https://github.com/livekit/python-sdks\">Python</a> · <a href=\"https://github.com/livekit/rust-sdks\">Rust</a> · <a href=\"https://github.com/agence104/livekit-server-sdk-php\">PHP (community)</a> · <a href=\"https://github.com/pabloFuente/livekit-server-sdk-dotnet\">.NET (community)</a></td></tr><tr></tr>\n<tr><td>Resources</td><td><a href=\"https://docs.livekit.io\">Docs</a> · <a href=\"https://docs.livekit.io/mcp\">Docs MCP Server</a> · <a href=\"https://github.com/livekit/livekit-cli\">CLI</a> · <a href=\"https://cloud.livekit.io\">LiveKit Cloud</a></td></tr><tr></tr>\n<tr><td>LiveKit Server OSS</td><td><a href=\"https://github.com/livekit/livekit\">LiveKit server</a> · <b>Egress</b> · <a href=\"https://github.com/livekit/ingress\">Ingress</a> · <a href=\"https://github.com/livekit/sip\">SIP</a></td></tr><tr></tr>\n<tr><td>Community</td><td><a href=\"https://community.livekit.io\">Developer Community</a> · <a href=\"https://livekit.io/join-slack\">Slack</a> · <a href=\"https://x.com/livekit\">X</a> · <a href=\"https://www.youtube.com/@livekit_io\">YouTube</a></td></tr>\n</tbody>\n</table>\n<!--END_REPO_NAV-->\n"
  },
  {
    "path": "bootstrap.sh",
    "content": "#!/bin/bash\n# Copyright 2023 LiveKit, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nif ! command -v mage &> /dev/null\nthen\n  pushd /tmp\n  git clone https://github.com/magefile/mage\n  cd mage\n  go run bootstrap.go\n  rm -rf /tmp/mage\n  popd\nfi\n\nif ! command -v mage &> /dev/null\nthen\n  echo \"Ensure `go env GOPATH`/bin is in your \\$PATH\"\n  exit 1\nfi\n\ngo mod download\n"
  },
  {
    "path": "build/chrome/Dockerfile",
    "content": "# Copyright 2023 LiveKit, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nFROM ubuntu:24.04\n\nRUN mkdir /chrome-installer\nCOPY output/arm64 /chrome-installer/arm64\nCOPY output/amd64 /chrome-installer/amd64\nCOPY install-chrome /chrome-installer/install-chrome\n"
  },
  {
    "path": "build/chrome/README.md",
    "content": "# Chrome installer\n\nThis dockerfile is used to install chrome on ubuntu amd64 and arm64.\n\nThere is no official or available arm64 build with H264 support, so we needed to compile it from source. \n\n## Usage\n\nTo install chrome, add the following to your dockerfile:\n\n```dockerfile\nARG TARGETPLATFORM\nCOPY --from=livekit/chrome-installer:124.0.6367.201 /chrome-installer /chrome-installer\nRUN /chrome-installer/install-chrome \"$TARGETPLATFORM\"\nENV PATH=${PATH}:/chrome\nENV CHROME_DEVEL_SANDBOX=/usr/local/sbin/chrome-devel-sandbox\n```\n\n## Compilation \n\nIt must be cross compiled from an amd64 builder. This build takes multiple hours, even on fast machines.\n\nRelevant docs:\n* [Build instructions](https://chromium.googlesource.com/chromium/src/+/main/docs/linux/build_instructions.md)\n* [Cross compiling](https://chromium.googlesource.com/chromium/src/+/main/docs/linux/chromium_arm.md)\n\n### Requirements \n\n* 64-bit Intel machine (x86_64)\n* Ubuntu 22.04 LTS\n* 64+ CPU cores\n* 128GB+ RAM\n* 100GB+ disk space\n"
  },
  {
    "path": "build/chrome/install-chrome",
    "content": "#!/bin/bash\nset -euxo pipefail\n\nif [ \"$1\" = \"linux/arm64\" ]\nthen\n  apt-get update\n  apt-get install -y \\\n    ca-certificates \\\n    fonts-liberation \\\n    libasound2t64 \\\n    libatk-bridge2.0-0 \\\n    libatk1.0-0 \\\n    libc6 \\\n    libcairo2 \\\n    libcups2 \\\n    libdbus-1-3 \\\n    libexpat1 \\\n    libfontconfig1 \\\n    libgbm1 \\\n    libglib2.0-0 \\\n    libnspr4 \\\n    libnss3 \\\n    libpango-1.0-0 \\\n    libpangocairo-1.0-0 \\\n    libx11-6 \\\n    libx11-xcb1 \\\n    libxcb1 \\\n    libxcomposite1 \\\n    libxcursor1 \\\n    libxdamage1 \\\n    libxext6 \\\n    libxfixes3 \\\n    libxi6 \\\n    libxrandr2 \\\n    libxrender1 \\\n    libxss1 \\\n    libxtst6 \\\n    xdg-utils\n  chmod +x /chrome-installer/arm64/chromedriver-mac-arm64/chromedriver\n  mv -f /chrome-installer/arm64/chromedriver-mac-arm64/chromedriver /usr/local/bin/chromedriver\n  mv /chrome-installer/arm64/ /chrome\n  cp /chrome/chrome_sandbox /usr/local/sbin/chrome-devel-sandbox\n  chown root:root /usr/local/sbin/chrome-devel-sandbox\n  chmod 4755 /usr/local/sbin/chrome-devel-sandbox\nelse\n  apt-get install -y /chrome-installer/amd64/google-chrome-stable_amd64.deb\n  chmod +x /chrome-installer/amd64/chromedriver-linux64/chromedriver\n  mv -f /chrome-installer/amd64/chromedriver-linux64/chromedriver /usr/local/bin/chromedriver\nfi\n\nrm -rf /chrome-installer\n"
  },
  {
    "path": "build/chrome/scripts/amd64.sh",
    "content": "#!/bin/bash\nset -xeuo pipefail\n\nwget https://dl.google.com/linux/chrome/deb/pool/main/g/google-chrome-stable/google-chrome-stable_\"$1\"-1_amd64.deb\nmkdir -p \"$HOME/output/amd64\"\nmv google-chrome-stable_\"$1\"-1_amd64.deb \"$HOME/output/amd64/google-chrome-stable_amd64.deb\"\n"
  },
  {
    "path": "build/chrome/scripts/arm64.sh",
    "content": "#!/bin/bash\nset -xeuo pipefail\n\nsudo apt-get update\nsudo apt-get install -y \\\n  apt-utils \\\n  build-essential \\\n  curl \\\n  git \\\n  python3 \\\n  sudo \\\n  zip\n\nif [ ! -d \"$HOME/depot_tools\" ]; then\n  git clone https://chromium.googlesource.com/chromium/tools/depot_tools.git \"$HOME/depot_tools\"\nfi\nexport PATH=\"$PATH:$HOME/depot_tools\"\n\nmkdir -p \"$HOME\"\n\nif [ ! -d \"$HOME/chromium/.gclient\" ] && [ ! -f \"$HOME/chromium/.gclient\" ]; then\n  mkdir -p \"$HOME/chromium\"\n  cd \"$HOME/chromium\"\n  fetch --nohooks --no-history chromium\nfi\n\ncd \"$HOME/chromium\"\n\ncat > .gclient <<'EOF'\nsolutions = [\n  {\n    \"name\": \"src\",\n    \"url\": \"https://chromium.googlesource.com/chromium/src.git\",\n    \"managed\": False,\n    \"custom_deps\": {},\n    \"custom_vars\": {\n      \"checkout_pgo_profiles\": True,\n    },\n    \"target_cpu\": \"arm64\",\n  },\n]\nEOF\n\ncd src\n\ngit fetch --no-tags --depth=1 origin \"refs/tags/$1:refs/tags/$1\"\ngit checkout -B stable \"tags/$1\"\n\nfor attempt in 1 2 3 4 5; do\n  if gclient sync -D --with_branch_heads -j 8; then\n    break\n  fi\n\n  if [ \"$attempt\" -eq 5 ]; then\n    echo \"gclient sync failed after $attempt attempts\"\n    exit 1\n  fi\n\n  sleep_secs=$((attempt * 30))\n  echo \"gclient sync failed, retrying in ${sleep_secs}s...\"\n  sleep \"$sleep_secs\"\ndone\n\n./build/install-build-deps.sh\n./build/linux/sysroot_scripts/install-sysroot.py --arch=arm64\ngclient runhooks\n\ngn gen out/default --args='\n  target_cpu=\"arm64\"\n  proprietary_codecs=true\n  ffmpeg_branding=\"Chrome\"\n\n  is_official_build=true\n  is_debug=false\n\n  symbol_level=0\n  blink_symbol_level=0\n  v8_symbol_level=0\n\n  enable_nacl=false\n  rtc_use_pipewire=false\n\n  is_component_build=false\n  use_jumbo_build=true\n\n  dcheck_always_on=false\n'\n\nexport NINJA_SUMMARIZE_BUILD=1\nautoninja -C out/default chrome chrome_sandbox -j \"$(nproc)\"\n\ncd out/default\n\nrm -rf 
\"$HOME/output/arm64\"\nmkdir -p \"$HOME/output/arm64/locales\"\n\nmv locales/en-US.pak \"$HOME/output/arm64/locales/\"\n\nrequired_files=(\n  chrome\n  chrome-wrapper\n  chrome_100_percent.pak\n  chrome_200_percent.pak\n  chrome_crashpad_handler\n  chrome_sandbox\n  icudtl.dat\n  libEGL.so\n  libGLESv2.so\n  resources.pak\n  snapshot_blob.bin\n  v8_context_snapshot.bin\n)\n\nfor f in \"${required_files[@]}\"; do\n  if [ ! -e \"$f\" ]; then\n    echo \"Missing required build output: $f\"\n    exit 1\n  fi\n  mv \"$f\" \"$HOME/output/arm64/\"\ndone\n"
  },
  {
    "path": "build/chrome/scripts/driver.sh",
    "content": "#!/bin/bash\nset -xeuo pipefail\n\nwget https://storage.googleapis.com/chrome-for-testing-public/\"$1\"/linux64/chromedriver-linux64.zip\nunzip chromedriver-linux64.zip -d \"$HOME/output/amd64\"\nwget https://storage.googleapis.com/chrome-for-testing-public/\"$1\"/mac-arm64/chromedriver-mac-arm64.zip\nunzip chromedriver-mac-arm64.zip -d \"$HOME/output/arm64\"\n"
  },
  {
    "path": "build/egress/Dockerfile",
    "content": "# Copyright 2023 LiveKit, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nARG TEMPLATE_TAG=latest\n\nFROM livekit/egress-templates:$TEMPLATE_TAG AS template\n\nFROM livekit/gstreamer:1.24.12-dev\n\nARG TARGETPLATFORM\nARG TARGETARCH\nENV TARGETARCH=${TARGETARCH}\nENV TARGETPLATFORM=${TARGETPLATFORM}\n\nWORKDIR /workspace\n\n# install go\nRUN wget https://go.dev/dl/go1.26.1.linux-${TARGETARCH}.tar.gz && \\\n    rm -rf /usr/local/go && \\\n    tar -C /usr/local -xzf go1.26.1.linux-${TARGETARCH}.tar.gz\nENV PATH=\"/usr/local/go/bin:${PATH}\"\n\n# download go modules\nCOPY go.mod .\nCOPY go.sum .\nRUN go mod download\n\n# copy source\nCOPY cmd/ cmd/\nCOPY pkg/ pkg/\nCOPY version/ version/\n\n# copy templates\nCOPY --from=template workspace/build/ cmd/server/templates/\n# delete .map files\nRUN find cmd/server/templates/ -name *.map | xargs rm\n\n# build\nRUN CGO_ENABLED=1 GOOS=linux GOARCH=${TARGETARCH} GO111MODULE=on GODEBUG=disablethp=1 go build -a -o egress ./cmd/server\n\n# install tini\nENV TINI_VERSION v0.19.0\n\nADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini-${TARGETARCH} /tini\nRUN chmod +x /tini\n\nFROM livekit/gstreamer:1.24.12-prod\n\nARG TARGETPLATFORM\n\n# install deps\nRUN apt-get update && \\\n    apt-get install -y \\\n    curl \\\n    fonts-noto \\\n    gnupg \\\n    pulseaudio \\\n    unzip \\\n    wget \\\n    xvfb \\\n    gstreamer1.0-plugins-base-\n\n# install chrome\nCOPY 
--from=livekit/chrome-installer:146.0.7680.177-1 /chrome-installer /chrome-installer\n\nRUN /chrome-installer/install-chrome \"$TARGETPLATFORM\"\n\n# clean up\nRUN rm -rf /var/lib/apt/lists/*\n\n# create egress user\nRUN useradd -ms /bin/bash -g root -G sudo,pulse,pulse-access egress\nRUN mkdir -p home/egress/tmp home/egress/.cache/xdgr && \\\n    chown -R egress /home/egress\n\n# copy files\nCOPY --from=1 /workspace/egress /bin/\nCOPY --from=1 /tini /tini\nCOPY build/egress/entrypoint.sh /\n\n# run\nUSER egress\nENV PATH=${PATH}:/chrome\nENV XDG_RUNTIME_DIR=/home/egress/.cache/xdgr\nENV CHROME_DEVEL_SANDBOX=/usr/local/sbin/chrome-devel-sandbox\nENTRYPOINT [\"/entrypoint.sh\"]\n"
  },
  {
    "path": "build/egress/entrypoint.sh",
    "content": "#!/usr/bin/env bash\n# Copyright 2023 LiveKit, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nset -euo pipefail\n\n# Clean out tmp\nrm -rf /home/egress/tmp/*\n\n# Start pulseaudio\nrm -rf /var/run/pulse /var/lib/pulse /home/egress/.config/pulse /home/egress/.cache/xdgr/pulse\npulseaudio -D --verbose --exit-idle-time=-1 --disallow-exit > /dev/null 2>&1\n\n# Run egress service\nexec /tini -- egress\n"
  },
  {
    "path": "build/gstreamer/Dockerfile-base",
    "content": "FROM ubuntu:24.04\n\nARG GSTREAMER_VERSION\n\nARG LIBNICE_VERSION\n\nCOPY install-dependencies /\n\nRUN /install-dependencies\n\nENV PATH=/root/.cargo/bin:$PATH\n\nRUN for lib in gstreamer gst-plugins-base gst-plugins-good gst-plugins-bad gst-plugins-ugly gst-libav; \\\n        do \\\n            wget https://gstreamer.freedesktop.org/src/$lib/$lib-$GSTREAMER_VERSION.tar.xz && \\\n            tar -xf $lib-$GSTREAMER_VERSION.tar.xz && \\\n            rm $lib-$GSTREAMER_VERSION.tar.xz && \\\n            mv $lib-$GSTREAMER_VERSION $lib; \\\n        done\n\n# rust plugins are apparently only realeased on gitlab\n\nRUN wget https://gitlab.freedesktop.org/gstreamer/gst-plugins-rs/-/archive/gstreamer-$GSTREAMER_VERSION/gst-plugins-rs-gstreamer-$GSTREAMER_VERSION.tar.gz && \\\ntar xfz gst-plugins-rs-gstreamer-$GSTREAMER_VERSION.tar.gz && \\\nrm gst-plugins-rs-gstreamer-$GSTREAMER_VERSION.tar.gz && \\\nmv gst-plugins-rs-gstreamer-$GSTREAMER_VERSION gst-plugins-rs\n\nRUN wget https://libnice.freedesktop.org/releases/libnice-$LIBNICE_VERSION.tar.gz && \\\ntar xfz libnice-$LIBNICE_VERSION.tar.gz && \\\nrm libnice-$LIBNICE_VERSION.tar.gz && \\\nmv libnice-$LIBNICE_VERSION libnice\n"
  },
  {
    "path": "build/gstreamer/Dockerfile-dev",
    "content": "ARG GSTREAMER_VERSION\n\nFROM livekit/gstreamer:${GSTREAMER_VERSION}-base-${TARGETARCH}\n\nENV DEBUG=true\nENV OPTIMIZATIONS=false\n\nCOPY compile /\nCOPY compile-rs /\n\nRUN /compile\nRUN /compile-rs\n\nFROM ubuntu:24.04\n\nCOPY install-dependencies /\n\nRUN /install-dependencies\n\nCOPY --from=0 /compiled-binaries /\n"
  },
  {
    "path": "build/gstreamer/Dockerfile-prod",
    "content": "ARG GSTREAMER_VERSION\n\nFROM livekit/gstreamer:${GSTREAMER_VERSION}-base-${TARGETARCH}\n\nENV DEBUG=false\nENV OPTIMIZATIONS=true\n\nCOPY compile /\n\nRUN /compile\n\nFROM ubuntu:24.04\n\nRUN apt-get update && \\\n    apt-get dist-upgrade -y && \\\n    apt-get install -y --no-install-recommends \\\n        bubblewrap \\\n        ca-certificates \\\n        iso-codes \\\n        ladspa-sdk \\\n        liba52-0.7.4 \\\n        libaa1 \\\n        libaom3 \\\n        libass9 \\\n        libavcodec60 \\\n        libavfilter9 \\\n        libavformat60 \\\n        libavutil58 \\\n        libbs2b0 \\\n        libbz2-1.0 \\\n        libcaca0 \\\n        libcap2 \\\n        libchromaprint1 \\\n        libcurl3-gnutls \\\n        libdca0 \\\n        libde265-0 \\\n        libdv4 \\\n        libdvdnav4 \\\n        libdvdread8 \\\n        libdw1 \\\n        libegl1 \\\n        libepoxy0 \\\n        libfaac0 \\\n        libfaad2 \\\n        libfdk-aac2 \\\n        libflite1 \\\n        libgbm1 \\\n        libgcrypt20 \\\n        libgl1 \\\n        libgles1 \\\n        libgles2 \\\n        libglib2.0-0 \\\n        libgme0 \\\n        libgmp10 \\\n        libgsl27 \\\n        libgsm1 \\\n        libgudev-1.0-0 \\\n        libharfbuzz-icu0 \\\n        libjpeg8 \\\n        libkate1 \\\n        liblcms2-2 \\\n        liblilv-0-0 \\\n        libmjpegutils-2.1-0 \\\n        libmodplug1 \\\n        libmp3lame0 \\\n        libmpcdec6 \\\n        libmpeg2-4 \\\n        libmpg123-0 \\\n        libofa0 \\\n        libogg0 \\\n        libopencore-amrnb0 \\\n        libopencore-amrwb0 \\\n        libopenexr-3-1-30 \\\n        libopenjp2-7 \\\n        libopus0 \\\n        liborc-0.4-0 \\\n        libpango-1.0-0 \\\n        libpng16-16 \\\n        librsvg2-2 \\\n        librtmp1 \\\n        libsbc1 \\\n        libseccomp2 \\\n        libshout3 \\\n        libsndfile1 \\\n        libsoundtouch1 \\\n        libsoup2.4-1 \\\n        libspandsp2 \\\n        libspeex1 \\\n        
libsrt1.5-openssl \\\n        libsrtp2-1 \\\n        libssl3 \\\n        libtag1v5 \\\n        libtheora0 \\\n        libtwolame0 \\\n        libunwind8 \\\n        libvisual-0.4-0 \\\n        libvo-aacenc0 \\\n        libvo-amrwbenc0 \\\n        libvorbis0a \\\n        libvpx9 \\\n        libvulkan1 \\\n        libwavpack1 \\\n        libwebp7 \\\n        libwebpdemux2 \\\n        libwebpmux3 \\\n        libwebrtc-audio-processing1 \\\n        libwildmidi2 \\\n        libwoff1 \\\n        libx264-164 \\\n        libx265-199 \\\n        libxkbcommon0 \\\n        libxslt1.1 \\\n        libzbar0 \\\n        libzvbi0 \\\n        mjpegtools \\\n        xdg-dbus-proxy && \\\n    apt-get clean && \\\n    rm -rf /var/lib/apt/lists/*\n\nCOPY --from=0 /compiled-binaries /\n"
  },
  {
    "path": "build/gstreamer/Dockerfile-prod-rs",
    "content": "ARG GSTREAMER_VERSION\n\nFROM livekit/gstreamer:${GSTREAMER_VERSION}-base-${TARGETARCH}\n\nFROM livekit/gstreamer:${GSTREAMER_VERSION}-dev-${TARGETARCH}\n\nCOPY --from=0 /gst-plugins-rs /gst-plugins-rs\n\nENV DEBUG=false\nENV OPTIMIZATIONS=true\nENV PATH=/root/.cargo/bin:$PATH\n\nCOPY compile-rs /\n\nRUN /compile-rs \n\nFROM livekit/gstreamer:${GSTREAMER_VERSION}-prod-${TARGETARCH}\n\nCOPY --from=1 /compiled-binaries /\n"
  },
  {
    "path": "build/gstreamer/compile",
    "content": "#!/bin/bash\nset -euxo pipefail\n\nfor repo in gstreamer libnice gst-plugins-base gst-plugins-good gst-plugins-bad gst-plugins-ugly gst-libav; do\n  pushd $repo\n\n  opts=\"-D prefix=/usr\"\n\n  if [[ $repo != \"libnice\" ]]; then\n    opts=\"$opts -D tests=disabled -D doc=disabled\"\n  fi\n\n  if [[ $repo == \"gstreamer\" ]]; then\n    opts=\"$opts -D examples=disabled -D introspection=disabled\"\n  elif [[ $repo == \"gst-plugins-base\" ]]; then\n    opts=\"$opts -D examples=disabled -D introspection=disabled -D qt5=disabled\"\n  elif [[ $repo == \"gst-plugins-good\" ]]; then\n    opts=\"$opts -D examples=disabled -D pulse=enabled -D qt5=disabled\"\n  elif [[ $repo == \"gst-plugins-bad\" ]]; then\n    opts=\"$opts -D gpl=enabled -D examples=disabled -D introspection=disabled\"\n  elif [[ $repo == \"gst-plugins-ugly\" ]]; then\n    opts=\"$opts -D gpl=enabled\"\n  fi\n\n  if [[ $DEBUG == 'true' ]]; then\n    if [[ $OPTIMIZATIONS == 'true' ]]; then\n      opts=\"$opts -D buildtype=debugoptimized\"\n    else\n      opts=\"$opts -D buildtype=debug\"\n    fi\n  else\n    opts=\"$opts -D buildtype=release -D b_lto=true\"\n  fi\n\n  rm -rf build\n  meson setup build $opts\n\n  if [[ -z \"${NINJA_JOBS:-}\" ]]; then\n    # Limit to 4 jobs to avoid OOM issues\n    NINJA_JOBS=4\n  fi\n\n  # This is needed for other plugins to be built properly\n  ninja -j \"${NINJA_JOBS}\" -C build install\n  # This is where we'll grab build artifacts from\n  DESTDIR=/compiled-binaries ninja -j \"${NINJA_JOBS}\" -C build install\n  popd\ndone\n\ngst-inspect-1.0\n"
  },
  {
    "path": "build/gstreamer/compile-rs",
    "content": "#!/bin/bash\nset -euxo pipefail\n\n: \"${CARGO_BUILD_JOBS:=4}\"\nexport CARGO_BUILD_JOBS\n\nfor repo in gst-plugins-rs; do\n  pushd $repo\n\n  # strip binaries in debug mode\n  mv Cargo.toml Cargo.toml.old\n  sed s,'\\[profile.release\\]','[profile.release]\\nstrip=\"debuginfo\"', Cargo.toml.old > Cargo.toml\n  cargo update -p time\n\n  opts=\"-D prefix=/usr -D tests=disabled -D doc=disabled\"\n\n  if [[ $DEBUG == 'true' ]]; then\n    if [[ $OPTIMIZATIONS == 'true' ]]; then\n      opts=\"$opts -D buildtype=debugoptimized\"\n    else\n      opts=\"$opts -D buildtype=debug\"\n    fi\n  else\n    opts=\"$opts -D buildtype=release -D b_lto=true\"\n  fi\n\n  rm -rf build\n  meson setup build $opts\n\n  if [[ -z \"${NINJA_JOBS:-}\" ]]; then\n    # Limit to 4 jobs to avoid OOM issues\n    NINJA_JOBS=4\n  fi\n\n  # This is needed for other plugins to be built properly\n  ninja -j \"${NINJA_JOBS}\" -C build install\n  # This is where we'll grab build artifacts from\n  DESTDIR=/compiled-binaries ninja -j \"${NINJA_JOBS}\" -C build install\n  popd\ndone\n\ngst-inspect-1.0\n"
  },
  {
    "path": "build/gstreamer/install-dependencies",
    "content": "#!/bin/bash\nset -euxo pipefail\n\nexport DEBIAN_FRONTEND=noninteractive\n\napt-get update\napt-get dist-upgrade -y\napt-get install -y --no-install-recommends \\\n  bison \\\n  bubblewrap \\\n  ca-certificates \\\n  cmake \\\n  curl \\\n  flex \\\n  flite1-dev \\\n  gcc \\\n  gettext \\\n  git \\\n  gperf \\\n  iso-codes \\\n  liba52-0.7.4-dev \\\n  libaa1-dev \\\n  libaom-dev \\\n  libass-dev \\\n  libavcodec-dev \\\n  libavfilter-dev \\\n  libavformat-dev \\\n  libavutil-dev \\\n  libbs2b-dev \\\n  libbz2-dev \\\n  libcaca-dev \\\n  libcap-dev \\\n  libchromaprint-dev \\\n  libcurl4-gnutls-dev \\\n  libdca-dev \\\n  libde265-dev \\\n  libdrm-dev \\\n  libdv4-dev \\\n  libdvdnav-dev \\\n  libdvdread-dev \\\n  libdw-dev \\\n  libepoxy-dev \\\n  libfaac-dev \\\n  libfaad-dev \\\n  libfdk-aac-dev \\\n  libgbm-dev \\\n  libgcrypt20-dev \\\n  libgirepository1.0-dev \\\n  libgl-dev \\\n  libgles-dev \\\n  libglib2.0-dev \\\n  libgme-dev \\\n  libgmp-dev \\\n  libgsl-dev \\\n  libgsm1-dev \\\n  libgudev-1.0-dev \\\n  libjpeg-dev \\\n  libkate-dev \\\n  liblcms2-dev \\\n  liblilv-dev \\\n  libmjpegtools-dev \\\n  libmodplug-dev \\\n  libmp3lame-dev \\\n  libmpcdec-dev \\\n  libmpeg2-4-dev \\\n  libmpg123-dev \\\n  libofa0-dev \\\n  libogg-dev \\\n  libopencore-amrnb-dev \\\n  libopencore-amrwb-dev \\\n  libopenexr-dev \\\n  libopenjp2-7-dev \\\n  libopus-dev \\\n  liborc-0.4-dev \\\n  libpango1.0-dev \\\n  libpng-dev \\\n  libpulse-dev \\\n  librsvg2-dev \\\n  librtmp-dev \\\n  libsbc-dev \\\n  libseccomp-dev \\\n  libshout3-dev \\\n  libsndfile1-dev \\\n  libsoundtouch-dev \\\n  libsoup2.4-dev \\\n  libspandsp-dev \\\n  libspeex-dev \\\n  libsrt-gnutls-dev \\\n  libsrtp2-dev \\\n  libssl-dev \\\n  libtag1-dev \\\n  libtheora-dev \\\n  libtwolame-dev \\\n  libudev-dev \\\n  libunwind-dev \\\n  libvisual-0.4-dev \\\n  libvo-aacenc-dev \\\n  libvo-amrwbenc-dev \\\n  libvorbis-dev \\\n  libvpx-dev \\\n  libvulkan-dev \\\n  libwavpack-dev \\\n  libwebp-dev 
\\\n  libwebrtc-audio-processing-dev \\\n  libwildmidi-dev \\\n  libwoff-dev \\\n  libx264-dev \\\n  libx265-dev \\\n  libxkbcommon-dev \\\n  libxslt1-dev \\\n  libzbar-dev \\\n  libzvbi-dev \\\n  ninja-build \\\n  python3 \\\n  ruby \\\n  wget \\\n  xdg-dbus-proxy\n\n# install meson version needed for gstreamer 1.26.7 as it's not available in ubuntu 24.04\nMESON_VERSION=1.4.1\nMESON_BASENAME=\"meson-${MESON_VERSION}\"\nMESON_BASE_URL=\"https://github.com/mesonbuild/meson/releases/download/${MESON_VERSION}\"\n\npushd /tmp >/dev/null\ncurl -fsSL \"${MESON_BASE_URL}/${MESON_BASENAME}.tar.gz\" -o \"${MESON_BASENAME}.tar.gz\"\npopd >/dev/null\n\ntar -xzf \"/tmp/${MESON_BASENAME}.tar.gz\" -C /opt\nrm -f \"/tmp/${MESON_BASENAME}.tar.gz\"\n\ncat <<EOF >/usr/local/bin/meson\n#!/bin/sh\nexec /usr/bin/env python3 /opt/${MESON_BASENAME}/meson.py \"\\$@\"\nEOF\nchmod +x /usr/local/bin/meson\nln -sf /usr/local/bin/meson /usr/bin/meson\n\napt-get clean\nrm -rf /var/lib/apt/lists/*\n\n# install rust\ncurl -o install-rustup.sh --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs\nsh install-rustup.sh -y\nsource \"$HOME/.cargo/env\"\ncargo install cargo-c\nrm -rf install-rustup.sh\n"
  },
  {
    "path": "build/gstreamer/tag.sh",
    "content": "#!/bin/bash\n\nimage_suffix=(base dev prod prod-rs)\narchs=(amd64 arm64)\ngst_version=$1\n\nfor suffix in ${image_suffix[*]}\ndo\n    digests=()\n    for arch in ${archs[*]}\n    do\n        digest=`docker manifest inspect livekit/gstreamer:$gst_version-$suffix-$arch | jq \".manifests[] | select(.platform.architecture == \\\"$arch\\\").digest\"`\n        # remove quotes\n        digest=${digest:1:$[${#digest}-2]}\n        digests+=($digest)\n    done\n\n    manifests=\"\"\n    for digest in ${digests[*]}\n    do\n        manifests+=\" livekit/gstreamer@$digest\"\n    done\n\n    docker manifest create livekit/gstreamer:$gst_version-$suffix$manifests\n    docker manifest push livekit/gstreamer:$gst_version-$suffix\ndone\n"
  },
  {
    "path": "build/template/Dockerfile",
    "content": "# Copyright 2023 LiveKit, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nFROM ubuntu:24.04\n\nWORKDIR /workspace\n\nRUN apt update\nRUN apt install -y curl\nRUN curl -sL https://deb.nodesource.com/setup_22.x | bash -\nRUN apt update\nRUN apt install -y nodejs\nRUN npm install -g pnpm\n\n# copy templates\nCOPY template-default/ .\n\n# build\nRUN pnpm install\nRUN pnpm build\n"
  },
  {
    "path": "build/test/Dockerfile",
    "content": "# Copyright 2023 LiveKit, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# syntax=docker/dockerfile:1.6\nARG TARGETPLATFORM\nARG TEMPLATE_TAG=latest\nARG GO_VERSION=1.26.1\nARG LINT_VERSION=v2.11.3\n\nFROM livekit/egress-templates:$TEMPLATE_TAG AS template\n\nFROM livekit/gstreamer:1.24.12-dev AS builder\n\nWORKDIR /workspace\n\nARG TARGETPLATFORM\n\n# Deadlock 0 = off, 1 = on\nARG DEADLOCK=0\nARG GO_VERSION\n\n# install go\nRUN if [ \"$TARGETPLATFORM\" = \"linux/arm64\" ]; then GOARCH=arm64; else GOARCH=amd64; fi && \\\n    wget https://go.dev/dl/go${GO_VERSION}.linux-${GOARCH}.tar.gz && \\\n    rm -rf /usr/local/go && \\\n    tar -C /usr/local -xzf go${GO_VERSION}.linux-${GOARCH}.tar.gz\nENV PATH=\"/usr/local/go/bin:${PATH}\"\n\nENV GOMODCACHE=/go/pkg/mod \\\n    GOCACHE=/go/build-cache\n\n# download go modules\nCOPY go.mod .\nCOPY go.sum .\nRUN --mount=type=cache,target=/go/pkg/mod,sharing=locked \\\n    --mount=type=cache,target=/go/build-cache \\\n    go mod download\n\n# copy source\nCOPY cmd/ cmd/\nCOPY pkg/ pkg/\nCOPY test/ test/\nCOPY version/ version/\n\n# copy templates\nCOPY --from=template workspace/build/ cmd/server/templates/\nCOPY --from=template workspace/build/ test/templates/\n\n# build (service tests will need to launch the handler)\nRUN --mount=type=cache,target=/go/pkg/mod,sharing=locked \\\n    --mount=type=cache,target=/go/build-cache \\\n    if [ \"$TARGETPLATFORM\" = \"linux/arm64\" ]; then GOARCH=arm64; else 
GOARCH=amd64; fi && \\\n    TAGS=\"\"; \\\n    if [ \"${DEADLOCK:-0}\" = \"1\" ]; then TAGS=\"deadlock\"; fi; \\\n    CGO_ENABLED=1 GOOS=linux GOARCH=${GOARCH} GODEBUG=disablethp=1 go build ${TAGS:+-tags} ${TAGS:+\"$TAGS\"} -o egress ./cmd/server\n\nRUN --mount=type=cache,target=/go/pkg/mod,sharing=locked \\\n    --mount=type=cache,target=/go/build-cache \\\n    if [ \"$TARGETPLATFORM\" = \"linux/arm64\" ]; then GOARCH=arm64; else GOARCH=amd64; fi && \\\n    CGO_ENABLED=1 GOOS=linux GOARCH=${GOARCH} go test -c -v -race --tags=integration ./test\n\nFROM golangci/golangci-lint:${LINT_VERSION} AS golangci\n\nFROM builder AS lint\n\nCOPY --from=golangci /usr/bin/golangci-lint /usr/local/bin/golangci-lint\nCOPY .golangci.yaml .\n\nRUN --mount=type=cache,target=/go/pkg/mod,sharing=locked \\\n    --mount=type=cache,target=/go/build-cache \\\n    CGO_ENABLED=1 \\\n    golangci-lint run \\\n        --timeout=5m \\\n        --modules-download-mode=mod \\\n        --build-tags=\"${LINT_TAGS}\"\nRUN echo ok >/lint_ok\n\n\nFROM livekit/gstreamer:1.24.12-prod\n\nARG TARGETPLATFORM\n\n# install deps\nRUN apt-get update && \\\n    apt-get install -y \\\n        curl \\\n        ffmpeg \\\n        fonts-noto \\\n        gnupg \\\n        pulseaudio \\\n        python3 \\\n        python3-pip \\\n        unzip \\\n        wget \\\n        xvfb \\\n        gstreamer1.0-plugins-base-\n\n# install go\nCOPY --from=1 /usr/local/go /usr/local/go\nENV PATH=\"/usr/local/go/bin:${PATH}\"\n\n# install chrome\nCOPY --from=livekit/chrome-installer:146.0.7680.177-1 /chrome-installer /chrome-installer\n\nRUN /chrome-installer/install-chrome \"$TARGETPLATFORM\"\n\n# clean up\nRUN rm -rf /var/lib/apt/lists/*\n\n# install rtsp server\nRUN if [ \"$TARGETPLATFORM\" = \"linux/arm64\" ]; then ARCH=arm64v8; else ARCH=amd64; fi && \\\n    wget https://github.com/bluenviron/mediamtx/releases/download/v1.8.1/mediamtx_v1.8.1_linux_${ARCH}.tar.gz && \\\n    tar -zxvf mediamtx_v1.8.1_linux_${ARCH}.tar.gz && 
\\\n    rm mediamtx_v1.8.1_linux_${ARCH}.tar.gz && \\\n    sed -i 's_record: no_record: yes_g' mediamtx.yml && \\\n    sed -i 's_recordPath: ./recordings/%path/_recordPath: /out/output/stream-_g' mediamtx.yml\n\n# create egress user\nRUN useradd -ms /bin/bash -g root -G sudo,pulse,pulse-access egress\nRUN mkdir -p home/egress/tmp home/egress/.cache/xdgr && \\\n    chown -R egress /home/egress\n\n# copy files\nCOPY test/agents/requirements.txt /agents/requirements.txt\nRUN pip install --break-system-packages --no-cache-dir -r /agents/requirements.txt\n\nCOPY test/ /workspace/test/\nCOPY --from=1 /workspace/egress /bin/\nCOPY --from=1 /workspace/test.test .\nCOPY media-samples /media-samples\nCOPY --from=1 /workspace/test/agents /agents\nCOPY build/test/entrypoint.sh .\n\n# Force lint stage to run successfully\nCOPY --from=lint /lint_ok /__lint_ok\n\n# run tests\nUSER egress\nENV PATH=${PATH}:/chrome\nENV XDG_RUNTIME_DIR=/home/egress/.cache/xdgr\nENV CHROME_DEVEL_SANDBOX=/usr/local/sbin/chrome-devel-sandbox\nENTRYPOINT [\"./entrypoint.sh\"]\n"
  },
  {
    "path": "build/test/entrypoint.sh",
    "content": "#!/usr/bin/env bash\n# Copyright 2023 LiveKit, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nset -eo pipefail\n\n# Start pulseaudio\nrm -rf /var/run/pulse /var/lib/pulse /home/egress/.config/pulse /home/egress/.cache/xdgr/pulse\npulseaudio -D --verbose --exit-idle-time=-1 --disallow-exit > /dev/null 2>&1\n\n# Run RTSP server\n./mediamtx > /dev/null 2>&1 &\n\n# Run tests\nif [[ -z ${GITHUB_WORKFLOW+x} ]]; then\n  exec ./test.test -test.v -test.timeout 30m\nelse\n  go install github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest\n  exec go tool test2json -p egress ./test.test -test.v -test.timeout 30m 2>&1 | \"$HOME\"/go/bin/gotestfmt\nfi\n"
  },
  {
    "path": "build/test/fetch-media-samples.sh",
    "content": "#!/usr/bin/env bash\nset -euo pipefail\n\nREPO=\"livekit/media-samples\"\nDEST=\"media-samples\"\nREF=\"${1:-main}\"\n\nexport GIT_TERMINAL_PROMPT=0\n\nif ! command -v git-lfs >/dev/null 2>&1; then\n  echo \"git-lfs not found. Install it (brew install git-lfs / apt-get install git-lfs)\" >&2\n  exit 1\nfi\ngit lfs install --local\n\n# run git with an Authorization header only if GITHUB_TOKEN is set\ng() {\n  if [[ -n \"${GITHUB_TOKEN:-}\" ]]; then\n    local b64\n    b64=\"$(printf 'x-access-token:%s' \"$GITHUB_TOKEN\" | base64)\"\n    git -c \"http.https://github.com/.extraheader=AUTHORIZATION: basic $b64\" \"$@\"\n  else\n    git \"$@\"\n  fi\n}\n\nif [[ -d \"$DEST/.git\" ]]; then\n  git -C \"$DEST\" config core.hooksPath /dev/null\n  g -C \"$DEST\" fetch --depth=1 origin \"$REF\"\n  git -C \"$DEST\" checkout -f FETCH_HEAD\nelse\n  tmpl=\"$(mktemp -d)\"\n  g -c core.hooksPath=/dev/null \\\n    clone --template \"$tmpl\" --depth 1 --branch \"$REF\" \\\n    \"https://github.com/${REPO}.git\" \"$DEST\"\n  rm -rf \"$tmpl\"\nfi\n\ng -C \"$DEST\" lfs pull\n\n"
  },
  {
    "path": "chrome-sandboxing-seccomp-profile.json",
    "content": "{\n\t\"defaultAction\": \"SCMP_ACT_ERRNO\",\n\t\"defaultErrnoRet\": 1,\n\t\"archMap\": [\n\t\t{\n\t\t\t\"architecture\": \"SCMP_ARCH_X86_64\",\n\t\t\t\"subArchitectures\": [\n\t\t\t\t\"SCMP_ARCH_X86\",\n\t\t\t\t\"SCMP_ARCH_X32\"\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"architecture\": \"SCMP_ARCH_AARCH64\",\n\t\t\t\"subArchitectures\": [\n\t\t\t\t\"SCMP_ARCH_ARM\"\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"architecture\": \"SCMP_ARCH_MIPS64\",\n\t\t\t\"subArchitectures\": [\n\t\t\t\t\"SCMP_ARCH_MIPS\",\n\t\t\t\t\"SCMP_ARCH_MIPS64N32\"\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"architecture\": \"SCMP_ARCH_MIPS64N32\",\n\t\t\t\"subArchitectures\": [\n\t\t\t\t\"SCMP_ARCH_MIPS\",\n\t\t\t\t\"SCMP_ARCH_MIPS64\"\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"architecture\": \"SCMP_ARCH_MIPSEL64\",\n\t\t\t\"subArchitectures\": [\n\t\t\t\t\"SCMP_ARCH_MIPSEL\",\n\t\t\t\t\"SCMP_ARCH_MIPSEL64N32\"\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"architecture\": \"SCMP_ARCH_MIPSEL64N32\",\n\t\t\t\"subArchitectures\": [\n\t\t\t\t\"SCMP_ARCH_MIPSEL\",\n\t\t\t\t\"SCMP_ARCH_MIPSEL64\"\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"architecture\": \"SCMP_ARCH_S390X\",\n\t\t\t\"subArchitectures\": [\n\t\t\t\t\"SCMP_ARCH_S390\"\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"architecture\": \"SCMP_ARCH_RISCV64\",\n\t\t\t\"subArchitectures\": null\n\t\t}\n\t],\n\t\"syscalls\": [\n\t\t{\n\t\t\t\"names\": 
[\n\t\t\t\t\"accept\",\n\t\t\t\t\"accept4\",\n\t\t\t\t\"access\",\n\t\t\t\t\"adjtimex\",\n\t\t\t\t\"alarm\",\n\t\t\t\t\"bind\",\n\t\t\t\t\"brk\",\n\t\t\t\t\"capget\",\n\t\t\t\t\"capset\",\n\t\t\t\t\"chdir\",\n\t\t\t\t\"chmod\",\n\t\t\t\t\"chown\",\n\t\t\t\t\"chown32\",\n\t\t\t\t\"clock_adjtime\",\n\t\t\t\t\"clock_adjtime64\",\n\t\t\t\t\"clock_getres\",\n\t\t\t\t\"clock_getres_time64\",\n\t\t\t\t\"clock_gettime\",\n\t\t\t\t\"clock_gettime64\",\n\t\t\t\t\"clock_nanosleep\",\n\t\t\t\t\"clock_nanosleep_time64\",\n\t\t\t\t\"close\",\n\t\t\t\t\"close_range\",\n\t\t\t\t\"connect\",\n\t\t\t\t\"copy_file_range\",\n\t\t\t\t\"creat\",\n\t\t\t\t\"dup\",\n\t\t\t\t\"dup2\",\n\t\t\t\t\"dup3\",\n\t\t\t\t\"epoll_create\",\n\t\t\t\t\"epoll_create1\",\n\t\t\t\t\"epoll_ctl\",\n\t\t\t\t\"epoll_ctl_old\",\n\t\t\t\t\"epoll_pwait\",\n\t\t\t\t\"epoll_pwait2\",\n\t\t\t\t\"epoll_wait\",\n\t\t\t\t\"epoll_wait_old\",\n\t\t\t\t\"eventfd\",\n\t\t\t\t\"eventfd2\",\n\t\t\t\t\"execve\",\n\t\t\t\t\"execveat\",\n\t\t\t\t\"exit\",\n\t\t\t\t\"exit_group\",\n\t\t\t\t\"faccessat\",\n\t\t\t\t\"faccessat2\",\n\t\t\t\t\"fadvise64\",\n\t\t\t\t\"fadvise64_64\",\n\t\t\t\t\"fallocate\",\n\t\t\t\t\"fanotify_mark\",\n\t\t\t\t\"fchdir\",\n\t\t\t\t\"fchmod\",\n\t\t\t\t\"fchmodat\",\n\t\t\t\t\"fchown\",\n\t\t\t\t\"fchown32\",\n\t\t\t\t\"fchownat\",\n\t\t\t\t\"fcntl\",\n\t\t\t\t\"fcntl64\",\n\t\t\t\t\"fdatasync\",\n\t\t\t\t\"fgetxattr\",\n\t\t\t\t\"flistxattr\",\n\t\t\t\t\"flock\",\n\t\t\t\t\"fork\",\n\t\t\t\t\"fremovexattr\",\n\t\t\t\t\"fsetxattr\",\n\t\t\t\t\"fstat\",\n\t\t\t\t\"fstat64\",\n\t\t\t\t\"fstatat64\",\n\t\t\t\t\"fstatfs\",\n\t\t\t\t\"fstatfs64\",\n\t\t\t\t\"fsync\",\n\t\t\t\t\"ftruncate\",\n\t\t\t\t\"ftruncate64\",\n\t\t\t\t\"futex\",\n\t\t\t\t\"futex_time64\",\n\t\t\t\t\"futex_waitv\",\n\t\t\t\t\"futimesat\",\n\t\t\t\t\"getcpu\",\n\t\t\t\t\"getcwd\",\n\t\t\t\t\"getdents\",\n\t\t\t\t\"getdents64\",\n\t\t\t\t\"getegid\",\n\t\t\t\t\"getegid32\",\n\t\t\t\t\"geteuid\",\n\t\t\t\t\"geteuid32\",\n\t\t\t\t\"getg
id\",\n\t\t\t\t\"getgid32\",\n\t\t\t\t\"getgroups\",\n\t\t\t\t\"getgroups32\",\n\t\t\t\t\"getitimer\",\n\t\t\t\t\"getpeername\",\n\t\t\t\t\"getpgid\",\n\t\t\t\t\"getpgrp\",\n\t\t\t\t\"getpid\",\n\t\t\t\t\"getppid\",\n\t\t\t\t\"getpriority\",\n\t\t\t\t\"getrandom\",\n\t\t\t\t\"getresgid\",\n\t\t\t\t\"getresgid32\",\n\t\t\t\t\"getresuid\",\n\t\t\t\t\"getresuid32\",\n\t\t\t\t\"getrlimit\",\n\t\t\t\t\"get_robust_list\",\n\t\t\t\t\"getrusage\",\n\t\t\t\t\"getsid\",\n\t\t\t\t\"getsockname\",\n\t\t\t\t\"getsockopt\",\n\t\t\t\t\"get_thread_area\",\n\t\t\t\t\"gettid\",\n\t\t\t\t\"gettimeofday\",\n\t\t\t\t\"getuid\",\n\t\t\t\t\"getuid32\",\n\t\t\t\t\"getxattr\",\n\t\t\t\t\"inotify_add_watch\",\n\t\t\t\t\"inotify_init\",\n\t\t\t\t\"inotify_init1\",\n\t\t\t\t\"inotify_rm_watch\",\n\t\t\t\t\"io_cancel\",\n\t\t\t\t\"ioctl\",\n\t\t\t\t\"io_destroy\",\n\t\t\t\t\"io_getevents\",\n\t\t\t\t\"io_pgetevents\",\n\t\t\t\t\"io_pgetevents_time64\",\n\t\t\t\t\"ioprio_get\",\n\t\t\t\t\"ioprio_set\",\n\t\t\t\t\"io_setup\",\n\t\t\t\t\"io_submit\",\n\t\t\t\t\"io_uring_enter\",\n\t\t\t\t\"io_uring_register\",\n\t\t\t\t\"io_uring_setup\",\n\t\t\t\t\"ipc\",\n\t\t\t\t\"kill\",\n\t\t\t\t\"landlock_add_rule\",\n\t\t\t\t\"landlock_create_ruleset\",\n\t\t\t\t\"landlock_restrict_self\",\n\t\t\t\t\"lchown\",\n\t\t\t\t\"lchown32\",\n\t\t\t\t\"lgetxattr\",\n\t\t\t\t\"link\",\n\t\t\t\t\"linkat\",\n\t\t\t\t\"listen\",\n\t\t\t\t\"listxattr\",\n\t\t\t\t\"llistxattr\",\n\t\t\t\t\"_llseek\",\n\t\t\t\t\"lremovexattr\",\n\t\t\t\t\"lseek\",\n\t\t\t\t\"lsetxattr\",\n\t\t\t\t\"lstat\",\n\t\t\t\t\"lstat64\",\n\t\t\t\t\"madvise\",\n\t\t\t\t\"membarrier\",\n\t\t\t\t\"memfd_create\",\n\t\t\t\t\"memfd_secret\",\n\t\t\t\t\"mincore\",\n\t\t\t\t\"mkdir\",\n\t\t\t\t\"mkdirat\",\n\t\t\t\t\"mknod\",\n\t\t\t\t\"mknodat\",\n\t\t\t\t\"mlock\",\n\t\t\t\t\"mlock2\",\n\t\t\t\t\"mlockall\",\n\t\t\t\t\"mmap\",\n\t\t\t\t\"mmap2\",\n\t\t\t\t\"mprotect\",\n\t\t\t\t\"mq_getsetattr\",\n\t\t\t\t\"mq_notify\",\n\t\t\t\t\"mq_open\",\n\t\t\t\t\"
mq_timedreceive\",\n\t\t\t\t\"mq_timedreceive_time64\",\n\t\t\t\t\"mq_timedsend\",\n\t\t\t\t\"mq_timedsend_time64\",\n\t\t\t\t\"mq_unlink\",\n\t\t\t\t\"mremap\",\n\t\t\t\t\"msgctl\",\n\t\t\t\t\"msgget\",\n\t\t\t\t\"msgrcv\",\n\t\t\t\t\"msgsnd\",\n\t\t\t\t\"msync\",\n\t\t\t\t\"munlock\",\n\t\t\t\t\"munlockall\",\n\t\t\t\t\"munmap\",\n\t\t\t\t\"name_to_handle_at\",\n\t\t\t\t\"nanosleep\",\n\t\t\t\t\"newfstatat\",\n\t\t\t\t\"_newselect\",\n\t\t\t\t\"open\",\n\t\t\t\t\"openat\",\n\t\t\t\t\"openat2\",\n\t\t\t\t\"pause\",\n\t\t\t\t\"pidfd_open\",\n\t\t\t\t\"pidfd_send_signal\",\n\t\t\t\t\"pipe\",\n\t\t\t\t\"pipe2\",\n\t\t\t\t\"pkey_alloc\",\n\t\t\t\t\"pkey_free\",\n\t\t\t\t\"pkey_mprotect\",\n\t\t\t\t\"poll\",\n\t\t\t\t\"ppoll\",\n\t\t\t\t\"ppoll_time64\",\n\t\t\t\t\"prctl\",\n\t\t\t\t\"pread64\",\n\t\t\t\t\"preadv\",\n\t\t\t\t\"preadv2\",\n\t\t\t\t\"prlimit64\",\n\t\t\t\t\"process_mrelease\",\n\t\t\t\t\"pselect6\",\n\t\t\t\t\"pselect6_time64\",\n\t\t\t\t\"pwrite64\",\n\t\t\t\t\"pwritev\",\n\t\t\t\t\"pwritev2\",\n\t\t\t\t\"read\",\n\t\t\t\t\"readahead\",\n\t\t\t\t\"readlink\",\n\t\t\t\t\"readlinkat\",\n\t\t\t\t\"readv\",\n\t\t\t\t\"recv\",\n\t\t\t\t\"recvfrom\",\n\t\t\t\t\"recvmmsg\",\n\t\t\t\t\"recvmmsg_time64\",\n\t\t\t\t\"recvmsg\",\n\t\t\t\t\"remap_file_pages\",\n\t\t\t\t\"removexattr\",\n\t\t\t\t\"rename\",\n\t\t\t\t\"renameat\",\n\t\t\t\t\"renameat2\",\n\t\t\t\t\"restart_syscall\",\n\t\t\t\t\"rmdir\",\n\t\t\t\t\"rseq\",\n\t\t\t\t\"rt_sigaction\",\n\t\t\t\t\"rt_sigpending\",\n\t\t\t\t\"rt_sigprocmask\",\n\t\t\t\t\"rt_sigqueueinfo\",\n\t\t\t\t\"rt_sigreturn\",\n\t\t\t\t\"rt_sigsuspend\",\n\t\t\t\t\"rt_sigtimedwait\",\n\t\t\t\t\"rt_sigtimedwait_time64\",\n\t\t\t\t\"rt_tgsigqueueinfo\",\n\t\t\t\t\"sched_getaffinity\",\n\t\t\t\t\"sched_getattr\",\n\t\t\t\t\"sched_getparam\",\n\t\t\t\t\"sched_get_priority_max\",\n\t\t\t\t\"sched_get_priority_min\",\n\t\t\t\t\"sched_getscheduler\",\n\t\t\t\t\"sched_rr_get_interval\",\n\t\t\t\t\"sched_rr_get_interval_time64\",\n\t\t\t\t\"sc
hed_setaffinity\",\n\t\t\t\t\"sched_setattr\",\n\t\t\t\t\"sched_setparam\",\n\t\t\t\t\"sched_setscheduler\",\n\t\t\t\t\"sched_yield\",\n\t\t\t\t\"seccomp\",\n\t\t\t\t\"select\",\n\t\t\t\t\"semctl\",\n\t\t\t\t\"semget\",\n\t\t\t\t\"semop\",\n\t\t\t\t\"semtimedop\",\n\t\t\t\t\"semtimedop_time64\",\n\t\t\t\t\"send\",\n\t\t\t\t\"sendfile\",\n\t\t\t\t\"sendfile64\",\n\t\t\t\t\"sendmmsg\",\n\t\t\t\t\"sendmsg\",\n\t\t\t\t\"sendto\",\n\t\t\t\t\"setfsgid\",\n\t\t\t\t\"setfsgid32\",\n\t\t\t\t\"setfsuid\",\n\t\t\t\t\"setfsuid32\",\n\t\t\t\t\"setgid\",\n\t\t\t\t\"setgid32\",\n\t\t\t\t\"setgroups\",\n\t\t\t\t\"setgroups32\",\n\t\t\t\t\"setitimer\",\n\t\t\t\t\"setpgid\",\n\t\t\t\t\"setpriority\",\n\t\t\t\t\"setregid\",\n\t\t\t\t\"setregid32\",\n\t\t\t\t\"setresgid\",\n\t\t\t\t\"setresgid32\",\n\t\t\t\t\"setresuid\",\n\t\t\t\t\"setresuid32\",\n\t\t\t\t\"setreuid\",\n\t\t\t\t\"setreuid32\",\n\t\t\t\t\"setrlimit\",\n\t\t\t\t\"set_robust_list\",\n\t\t\t\t\"setsid\",\n\t\t\t\t\"setsockopt\",\n\t\t\t\t\"set_thread_area\",\n\t\t\t\t\"set_tid_address\",\n\t\t\t\t\"setuid\",\n\t\t\t\t\"setuid32\",\n\t\t\t\t\"setxattr\",\n\t\t\t\t\"shmat\",\n\t\t\t\t\"shmctl\",\n\t\t\t\t\"shmdt\",\n\t\t\t\t\"shmget\",\n\t\t\t\t\"shutdown\",\n\t\t\t\t\"sigaltstack\",\n\t\t\t\t\"signalfd\",\n\t\t\t\t\"signalfd4\",\n\t\t\t\t\"sigprocmask\",\n\t\t\t\t\"sigreturn\",\n\t\t\t\t\"socketcall\",\n\t\t\t\t\"socketpair\",\n\t\t\t\t\"splice\",\n\t\t\t\t\"stat\",\n\t\t\t\t\"stat64\",\n\t\t\t\t\"statfs\",\n\t\t\t\t\"statfs64\",\n\t\t\t\t\"statx\",\n\t\t\t\t\"symlink\",\n\t\t\t\t\"symlinkat\",\n\t\t\t\t\"sync\",\n\t\t\t\t\"sync_file_range\",\n\t\t\t\t\"syncfs\",\n\t\t\t\t\"sysinfo\",\n\t\t\t\t\"tee\",\n\t\t\t\t\"tgkill\",\n\t\t\t\t\"time\",\n\t\t\t\t\"timer_create\",\n\t\t\t\t\"timer_delete\",\n\t\t\t\t\"timer_getoverrun\",\n\t\t\t\t\"timer_gettime\",\n\t\t\t\t\"timer_gettime64\",\n\t\t\t\t\"timer_settime\",\n\t\t\t\t\"timer_settime64\",\n\t\t\t\t\"timerfd_create\",\n\t\t\t\t\"timerfd_gettime\",\n\t\t\t\t\"timerfd_gettime
64\",\n\t\t\t\t\"timerfd_settime\",\n\t\t\t\t\"timerfd_settime64\",\n\t\t\t\t\"times\",\n\t\t\t\t\"tkill\",\n\t\t\t\t\"truncate\",\n\t\t\t\t\"truncate64\",\n\t\t\t\t\"ugetrlimit\",\n\t\t\t\t\"umask\",\n\t\t\t\t\"uname\",\n\t\t\t\t\"unlink\",\n\t\t\t\t\"unlinkat\",\n\t\t\t\t\"utime\",\n\t\t\t\t\"utimensat\",\n\t\t\t\t\"utimensat_time64\",\n\t\t\t\t\"utimes\",\n\t\t\t\t\"vfork\",\n\t\t\t\t\"vmsplice\",\n\t\t\t\t\"wait4\",\n\t\t\t\t\"waitid\",\n\t\t\t\t\"waitpid\",\n\t\t\t\t\"write\",\n\t\t\t\t\"writev\",\n\t\t\t\t\"clone\",\n\t\t\t\t\"unshare\"\n\t\t\t],\n\t\t\t\"action\": \"SCMP_ACT_ALLOW\"\n\t\t},\n\t\t{\n\t\t\t\"names\": [\n\t\t\t\t\"process_vm_readv\",\n\t\t\t\t\"process_vm_writev\",\n\t\t\t\t\"ptrace\"\n\t\t\t],\n\t\t\t\"action\": \"SCMP_ACT_ALLOW\",\n\t\t\t\"includes\": {\n\t\t\t\t\"minKernel\": \"4.8\"\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"names\": [\n\t\t\t\t\"socket\"\n\t\t\t],\n\t\t\t\"action\": \"SCMP_ACT_ALLOW\",\n\t\t\t\"args\": [\n\t\t\t\t{\n\t\t\t\t\t\"index\": 0,\n\t\t\t\t\t\"value\": 40,\n\t\t\t\t\t\"op\": \"SCMP_CMP_NE\"\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"names\": [\n\t\t\t\t\"personality\"\n\t\t\t],\n\t\t\t\"action\": \"SCMP_ACT_ALLOW\",\n\t\t\t\"args\": [\n\t\t\t\t{\n\t\t\t\t\t\"index\": 0,\n\t\t\t\t\t\"value\": 0,\n\t\t\t\t\t\"op\": \"SCMP_CMP_EQ\"\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"names\": [\n\t\t\t\t\"personality\"\n\t\t\t],\n\t\t\t\"action\": \"SCMP_ACT_ALLOW\",\n\t\t\t\"args\": [\n\t\t\t\t{\n\t\t\t\t\t\"index\": 0,\n\t\t\t\t\t\"value\": 8,\n\t\t\t\t\t\"op\": \"SCMP_CMP_EQ\"\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"names\": [\n\t\t\t\t\"personality\"\n\t\t\t],\n\t\t\t\"action\": \"SCMP_ACT_ALLOW\",\n\t\t\t\"args\": [\n\t\t\t\t{\n\t\t\t\t\t\"index\": 0,\n\t\t\t\t\t\"value\": 131072,\n\t\t\t\t\t\"op\": \"SCMP_CMP_EQ\"\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"names\": [\n\t\t\t\t\"personality\"\n\t\t\t],\n\t\t\t\"action\": \"SCMP_ACT_ALLOW\",\n\t\t\t\"args\": [\n\t\t\t\t{\n\t\t\t\t\t\"index\": 0,\n\t\t\t\t\t\"value\": 
131080,\n\t\t\t\t\t\"op\": \"SCMP_CMP_EQ\"\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"names\": [\n\t\t\t\t\"personality\"\n\t\t\t],\n\t\t\t\"action\": \"SCMP_ACT_ALLOW\",\n\t\t\t\"args\": [\n\t\t\t\t{\n\t\t\t\t\t\"index\": 0,\n\t\t\t\t\t\"value\": 4294967295,\n\t\t\t\t\t\"op\": \"SCMP_CMP_EQ\"\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"names\": [\n\t\t\t\t\"sync_file_range2\",\n\t\t\t\t\"swapcontext\"\n\t\t\t],\n\t\t\t\"action\": \"SCMP_ACT_ALLOW\",\n\t\t\t\"includes\": {\n\t\t\t\t\"arches\": [\n\t\t\t\t\t\"ppc64le\"\n\t\t\t\t]\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"names\": [\n\t\t\t\t\"arm_fadvise64_64\",\n\t\t\t\t\"arm_sync_file_range\",\n\t\t\t\t\"sync_file_range2\",\n\t\t\t\t\"breakpoint\",\n\t\t\t\t\"cacheflush\",\n\t\t\t\t\"set_tls\"\n\t\t\t],\n\t\t\t\"action\": \"SCMP_ACT_ALLOW\",\n\t\t\t\"includes\": {\n\t\t\t\t\"arches\": [\n\t\t\t\t\t\"arm\",\n\t\t\t\t\t\"arm64\"\n\t\t\t\t]\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"names\": [\n\t\t\t\t\"arch_prctl\"\n\t\t\t],\n\t\t\t\"action\": \"SCMP_ACT_ALLOW\",\n\t\t\t\"includes\": {\n\t\t\t\t\"arches\": [\n\t\t\t\t\t\"amd64\",\n\t\t\t\t\t\"x32\"\n\t\t\t\t]\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"names\": [\n\t\t\t\t\"modify_ldt\"\n\t\t\t],\n\t\t\t\"action\": \"SCMP_ACT_ALLOW\",\n\t\t\t\"includes\": {\n\t\t\t\t\"arches\": [\n\t\t\t\t\t\"amd64\",\n\t\t\t\t\t\"x32\",\n\t\t\t\t\t\"x86\"\n\t\t\t\t]\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"names\": [\n\t\t\t\t\"s390_pci_mmio_read\",\n\t\t\t\t\"s390_pci_mmio_write\",\n\t\t\t\t\"s390_runtime_instr\"\n\t\t\t],\n\t\t\t\"action\": \"SCMP_ACT_ALLOW\",\n\t\t\t\"includes\": {\n\t\t\t\t\"arches\": [\n\t\t\t\t\t\"s390\",\n\t\t\t\t\t\"s390x\"\n\t\t\t\t]\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"names\": [\n\t\t\t\t\"riscv_flush_icache\"\n\t\t\t],\n\t\t\t\"action\": \"SCMP_ACT_ALLOW\",\n\t\t\t\"includes\": {\n\t\t\t\t\"arches\": [\n\t\t\t\t\t\"riscv64\"\n\t\t\t\t]\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"names\": [\n\t\t\t\t\"open_by_handle_at\"\n\t\t\t],\n\t\t\t\"action\": \"SCMP_ACT_ALLOW\",\n\t\t\t\"includes\": 
{\n\t\t\t\t\"caps\": [\n\t\t\t\t\t\"CAP_DAC_READ_SEARCH\"\n\t\t\t\t]\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"names\": [\n\t\t\t\t\"bpf\",\n\t\t\t\t\"clone3\",\n\t\t\t\t\"fanotify_init\",\n\t\t\t\t\"fsconfig\",\n\t\t\t\t\"fsmount\",\n\t\t\t\t\"fsopen\",\n\t\t\t\t\"fspick\",\n\t\t\t\t\"lookup_dcookie\",\n\t\t\t\t\"mount\",\n\t\t\t\t\"mount_setattr\",\n\t\t\t\t\"move_mount\",\n\t\t\t\t\"open_tree\",\n\t\t\t\t\"perf_event_open\",\n\t\t\t\t\"quotactl\",\n\t\t\t\t\"quotactl_fd\",\n\t\t\t\t\"setdomainname\",\n\t\t\t\t\"sethostname\",\n\t\t\t\t\"setns\",\n\t\t\t\t\"syslog\",\n\t\t\t\t\"umount\",\n\t\t\t\t\"umount2\"\n\t\t\t],\n\t\t\t\"action\": \"SCMP_ACT_ALLOW\",\n\t\t\t\"includes\": {\n\t\t\t\t\"caps\": [\n\t\t\t\t\t\"CAP_SYS_ADMIN\"\n\t\t\t\t]\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"names\": [\n\t\t\t\t\"clone3\"\n\t\t\t],\n\t\t\t\"action\": \"SCMP_ACT_ERRNO\",\n\t\t\t\"errnoRet\": 38,\n\t\t\t\"excludes\": {\n\t\t\t\t\"caps\": [\n\t\t\t\t\t\"CAP_SYS_ADMIN\"\n\t\t\t\t]\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"names\": [\n\t\t\t\t\"reboot\"\n\t\t\t],\n\t\t\t\"action\": \"SCMP_ACT_ALLOW\",\n\t\t\t\"includes\": {\n\t\t\t\t\"caps\": [\n\t\t\t\t\t\"CAP_SYS_BOOT\"\n\t\t\t\t]\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"names\": [\n\t\t\t\t\"chroot\"\n\t\t\t],\n\t\t\t\"action\": \"SCMP_ACT_ALLOW\",\n\t\t\t\"includes\": {\n\t\t\t\t\"caps\": [\n\t\t\t\t\t\"CAP_SYS_CHROOT\"\n\t\t\t\t]\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"names\": [\n\t\t\t\t\"delete_module\",\n\t\t\t\t\"init_module\",\n\t\t\t\t\"finit_module\"\n\t\t\t],\n\t\t\t\"action\": \"SCMP_ACT_ALLOW\",\n\t\t\t\"includes\": {\n\t\t\t\t\"caps\": [\n\t\t\t\t\t\"CAP_SYS_MODULE\"\n\t\t\t\t]\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"names\": [\n\t\t\t\t\"acct\"\n\t\t\t],\n\t\t\t\"action\": \"SCMP_ACT_ALLOW\",\n\t\t\t\"includes\": {\n\t\t\t\t\"caps\": [\n\t\t\t\t\t\"CAP_SYS_PACCT\"\n\t\t\t\t]\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"names\": 
[\n\t\t\t\t\"kcmp\",\n\t\t\t\t\"pidfd_getfd\",\n\t\t\t\t\"process_madvise\",\n\t\t\t\t\"process_vm_readv\",\n\t\t\t\t\"process_vm_writev\",\n\t\t\t\t\"ptrace\"\n\t\t\t],\n\t\t\t\"action\": \"SCMP_ACT_ALLOW\",\n\t\t\t\"includes\": {\n\t\t\t\t\"caps\": [\n\t\t\t\t\t\"CAP_SYS_PTRACE\"\n\t\t\t\t]\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"names\": [\n\t\t\t\t\"iopl\",\n\t\t\t\t\"ioperm\"\n\t\t\t],\n\t\t\t\"action\": \"SCMP_ACT_ALLOW\",\n\t\t\t\"includes\": {\n\t\t\t\t\"caps\": [\n\t\t\t\t\t\"CAP_SYS_RAWIO\"\n\t\t\t\t]\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"names\": [\n\t\t\t\t\"settimeofday\",\n\t\t\t\t\"stime\",\n\t\t\t\t\"clock_settime\",\n\t\t\t\t\"clock_settime64\"\n\t\t\t],\n\t\t\t\"action\": \"SCMP_ACT_ALLOW\",\n\t\t\t\"includes\": {\n\t\t\t\t\"caps\": [\n\t\t\t\t\t\"CAP_SYS_TIME\"\n\t\t\t\t]\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"names\": [\n\t\t\t\t\"vhangup\"\n\t\t\t],\n\t\t\t\"action\": \"SCMP_ACT_ALLOW\",\n\t\t\t\"includes\": {\n\t\t\t\t\"caps\": [\n\t\t\t\t\t\"CAP_SYS_TTY_CONFIG\"\n\t\t\t\t]\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"names\": [\n\t\t\t\t\"get_mempolicy\",\n\t\t\t\t\"mbind\",\n\t\t\t\t\"set_mempolicy\"\n\t\t\t],\n\t\t\t\"action\": \"SCMP_ACT_ALLOW\",\n\t\t\t\"includes\": {\n\t\t\t\t\"caps\": [\n\t\t\t\t\t\"CAP_SYS_NICE\"\n\t\t\t\t]\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"names\": [\n\t\t\t\t\"syslog\"\n\t\t\t],\n\t\t\t\"action\": \"SCMP_ACT_ALLOW\",\n\t\t\t\"includes\": {\n\t\t\t\t\"caps\": [\n\t\t\t\t\t\"CAP_SYSLOG\"\n\t\t\t\t]\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"names\": [\n\t\t\t\t\"bpf\"\n\t\t\t],\n\t\t\t\"action\": \"SCMP_ACT_ALLOW\",\n\t\t\t\"includes\": {\n\t\t\t\t\"caps\": [\n\t\t\t\t\t\"CAP_BPF\"\n\t\t\t\t]\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"names\": [\n\t\t\t\t\"perf_event_open\"\n\t\t\t],\n\t\t\t\"action\": \"SCMP_ACT_ALLOW\",\n\t\t\t\"includes\": {\n\t\t\t\t\"caps\": [\n\t\t\t\t\t\"CAP_PERFMON\"\n\t\t\t\t]\n\t\t\t}\n\t\t}\n\t]\n}\n"
  },
  {
    "path": "cmd/server/http.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage main\n\nimport (\n\t\"net/http\"\n\n\t\"github.com/livekit/egress/pkg/server\"\n\t\"github.com/livekit/protocol/logger\"\n)\n\ntype httpHandler struct {\n\tsvc *server.Server\n}\n\nfunc (h *httpHandler) ServeHTTP(w http.ResponseWriter, _ *http.Request) {\n\tinfo, err := h.svc.Status()\n\tif err != nil {\n\t\tlogger.Errorw(\"failed to read status\", err)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t_, _ = w.Write(info)\n}\n"
  },
  {
    "path": "cmd/server/main.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage main\n\nimport (\n\t\"context\"\n\t\"embed\"\n\t\"fmt\"\n\t\"io/fs\"\n\t\"net/http\"\n\t\"os\"\n\t\"os/signal\"\n\t\"syscall\"\n\n\t\"github.com/urfave/cli/v3\"\n\t\"google.golang.org/protobuf/encoding/protojson\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/handler\"\n\t\"github.com/livekit/egress/pkg/info\"\n\t\"github.com/livekit/egress/pkg/server\"\n\t\"github.com/livekit/egress/version\"\n\t\"github.com/livekit/protocol/logger\"\n\tlkredis \"github.com/livekit/protocol/redis\"\n\t\"github.com/livekit/protocol/rpc\"\n\t_ \"github.com/livekit/protocol/utils/hwstats/maxprocs\"\n\t\"github.com/livekit/psrpc\"\n)\n\nvar (\n\t//go:embed templates\n\ttemplateEmbedFs embed.FS\n)\n\nfunc main() {\n\tcmd := &cli.Command{\n\t\tName:        \"egress\",\n\t\tUsage:       \"LiveKit Egress\",\n\t\tVersion:     version.Version,\n\t\tDescription: \"runs the recorder in standalone mode or as a service\",\n\t\tCommands: []*cli.Command{\n\t\t\t{\n\t\t\t\tName:        \"run-handler\",\n\t\t\t\tDescription: \"runs a request in a new process\",\n\t\t\t\tFlags: []cli.Flag{\n\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\tName: \"request\",\n\t\t\t\t\t},\n\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\tName: \"config\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tAction: runHandler,\n\t\t\t\tHidden: 
true,\n\t\t\t},\n\t\t},\n\t\tFlags: []cli.Flag{\n\t\t\t&cli.StringFlag{\n\t\t\t\tName:    \"config\",\n\t\t\t\tUsage:   \"LiveKit Egress yaml config file\",\n\t\t\t\tSources: cli.EnvVars(\"EGRESS_CONFIG_FILE\"),\n\t\t\t},\n\t\t\t&cli.StringFlag{\n\t\t\t\tName:    \"config-body\",\n\t\t\t\tUsage:   \"LiveKit Egress yaml config body\",\n\t\t\t\tSources: cli.EnvVars(\"EGRESS_CONFIG_BODY\"),\n\t\t\t},\n\t\t},\n\t\tAction: runService,\n\t}\n\n\tif err := cmd.Run(context.Background(), os.Args); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc runService(_ context.Context, c *cli.Command) error {\n\tconfigFile := c.String(\"config\")\n\tconfigBody := c.String(\"config-body\")\n\tif configBody == \"\" {\n\t\tif configFile == \"\" {\n\t\t\treturn errors.ErrNoConfig\n\t\t}\n\t\tcontent, err := os.ReadFile(configFile)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tconfigBody = string(content)\n\t}\n\n\tconf, err := config.NewServiceConfig(configBody)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trc, err := lkredis.GetRedisClient(conf.Redis)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbus := psrpc.NewRedisMessageBus(rc)\n\tioClient, err := info.NewSessionReporter(&conf.BaseConfig, bus)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsvc, err := server.NewServer(conf, bus, ioClient)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif conf.HealthPort != 0 {\n\t\tgo func() {\n\t\t\t_ = http.ListenAndServe(fmt.Sprintf(\":%d\", conf.HealthPort), &httpHandler{svc: svc})\n\t\t}()\n\t}\n\n\tstopChan := make(chan os.Signal, 1)\n\tsignal.Notify(stopChan, syscall.SIGTERM, syscall.SIGQUIT)\n\n\tkillChan := make(chan os.Signal, 1)\n\tsignal.Notify(killChan, syscall.SIGINT)\n\n\tgo func() {\n\t\tselect {\n\t\tcase sig := <-stopChan:\n\t\t\tlogger.Infow(\"exit requested, finishing recording then shutting down\", \"signal\", sig)\n\t\t\tsvc.Shutdown(true, false)\n\t\tcase sig := <-killChan:\n\t\t\tlogger.Infow(\"exit requested, stopping recording and shutting down\", \"signal\", 
sig)\n\t\t\tsvc.Shutdown(true, true)\n\t\t}\n\t}()\n\n\trfs, err := fs.Sub(templateEmbedFs, \"templates\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = svc.StartTemplatesServer(rfs)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn svc.Run()\n}\n\nfunc runHandler(_ context.Context, c *cli.Command) error {\n\tconfigBody := c.String(\"config\")\n\tif configBody == \"\" {\n\t\treturn errors.ErrNoConfig\n\t}\n\n\treq := &rpc.StartEgressRequest{}\n\treqString := c.String(\"request\")\n\terr := protojson.Unmarshal([]byte(reqString), req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconf, err := config.NewPipelineConfig(configBody, req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogger.Debugw(\"handler launched\")\n\n\terr = os.MkdirAll(conf.TmpDir, 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer os.RemoveAll(conf.TmpDir)\n\t_ = os.Setenv(\"TMPDIR\", conf.TmpDir)\n\n\trc, err := lkredis.GetRedisClient(conf.Redis)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tkillChan := make(chan os.Signal, 1)\n\tsignal.Notify(killChan, syscall.SIGINT)\n\n\tbus := psrpc.NewRedisMessageBus(rc)\n\th, err := handler.NewHandler(conf, bus)\n\tif err != nil {\n\t\t// service will send info update and shut down\n\t\tlogger.Errorw(\"failed to create handler\", err)\n\t\treturn err\n\t}\n\n\tgo func() {\n\t\tsig := <-killChan\n\t\tlogger.Infow(\"exit requested, stopping recording and shutting down\", \"signal\", sig)\n\t\th.Kill()\n\t}()\n\n\th.Run()\n\treturn nil\n}\n"
  },
  {
    "path": "cmd/template_version/main.go",
    "content": "// Copyright 2025 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/livekit/egress/version\"\n)\n\nfunc main() {\n\tfmt.Println(version.TemplateVersion)\n}\n"
  },
  {
    "path": "go.mod",
    "content": "module github.com/livekit/egress\n\nreplace github.com/go-gst/go-gst => github.com/livekit/gst-go v0.0.0-20250701011214-e7f61abd14cb\n\ngo 1.26.1\n\ntool github.com/maxbrunsfeld/counterfeiter/v6\n\nrequire (\n\tcloud.google.com/go/storage v1.55.0\n\tgithub.com/Azure/azure-storage-blob-go v0.15.0\n\tgithub.com/aws/aws-sdk-go-v2 v1.41.5\n\tgithub.com/aws/aws-sdk-go-v2/config v1.29.17\n\tgithub.com/aws/aws-sdk-go-v2/credentials v1.17.70\n\tgithub.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.81\n\tgithub.com/aws/aws-sdk-go-v2/service/s3 v1.97.3\n\tgithub.com/aws/smithy-go v1.24.2\n\tgithub.com/chromedp/cdproto v0.0.0-20260405000525-47a8ff65b46a\n\tgithub.com/chromedp/chromedp v0.15.1\n\tgithub.com/frostbyte73/core v0.1.1\n\tgithub.com/go-gst/go-glib v1.4.1-0.20241209142714-f53cebf18559\n\tgithub.com/go-gst/go-gst v1.4.0\n\tgithub.com/go-jose/go-jose/v4 v4.1.4\n\tgithub.com/googleapis/gax-go/v2 v2.14.2\n\tgithub.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674\n\tgithub.com/linkdata/deadlock v0.5.5\n\tgithub.com/livekit/livekit-server v1.9.12\n\tgithub.com/livekit/mageutil v0.0.0-20250511045019-0f1ff63f7731\n\tgithub.com/livekit/media-sdk v0.0.0-20260422170315-2c3eed337496\n\tgithub.com/livekit/protocol v1.45.6\n\tgithub.com/livekit/psrpc v0.7.1\n\tgithub.com/livekit/server-sdk-go/v2 v2.16.2-0.20260401161108-50e969e2961f\n\tgithub.com/livekit/storage v0.0.0-20251113154014-aa1f4d0ce057\n\tgithub.com/llehouerou/go-mp3 v1.2.0\n\tgithub.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58\n\tgithub.com/pion/rtp v1.10.1\n\tgithub.com/pion/webrtc/v4 v4.2.7\n\tgithub.com/prometheus/client_golang v1.23.0\n\tgithub.com/prometheus/client_model v0.6.2\n\tgithub.com/prometheus/common v0.67.5\n\tgithub.com/stretchr/testify v1.11.1\n\tgithub.com/urfave/cli/v3 v3.3.9\n\tgo.opentelemetry.io/otel v1.40.0\n\tgo.uber.org/atomic v1.11.0\n\tgo.uber.org/zap v1.27.1\n\tgolang.org/x/exp v0.0.0-20260212183809-81e46e3db34a\n\tgoogle.golang.org/api 
v0.238.0\n\tgoogle.golang.org/grpc v1.79.3\n\tgoogle.golang.org/protobuf v1.36.11\n\tgopkg.in/natefinch/lumberjack.v2 v2.2.1\n\tgopkg.in/yaml.v3 v3.0.1\n)\n\nrequire (\n\tbuf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.11-20260209202127-80ab13bee0bf.1 // indirect\n\tbuf.build/go/protovalidate v1.1.2 // indirect\n\tbuf.build/go/protoyaml v0.6.0 // indirect\n\tcel.dev/expr v0.25.1 // indirect\n\tcloud.google.com/go v0.121.1 // indirect\n\tcloud.google.com/go/auth v0.16.2 // indirect\n\tcloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect\n\tcloud.google.com/go/compute/metadata v0.9.0 // indirect\n\tcloud.google.com/go/iam v1.5.2 // indirect\n\tcloud.google.com/go/monitoring v1.24.2 // indirect\n\tgithub.com/Azure/azure-pipeline-go v0.2.3 // indirect\n\tgithub.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 // indirect\n\tgithub.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 // indirect\n\tgithub.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 // indirect\n\tgithub.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible // indirect\n\tgithub.com/antlr4-go/antlr/v4 v4.13.1 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.8 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/internal/configsources v1.4.21 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.21 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/internal/v4a v1.4.22 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.13 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.21 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.21 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/sso 
v1.25.5 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/sts v1.34.0 // indirect\n\tgithub.com/benbjohnson/clock v1.3.5 // indirect\n\tgithub.com/beorn7/perks v1.0.1 // indirect\n\tgithub.com/bep/debounce v1.2.1 // indirect\n\tgithub.com/cespare/xxhash/v2 v2.3.0 // indirect\n\tgithub.com/chromedp/sysutil v1.1.0 // indirect\n\tgithub.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 // indirect\n\tgithub.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect\n\tgithub.com/dennwc/iters v1.2.2 // indirect\n\tgithub.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect\n\tgithub.com/elliotchance/orderedmap/v2 v2.7.0 // indirect\n\tgithub.com/envoyproxy/go-control-plane/envoy v1.36.0 // indirect\n\tgithub.com/envoyproxy/protoc-gen-validate v1.3.0 // indirect\n\tgithub.com/felixge/httpsnoop v1.0.4 // indirect\n\tgithub.com/fsnotify/fsnotify v1.9.0 // indirect\n\tgithub.com/gammazero/deque v1.2.1 // indirect\n\tgithub.com/go-gst/go-pointer v0.0.0-20241127163939-ba766f075b4c // indirect\n\tgithub.com/go-jose/go-jose/v3 v3.0.5 // indirect\n\tgithub.com/go-json-experiment/json v0.0.0-20260214004413-d219187c3433 // indirect\n\tgithub.com/go-logr/logr v1.4.3 // indirect\n\tgithub.com/go-logr/stdr v1.2.2 // indirect\n\tgithub.com/gobwas/httphead v0.1.0 // indirect\n\tgithub.com/gobwas/pool v0.2.1 // indirect\n\tgithub.com/gobwas/ws v1.4.0 // indirect\n\tgithub.com/google/cel-go v0.27.0 // indirect\n\tgithub.com/google/s2a-go v0.1.9 // indirect\n\tgithub.com/google/uuid v1.6.0 // indirect\n\tgithub.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect\n\tgithub.com/hashicorp/go-cleanhttp v0.5.2 // indirect\n\tgithub.com/hashicorp/go-retryablehttp v0.7.7 // indirect\n\tgithub.com/jellydator/ttlcache/v3 v3.4.0 // indirect\n\tgithub.com/jxskiss/base62 v1.1.0 // indirect\n\tgithub.com/klauspost/compress v1.18.4 // indirect\n\tgithub.com/klauspost/cpuid/v2 v2.3.0 // 
indirect\n\tgithub.com/lithammer/shortuuid/v4 v4.2.0 // indirect\n\tgithub.com/livekit/mediatransportutil v0.0.0-20260113174415-2e8ba344fca3 // indirect\n\tgithub.com/mackerelio/go-osstat v0.2.6 // indirect\n\tgithub.com/magefile/mage v1.15.0 // indirect\n\tgithub.com/mattn/go-ieproxy v0.0.12 // indirect\n\tgithub.com/maxbrunsfeld/counterfeiter/v6 v6.12.1 // indirect\n\tgithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect\n\tgithub.com/nats-io/nats.go v1.48.0 // indirect\n\tgithub.com/nats-io/nkeys v0.4.15 // indirect\n\tgithub.com/nats-io/nuid v1.0.1 // indirect\n\tgithub.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe // indirect\n\tgithub.com/pion/datachannel v1.6.0 // indirect\n\tgithub.com/pion/dtls/v3 v3.1.2 // indirect\n\tgithub.com/pion/ice/v4 v4.2.0 // indirect\n\tgithub.com/pion/interceptor v0.1.44 // indirect\n\tgithub.com/pion/logging v0.2.4 // indirect\n\tgithub.com/pion/mdns/v2 v2.1.0 // indirect\n\tgithub.com/pion/randutil v0.1.0 // indirect\n\tgithub.com/pion/rtcp v1.2.16 // indirect\n\tgithub.com/pion/sctp v1.9.2 // indirect\n\tgithub.com/pion/sdp/v3 v3.0.18 // indirect\n\tgithub.com/pion/srtp/v3 v3.0.10 // indirect\n\tgithub.com/pion/stun/v3 v3.1.1 // indirect\n\tgithub.com/pion/transport/v4 v4.0.1 // indirect\n\tgithub.com/pion/turn/v4 v4.1.4 // indirect\n\tgithub.com/pkg/errors v0.9.1 // indirect\n\tgithub.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect\n\tgithub.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect\n\tgithub.com/prometheus/procfs v0.19.2 // indirect\n\tgithub.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect\n\tgithub.com/redis/go-redis/v9 v9.17.3 // indirect\n\tgithub.com/spiffe/go-spiffe/v2 v2.6.0 // indirect\n\tgithub.com/twitchtv/twirp v8.1.3+incompatible // indirect\n\tgithub.com/wlynxg/anet v0.0.5 // indirect\n\tgithub.com/zeebo/xxh3 v1.1.0 // indirect\n\tgo.opentelemetry.io/auto/sdk v1.2.1 // indirect\n\tgo.opentelemetry.io/contrib/detectors/gcp v1.39.0 
// indirect\n\tgo.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 // indirect\n\tgo.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect\n\tgo.opentelemetry.io/otel/metric v1.40.0 // indirect\n\tgo.opentelemetry.io/otel/sdk v1.40.0 // indirect\n\tgo.opentelemetry.io/otel/sdk/metric v1.40.0 // indirect\n\tgo.opentelemetry.io/otel/trace v1.40.0 // indirect\n\tgo.uber.org/multierr v1.11.0 // indirect\n\tgo.uber.org/zap/exp v0.3.0 // indirect\n\tgo.yaml.in/yaml/v2 v2.4.3 // indirect\n\tgolang.org/x/crypto v0.48.0 // indirect\n\tgolang.org/x/mod v0.33.0 // indirect\n\tgolang.org/x/net v0.50.0 // indirect\n\tgolang.org/x/oauth2 v0.34.0 // indirect\n\tgolang.org/x/sync v0.19.0 // indirect\n\tgolang.org/x/sys v0.42.0 // indirect\n\tgolang.org/x/text v0.34.0 // indirect\n\tgolang.org/x/time v0.14.0 // indirect\n\tgolang.org/x/tools v0.42.0 // indirect\n\tgoogle.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect\n\tgoogle.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 // indirect\n\tgoogle.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 // indirect\n)\n"
  },
  {
    "path": "go.sum",
    "content": "buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.11-20260209202127-80ab13bee0bf.1 h1:PMmTMyvHScV9Mn8wc6ASge9uRcHy0jtqPd+fM35LmsQ=\nbuf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.11-20260209202127-80ab13bee0bf.1/go.mod h1:tvtbpgaVXZX4g6Pn+AnzFycuRK3MOz5HJfEGeEllXYM=\nbuf.build/go/protovalidate v1.1.2 h1:83vYHoY8f34hB8MeitGaYE3CGVPFxwdEUuskh5qQpA0=\nbuf.build/go/protovalidate v1.1.2/go.mod h1:Ez3z+w4c+wG+EpW8ovgZaZPnPl2XVF6kaxgcv1NG/QE=\nbuf.build/go/protoyaml v0.6.0 h1:Nzz1lvcXF8YgNZXk+voPPwdU8FjDPTUV4ndNTXN0n2w=\nbuf.build/go/protoyaml v0.6.0/go.mod h1:RgUOsBu/GYKLDSIRgQXniXbNgFlGEZnQpRAUdLAFV2Q=\ncel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4=\ncel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4=\ncloud.google.com/go v0.121.1 h1:S3kTQSydxmu1JfLRLpKtxRPA7rSrYPRPEUmL/PavVUw=\ncloud.google.com/go v0.121.1/go.mod h1:nRFlrHq39MNVWu+zESP2PosMWA0ryJw8KUBZ2iZpxbw=\ncloud.google.com/go/auth v0.16.2 h1:QvBAGFPLrDeoiNjyfVunhQ10HKNYuOwZ5noee0M5df4=\ncloud.google.com/go/auth v0.16.2/go.mod h1:sRBas2Y1fB1vZTdurouM0AzuYQBMZinrUYL8EufhtEA=\ncloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=\ncloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=\ncloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs=\ncloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10=\ncloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8=\ncloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE=\ncloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc=\ncloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA=\ncloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE=\ncloud.google.com/go/longrunning v0.6.7/go.mod 
h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY=\ncloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM=\ncloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U=\ncloud.google.com/go/storage v1.55.0 h1:NESjdAToN9u1tmhVqhXCaCwYBuvEhZLLv0gBr+2znf0=\ncloud.google.com/go/storage v1.55.0/go.mod h1:ztSmTTwzsdXe5syLVS0YsbFxXuvEmEyZj7v7zChEmuY=\ncloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4=\ncloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI=\ndario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=\ndario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=\ngithub.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U=\ngithub.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=\ngithub.com/Azure/azure-storage-blob-go v0.15.0 h1:rXtgp8tN1p29GvpGgfJetavIG0V7OgcSXPpwp3tx6qk=\ngithub.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58=\ngithub.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=\ngithub.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=\ngithub.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=\ngithub.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=\ngithub.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q=\ngithub.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=\ngithub.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=\ngithub.com/Azure/go-autorest/autorest/date v0.3.0/go.mod 
h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=\ngithub.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=\ngithub.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=\ngithub.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=\ngithub.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=\ngithub.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 h1:sBEjpZlNHzK1voKq9695PJSX2o5NEXl7/OL3coiIY0c=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 h1:fYE9p3esPxA/C0rQ0AHhP0drtPXDRhaWiwg1DPqO7IU=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0/go.mod h1:BnBReJLvVYx2CS/UHOgVz2BXKXD9wsQPxZug20nZhd0=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.51.0 h1:OqVGm6Ei3x5+yZmSJG1Mh2NwHvpVmZ08CB5qJhT9Nuk=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.51.0/go.mod h1:SZiPHWGOOk3bl8tkevxkoiwPgsIl6CwrWcbwjfHZpdM=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 h1:6/0iUd0xrnX7qt+mLNRwg5c0PGv8wpE8K90ryANQwMI=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0=\ngithub.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=\ngithub.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=\ngithub.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=\ngithub.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod 
h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=\ngithub.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible h1:8psS8a+wKfiLt1iVDX79F7Y6wUM49Lcha2FMXt4UM8g=\ngithub.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=\ngithub.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ=\ngithub.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw=\ngithub.com/aws/aws-sdk-go-v2 v1.41.5 h1:dj5kopbwUsVUVFgO4Fi5BIT3t4WyqIDjGKCangnV/yY=\ngithub.com/aws/aws-sdk-go-v2 v1.41.5/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o=\ngithub.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.8 h1:eBMB84YGghSocM7PsjmmPffTa+1FBUeNvGvFou6V/4o=\ngithub.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.8/go.mod h1:lyw7GFp3qENLh7kwzf7iMzAxDn+NzjXEAGjKS2UOKqI=\ngithub.com/aws/aws-sdk-go-v2/config v1.29.17 h1:jSuiQ5jEe4SAMH6lLRMY9OVC+TqJLP5655pBGjmnjr0=\ngithub.com/aws/aws-sdk-go-v2/config v1.29.17/go.mod h1:9P4wwACpbeXs9Pm9w1QTh6BwWwJjwYvJ1iCt5QbCXh8=\ngithub.com/aws/aws-sdk-go-v2/credentials v1.17.70 h1:ONnH5CM16RTXRkS8Z1qg7/s2eDOhHhaXVd72mmyv4/0=\ngithub.com/aws/aws-sdk-go-v2/credentials v1.17.70/go.mod h1:M+lWhhmomVGgtuPOhO85u4pEa3SmssPTdcYpP/5J/xc=\ngithub.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32 h1:KAXP9JSHO1vKGCr5f4O6WmlVKLFFXgWYAGoJosorxzU=\ngithub.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32/go.mod h1:h4Sg6FQdexC1yYG9RDnOvLbW1a/P986++/Y/a+GyEM8=\ngithub.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.81 h1:E5ff1vZlAudg24j5lF6F6/gBpln2LjWxGdQDBSLfVe4=\ngithub.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.81/go.mod h1:hHBLCuhHI4Aokvs5vdVoCDBzmFy86yxs5J7LEPQwQEM=\ngithub.com/aws/aws-sdk-go-v2/internal/configsources v1.4.21 h1:Rgg6wvjjtX8bNHcvi9OnXWwcE0a2vGpbwmtICOsvcf4=\ngithub.com/aws/aws-sdk-go-v2/internal/configsources v1.4.21/go.mod h1:A/kJFst/nm//cyqonihbdpQZwiUhhzpqTsdbhDdRF9c=\ngithub.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.21 
h1:PEgGVtPoB6NTpPrBgqSE5hE/o47Ij9qk/SEZFbUOe9A=\ngithub.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.21/go.mod h1:p+hz+PRAYlY3zcpJhPwXlLC4C+kqn70WIHwnzAfs6ps=\ngithub.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=\ngithub.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=\ngithub.com/aws/aws-sdk-go-v2/internal/v4a v1.4.22 h1:rWyie/PxDRIdhNf4DzRk0lvjVOqFJuNnO8WwaIRVxzQ=\ngithub.com/aws/aws-sdk-go-v2/internal/v4a v1.4.22/go.mod h1:zd/JsJ4P7oGfUhXn1VyLqaRZwPmZwg44Jf2dS84Dm3Y=\ngithub.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7 h1:5EniKhLZe4xzL7a+fU3C2tfUN4nWIqlLesfrjkuPFTY=\ngithub.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7/go.mod h1:x0nZssQ3qZSnIcePWLvcoFisRXJzcTVvYpAAdYX8+GI=\ngithub.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.13 h1:JRaIgADQS/U6uXDqlPiefP32yXTda7Kqfx+LgspooZM=\ngithub.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.13/go.mod h1:CEuVn5WqOMilYl+tbccq8+N2ieCy0gVn3OtRb0vBNNM=\ngithub.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.21 h1:c31//R3xgIJMSC8S6hEVq+38DcvUlgFY0FM6mSI5oto=\ngithub.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.21/go.mod h1:r6+pf23ouCB718FUxaqzZdbpYFyDtehyZcmP5KL9FkA=\ngithub.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.21 h1:ZlvrNcHSFFWURB8avufQq9gFsheUgjVD9536obIknfM=\ngithub.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.21/go.mod h1:cv3TNhVrssKR0O/xxLJVRfd2oazSnZnkUeTf6ctUwfQ=\ngithub.com/aws/aws-sdk-go-v2/service/s3 v1.97.3 h1:HwxWTbTrIHm5qY+CAEur0s/figc3qwvLWsNkF4RPToo=\ngithub.com/aws/aws-sdk-go-v2/service/s3 v1.97.3/go.mod h1:uoA43SdFwacedBfSgfFSjjCvYe8aYBS7EnU5GZ/YKMM=\ngithub.com/aws/aws-sdk-go-v2/service/sso v1.25.5 h1:AIRJ3lfb2w/1/8wOOSqYb9fUKGwQbtysJ2H1MofRUPg=\ngithub.com/aws/aws-sdk-go-v2/service/sso v1.25.5/go.mod h1:b7SiVprpU+iGazDUqvRSLf5XmCdn+JtT1on7uNL6Ipc=\ngithub.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 
h1:BpOxT3yhLwSJ77qIY3DoHAQjZsc4HEGfMCE4NGy3uFg=\ngithub.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3/go.mod h1:vq/GQR1gOFLquZMSrxUK/cpvKCNVYibNyJ1m7JrU88E=\ngithub.com/aws/aws-sdk-go-v2/service/sts v1.34.0 h1:NFOJ/NXEGV4Rq//71Hs1jC/NvPs1ezajK+yQmkwnPV0=\ngithub.com/aws/aws-sdk-go-v2/service/sts v1.34.0/go.mod h1:7ph2tGpfQvwzgistp2+zga9f+bCjlQJPkPUmMgDSD7w=\ngithub.com/aws/smithy-go v1.24.2 h1:FzA3bu/nt/vDvmnkg+R8Xl46gmzEDam6mZ1hzmwXFng=\ngithub.com/aws/smithy-go v1.24.2/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc=\ngithub.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=\ngithub.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=\ngithub.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=\ngithub.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=\ngithub.com/bep/debounce v1.2.1 h1:v67fRdBA9UQu2NhLFXrSg0Brw7CexQekrBwDMM8bzeY=\ngithub.com/bep/debounce v1.2.1/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0=\ngithub.com/brianvoe/gofakeit/v6 v6.28.0 h1:Xib46XXuQfmlLS2EXRuJpqcw8St6qSZz75OUo0tgAW4=\ngithub.com/brianvoe/gofakeit/v6 v6.28.0/go.mod h1:Xj58BMSnFqcn/fAQeSK+/PLtC5kSb7FJIq4JyGa8vEs=\ngithub.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=\ngithub.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=\ngithub.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=\ngithub.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=\ngithub.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=\ngithub.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=\ngithub.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=\ngithub.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=\ngithub.com/chromedp/cdproto v0.0.0-20260405000525-47a8ff65b46a 
h1:Kk4P1W58eAf+OUGtx51cM7CcJokJuBEmOxxwPdHFH4Q=\ngithub.com/chromedp/cdproto v0.0.0-20260405000525-47a8ff65b46a/go.mod h1:cbyjALe67vDvlvdiG9369P8w5U2w6IshwtyD2f2Tvag=\ngithub.com/chromedp/chromedp v0.15.1 h1:EJWiPm7BNqDqjYy6U0lTSL5wNH+iNt9GjC3a4gfjNyQ=\ngithub.com/chromedp/chromedp v0.15.1/go.mod h1:CdTHtUqD/dqaFw/cvFWtTydoEQS44wLBuwbMR9EkOY4=\ngithub.com/chromedp/sysutil v1.1.0 h1:PUFNv5EcprjqXZD9nJb9b/c9ibAbxiYo4exNWZyipwM=\ngithub.com/chromedp/sysutil v1.1.0/go.mod h1:WiThHUdltqCNKGc4gaU50XgYjwjYIhKWoHGPTUfWTJ8=\ngithub.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 h1:6xNmx7iTtyBRev0+D/Tv1FZd4SCg8axKApyNyRsAt/w=\ngithub.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5/go.mod h1:KdCmV+x/BuvyMxRnYBlmVaq4OLiKW6iRQfvC62cvdkI=\ngithub.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4=\ngithub.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE=\ngithub.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=\ngithub.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=\ngithub.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=\ngithub.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=\ngithub.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=\ngithub.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/dennwc/iters v1.2.2 h1:XH2/Etihiy9ZvPOVCR+icQXeYlhbvS7k0qro4x/2qQo=\ngithub.com/dennwc/iters v1.2.2/go.mod h1:M9KuuMBeyEXYTmB7EnI9SCyALFCmPWOIxn5W1L0CjGg=\ngithub.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=\ngithub.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod 
h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=\ngithub.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=\ngithub.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=\ngithub.com/docker/cli v29.0.0+incompatible h1:KgsN2RUFMNM8wChxryicn4p46BdQWpXOA1XLGBGPGAw=\ngithub.com/docker/cli v29.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=\ngithub.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=\ngithub.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=\ngithub.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=\ngithub.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=\ngithub.com/elliotchance/orderedmap/v2 v2.7.0 h1:WHuf0DRo63uLnldCPp9ojm3gskYwEdIIfAUVG5KhoOc=\ngithub.com/elliotchance/orderedmap/v2 v2.7.0/go.mod h1:85lZyVbpGaGvHvnKa7Qhx7zncAdBIBq6u56Hb1PRU5Q=\ngithub.com/envoyproxy/go-control-plane v0.14.0 h1:hbG2kr4RuFj222B6+7T83thSPqLjwBIfQawTkC++2HA=\ngithub.com/envoyproxy/go-control-plane v0.14.0/go.mod h1:NcS5X47pLl/hfqxU70yPwL9ZMkUlwlKxtAohpi2wBEU=\ngithub.com/envoyproxy/go-control-plane/envoy v1.36.0 h1:yg/JjO5E7ubRyKX3m07GF3reDNEnfOboJ0QySbH736g=\ngithub.com/envoyproxy/go-control-plane/envoy v1.36.0/go.mod h1:ty89S1YCCVruQAm9OtKeEkQLTb+Lkz0k8v9W0Oxsv98=\ngithub.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI=\ngithub.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4=\ngithub.com/envoyproxy/protoc-gen-validate v1.3.0 h1:TvGH1wof4H33rezVKWSpqKz5NXWg5VPuZ0uONDT6eb4=\ngithub.com/envoyproxy/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9zHIH36nmrm7oCbo4YKoSWA=\ngithub.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=\ngithub.com/fatih/color v1.16.0/go.mod 
h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=\ngithub.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=\ngithub.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=\ngithub.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=\ngithub.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=\ngithub.com/frostbyte73/core v0.1.1 h1:ChhJOR7bAKOCPbA+lqDLE2cGKlCG5JXsDvvQr4YaJIA=\ngithub.com/frostbyte73/core v0.1.1/go.mod h1:mhfOtR+xWAvwXiwor7jnqPMnu4fxbv1F2MwZ0BEpzZo=\ngithub.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=\ngithub.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=\ngithub.com/gammazero/deque v1.2.1 h1:9fnQVFCCZ9/NOc7ccTNqzoKd1tCWOqeI05/lPqFPMGQ=\ngithub.com/gammazero/deque v1.2.1/go.mod h1:5nSFkzVm+afG9+gy0VIowlqVAW4N8zNcMne+CMQVD2g=\ngithub.com/go-gst/go-glib v1.4.1-0.20241209142714-f53cebf18559 h1:AK60n6W3FLZTp9H1KU5VOa8XefNO0w0R3pfszphwX14=\ngithub.com/go-gst/go-glib v1.4.1-0.20241209142714-f53cebf18559/go.mod h1:ZWT4LXOO2PH8lSNu/dR5O2yoNQJKEgmijNa2d7nByK8=\ngithub.com/go-gst/go-pointer v0.0.0-20241127163939-ba766f075b4c h1:x8kKRVDmz5BRlolmDZGcsuZ1l+js6TRL3QWBJjGVctM=\ngithub.com/go-gst/go-pointer v0.0.0-20241127163939-ba766f075b4c/go.mod h1:qKw5ZZ0U58W6PU/7F/Lopv+14nKYmdXlOd7VnAZ17Mk=\ngithub.com/go-jose/go-jose/v3 v3.0.5 h1:BLLJWbC4nMZOfuPVxoZIxeYsn6Nl2r1fITaJ78UQlVQ=\ngithub.com/go-jose/go-jose/v3 v3.0.5/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=\ngithub.com/go-jose/go-jose/v4 v4.1.4 h1:moDMcTHmvE6Groj34emNPLs/qtYXRVcd6S7NHbHz3kA=\ngithub.com/go-jose/go-jose/v4 v4.1.4/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=\ngithub.com/go-json-experiment/json v0.0.0-20260214004413-d219187c3433 h1:vymEbVwYFP/L05h5TKQxvkXoKxNvTpjxYKdF1Nlwuao=\ngithub.com/go-json-experiment/json v0.0.0-20260214004413-d219187c3433/go.mod 
h1:tphK2c80bpPhMOI4v6bIc2xWywPfbqi1Z06+RcrMkDg=\ngithub.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=\ngithub.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=\ngithub.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=\ngithub.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=\ngithub.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=\ngithub.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=\ngithub.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=\ngithub.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU=\ngithub.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=\ngithub.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og=\ngithub.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=\ngithub.com/gobwas/ws v1.4.0 h1:CTaoG1tojrh4ucGPcoJFiAQUAsEWekEWvLy7GsVNqGs=\ngithub.com/gobwas/ws v1.4.0/go.mod h1:G3gNqMNtPppf5XUz7O4shetPpcZ1VJ7zt18dlUeakrc=\ngithub.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=\ngithub.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=\ngithub.com/google/cel-go v0.27.0 h1:e7ih85+4qVrBuqQWTW4FKSqZYokVuc3HnhH5keboFTo=\ngithub.com/google/cel-go v0.27.0/go.mod h1:tTJ11FWqnhw5KKpnWpvW9CJC3Y9GK4EIS0WXnBbebzw=\ngithub.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=\ngithub.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=\ngithub.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=\ngithub.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc=\ngithub.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0=\ngithub.com/google/s2a-go v0.1.9 
h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=\ngithub.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=\ngithub.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=\ngithub.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=\ngithub.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=\ngithub.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4=\ngithub.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=\ngithub.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0=\ngithub.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w=\ngithub.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=\ngithub.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=\ngithub.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=\ngithub.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=\ngithub.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=\ngithub.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=\ngithub.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=\ngithub.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=\ngithub.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY=\ngithub.com/jellydator/ttlcache/v3 v3.4.0/go.mod 
h1:Hw9EgjymziQD3yGsQdf1FqFdpp7YjFMd4Srg5EJlgD4=\ngithub.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=\ngithub.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=\ngithub.com/jxskiss/base62 v1.1.0 h1:A5zbF8v8WXx2xixnAKD2w+abC+sIzYJX+nxmhA6HWFw=\ngithub.com/jxskiss/base62 v1.1.0/go.mod h1:HhWAlUXvxKThfOlZbcuFzsqwtF5TcqS9ru3y5GfjWAc=\ngithub.com/klauspost/compress v1.18.4 h1:RPhnKRAQ4Fh8zU2FY/6ZFDwTVTxgJ/EMydqSTzE9a2c=\ngithub.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=\ngithub.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=\ngithub.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=\ngithub.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=\ngithub.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=\ngithub.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=\ngithub.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=\ngithub.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=\ngithub.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=\ngithub.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=\ngithub.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=\ngithub.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=\ngithub.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80 h1:6Yzfa6GP0rIo/kULo2bwGEkFvCePZ3qHDDTC3/J9Swo=\ngithub.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=\ngithub.com/linkdata/deadlock v0.5.5 h1:d6O+rzEqasSfamGDA8u7bjtaq7hOX8Ha4Zn36Wxrkvo=\ngithub.com/linkdata/deadlock v0.5.5/go.mod h1:tXb28stzAD3trzEEK0UJWC+rZKuobCoPktPYzebb1u0=\ngithub.com/lithammer/shortuuid/v4 v4.2.0 
h1:LMFOzVB3996a7b8aBuEXxqOBflbfPQAiVzkIcHO0h8c=\ngithub.com/lithammer/shortuuid/v4 v4.2.0/go.mod h1:D5noHZ2oFw/YaKCfGy0YxyE7M0wMbezmMjPdhyEFe6Y=\ngithub.com/livekit/gst-go v0.0.0-20250701011214-e7f61abd14cb h1:1Vjk6NaXJZQiCvXGlKv38ossk4mNKHy5ob+eZygewdw=\ngithub.com/livekit/gst-go v0.0.0-20250701011214-e7f61abd14cb/go.mod h1:pyCgY9XFSG0CAnJzoJ84R5XWn8rEj849EYJOwnAdB8k=\ngithub.com/livekit/livekit-server v1.9.12 h1:VsPJAL2EbiBKt5SIhZWazNUSkEUZi/8P8kttGXUE1zw=\ngithub.com/livekit/livekit-server v1.9.12/go.mod h1:Xh2ocHdH+z/D2u00GulDVVIDdgAlck1miHT0Ab2Skvg=\ngithub.com/livekit/mageutil v0.0.0-20250511045019-0f1ff63f7731 h1:9x+U2HGLrSw5ATTo469PQPkqzdoU7be46ryiCDO3boc=\ngithub.com/livekit/mageutil v0.0.0-20250511045019-0f1ff63f7731/go.mod h1:Rs3MhFwutWhGwmY1VQsygw28z5bWcnEYmS1OG9OxjOQ=\ngithub.com/livekit/media-sdk v0.0.0-20260422170315-2c3eed337496 h1:yIEbXERsObyjGGoTnv7Bf37pQfAHrxRmPAN//tgzwJU=\ngithub.com/livekit/media-sdk v0.0.0-20260422170315-2c3eed337496/go.mod h1:7ssWiG+U4xnbvLih9WiZbhQP6zIKMjgXdUtIE1bm/E8=\ngithub.com/livekit/mediatransportutil v0.0.0-20260113174415-2e8ba344fca3 h1:v1Xc/q/547TjLX7Nw5y2vXNnmV0XYFAbhTJrtErQeDA=\ngithub.com/livekit/mediatransportutil v0.0.0-20260113174415-2e8ba344fca3/go.mod h1:QBx/KHV6Vv00ggibg/WrOlqrkTciEA2Hc9DGWYr3Q9U=\ngithub.com/livekit/protocol v1.45.6 h1:E+wKxs8ckKNYYTNyHm5nR1ShGLJ5DmA+WCEb5AJG11A=\ngithub.com/livekit/protocol v1.45.6/go.mod h1:e6QdWDkfot+M2nRh0eitJUS0ZLuwvKCsfiz2pWWSG3s=\ngithub.com/livekit/psrpc v0.7.1 h1:ms37az0QTD3UXIWuUC5D/SkmKOlRMVRsI261eBWu/Vw=\ngithub.com/livekit/psrpc v0.7.1/go.mod h1:bZ4iHFQptTkbPnB0LasvRNu/OBYXEu1NA6O5BMFo9kk=\ngithub.com/livekit/server-sdk-go/v2 v2.16.2-0.20260401161108-50e969e2961f h1:xSUtbUe3wBIFG/Ki3KEIsmjkOcfbpSOYJh2xxwJEllg=\ngithub.com/livekit/server-sdk-go/v2 v2.16.2-0.20260401161108-50e969e2961f/go.mod h1:oQbYijcbPzfjBAOzoq7tz9Ktqur8JNRCd923VP8xOQQ=\ngithub.com/livekit/storage v0.0.0-20251113154014-aa1f4d0ce057 
h1:6XTEL0cSGkDPWYl1nAS/3cNOK1QoIo11C/O4pc4vPMg=\ngithub.com/livekit/storage v0.0.0-20251113154014-aa1f4d0ce057/go.mod h1:m+EDdiNremMNJbggvfj5mY8w7nbzVGtZka5Jhj4pg0g=\ngithub.com/llehouerou/go-mp3 v1.2.0 h1:2WN/bjCGhfPZAQbSSF35DxNKS/+HPnJ76TakA7Kyscs=\ngithub.com/llehouerou/go-mp3 v1.2.0/go.mod h1:/Rl7E/VQpWTQDTJgr69iYVSkS1BZEh4X/ABV1XvIpHA=\ngithub.com/mackerelio/go-osstat v0.2.6 h1:gs4U8BZeS1tjrL08tt5VUliVvSWP26Ai2Ob8Lr7f2i0=\ngithub.com/mackerelio/go-osstat v0.2.6/go.mod h1:lRy8V9ZuHpuRVZh+vyTkODeDPl3/d5MgXHtLSaqG8bA=\ngithub.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg=\ngithub.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=\ngithub.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=\ngithub.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=\ngithub.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=\ngithub.com/mattn/go-ieproxy v0.0.12 h1:OZkUFJC3ESNZPQ+6LzC3VJIFSnreeFLQyqvBWtvfL2M=\ngithub.com/mattn/go-ieproxy v0.0.12/go.mod h1:Vn+N61199DAnVeTgaF8eoB9PvLO8P3OBnG95ENh7B7c=\ngithub.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=\ngithub.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=\ngithub.com/maxbrunsfeld/counterfeiter/v6 v6.12.1 h1:D4O2wLxB384TS3ohBJMfolnxb4qGmoZ1PnWNtit8LYo=\ngithub.com/maxbrunsfeld/counterfeiter/v6 v6.12.1/go.mod h1:RuJdxo0oI6dClIaMzdl3hewq3a065RH65dofJP03h8I=\ngithub.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=\ngithub.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=\ngithub.com/moby/moby/api v1.52.0 h1:00BtlJY4MXkkt84WhUZPRqt5TvPbgig2FZvTbe3igYg=\ngithub.com/moby/moby/api v1.52.0/go.mod h1:8mb+ReTlisw4pS6BRzCMts5M49W5M7bKt1cJy/YbAqc=\ngithub.com/moby/moby/client v0.1.0 h1:nt+hn6O9cyJQqq5UWnFGqsZRTS/JirUqzPjEl0Bdc/8=\ngithub.com/moby/moby/client 
v0.1.0/go.mod h1:O+/tw5d4a1Ha/ZA/tPxIZJapJRUS6LNZ1wiVRxYHyUE=\ngithub.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo=\ngithub.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=\ngithub.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=\ngithub.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=\ngithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=\ngithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=\ngithub.com/nats-io/nats.go v1.48.0 h1:pSFyXApG+yWU/TgbKCjmm5K4wrHu86231/w84qRVR+U=\ngithub.com/nats-io/nats.go v1.48.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g=\ngithub.com/nats-io/nkeys v0.4.15 h1:JACV5jRVO9V856KOapQ7x+EY8Jo3qw1vJt/9Jpwzkk4=\ngithub.com/nats-io/nkeys v0.4.15/go.mod h1:CpMchTXC9fxA5zrMo4KpySxNjiDVvr8ANOSZdiNfUrs=\ngithub.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=\ngithub.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=\ngithub.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=\ngithub.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=\ngithub.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=\ngithub.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=\ngithub.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=\ngithub.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=\ngithub.com/opencontainers/runc v1.3.3 h1:qlmBbbhu+yY0QM7jqfuat7M1H3/iXjju3VkP9lkFQr4=\ngithub.com/opencontainers/runc v1.3.3/go.mod h1:D7rL72gfWxVs9cJ2/AayxB0Hlvn9g0gaF1R7uunumSI=\ngithub.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde 
h1:x0TT0RDC7UhAVbbWWBzr41ElhJx5tXPWkIHA2HWPRuw=\ngithub.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=\ngithub.com/ory/dockertest/v3 v3.12.0 h1:3oV9d0sDzlSQfHtIaB5k6ghUCVMVLpAY8hwrqoCyRCw=\ngithub.com/ory/dockertest/v3 v3.12.0/go.mod h1:aKNDTva3cp8dwOWwb9cWuX84aH5akkxXRvO7KCwWVjE=\ngithub.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=\ngithub.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=\ngithub.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe h1:vHpqOnPlnkba8iSxU4j/CvDSS9J4+F4473esQsYLGoE=\ngithub.com/petermattis/goid v0.0.0-20250813065127-a731cc31b4fe/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=\ngithub.com/pion/datachannel v1.6.0 h1:XecBlj+cvsxhAMZWFfFcPyUaDZtd7IJvrXqlXD/53i0=\ngithub.com/pion/datachannel v1.6.0/go.mod h1:ur+wzYF8mWdC+Mkis5Thosk+u/VOL287apDNEbFpsIk=\ngithub.com/pion/dtls/v3 v3.1.2 h1:gqEdOUXLtCGW+afsBLO0LtDD8GnuBBjEy6HRtyofZTc=\ngithub.com/pion/dtls/v3 v3.1.2/go.mod h1:Hw/igcX4pdY69z1Hgv5x7wJFrUkdgHwAn/Q/uo7YHRo=\ngithub.com/pion/ice/v4 v4.2.0 h1:jJC8S+CvXCCvIQUgx+oNZnoUpt6zwc34FhjWwCU4nlw=\ngithub.com/pion/ice/v4 v4.2.0/go.mod h1:EgjBGxDgmd8xB0OkYEVFlzQuEI7kWSCFu+mULqaisy4=\ngithub.com/pion/interceptor v0.1.44 h1:sNlZwM8dWXU9JQAkJh8xrarC0Etn8Oolcniukmuy0/I=\ngithub.com/pion/interceptor v0.1.44/go.mod h1:4atVlBkcgXuUP+ykQF0qOCGU2j7pQzX2ofvPRFsY5RY=\ngithub.com/pion/logging v0.2.4 h1:tTew+7cmQ+Mc1pTBLKH2puKsOvhm32dROumOZ655zB8=\ngithub.com/pion/logging v0.2.4/go.mod h1:DffhXTKYdNZU+KtJ5pyQDjvOAh/GsNSyv1lbkFbe3so=\ngithub.com/pion/mdns/v2 v2.1.0 h1:3IJ9+Xio6tWYjhN6WwuY142P/1jA0D5ERaIqawg/fOY=\ngithub.com/pion/mdns/v2 v2.1.0/go.mod h1:pcez23GdynwcfRU1977qKU0mDxSeucttSHbCSfFOd9A=\ngithub.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=\ngithub.com/pion/randutil v0.1.0/go.mod 
h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=\ngithub.com/pion/rtcp v1.2.16 h1:fk1B1dNW4hsI78XUCljZJlC4kZOPk67mNRuQ0fcEkSo=\ngithub.com/pion/rtcp v1.2.16/go.mod h1:/as7VKfYbs5NIb4h6muQ35kQF/J0ZVNz2Z3xKoCBYOo=\ngithub.com/pion/rtp v1.10.1 h1:xP1prZcCTUuhO2c83XtxyOHJteISg6o8iPsE2acaMtA=\ngithub.com/pion/rtp v1.10.1/go.mod h1:rF5nS1GqbR7H/TCpKwylzeq6yDM+MM6k+On5EgeThEM=\ngithub.com/pion/sctp v1.9.2 h1:HxsOzEV9pWoeggv7T5kewVkstFNcGvhMPx0GvUOUQXo=\ngithub.com/pion/sctp v1.9.2/go.mod h1:OTOlsQ5EDQ6mQ0z4MUGXt2CgQmKyafBEXhUVqLRB6G8=\ngithub.com/pion/sdp/v3 v3.0.18 h1:l0bAXazKHpepazVdp+tPYnrsy9dfh7ZbT8DxesH5ZnI=\ngithub.com/pion/sdp/v3 v3.0.18/go.mod h1:ZREGo6A9ZygQ9XkqAj5xYCQtQpif0i6Pa81HOiAdqQ8=\ngithub.com/pion/srtp/v3 v3.0.10 h1:tFirkpBb3XccP5VEXLi50GqXhv5SKPxqrdlhDCJlZrQ=\ngithub.com/pion/srtp/v3 v3.0.10/go.mod h1:3mOTIB0cq9qlbn59V4ozvv9ClW/BSEbRp4cY0VtaR7M=\ngithub.com/pion/stun/v3 v3.1.1 h1:CkQxveJ4xGQjulGSROXbXq94TAWu8gIX2dT+ePhUkqw=\ngithub.com/pion/stun/v3 v3.1.1/go.mod h1:qC1DfmcCTQjl9PBaMa5wSn3x9IPmKxSdcCsxBcDBndM=\ngithub.com/pion/transport/v3 v3.1.1 h1:Tr684+fnnKlhPceU+ICdrw6KKkTms+5qHMgw6bIkYOM=\ngithub.com/pion/transport/v3 v3.1.1/go.mod h1:+c2eewC5WJQHiAA46fkMMzoYZSuGzA/7E2FPrOYHctQ=\ngithub.com/pion/transport/v4 v4.0.1 h1:sdROELU6BZ63Ab7FrOLn13M6YdJLY20wldXW2Cu2k8o=\ngithub.com/pion/transport/v4 v4.0.1/go.mod h1:nEuEA4AD5lPdcIegQDpVLgNoDGreqM/YqmEx3ovP4jM=\ngithub.com/pion/turn/v4 v4.1.4 h1:EU11yMXKIsK43FhcUnjLlrhE4nboHZq+TXBIi3QpcxQ=\ngithub.com/pion/turn/v4 v4.1.4/go.mod h1:ES1DXVFKnOhuDkqn9hn5VJlSWmZPaRJLyBXoOeO/BmQ=\ngithub.com/pion/webrtc/v4 v4.2.7 h1:NAdsMXzQk/2yN1uV06SGxXqqVrkpDmNe09st/u16rrY=\ngithub.com/pion/webrtc/v4 v4.2.7/go.mod h1:IzslI8Dkj2FFIre/Ua4TU86aXi+oC8g/nP1CW6Yuw34=\ngithub.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=\ngithub.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 
h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=\ngithub.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=\ngithub.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=\ngithub.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc=\ngithub.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE=\ngithub.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=\ngithub.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=\ngithub.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4=\ngithub.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw=\ngithub.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws=\ngithub.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw=\ngithub.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=\ngithub.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=\ngithub.com/redis/go-redis/v9 v9.17.3 h1:fN29NdNrE17KttK5Ndf20buqfDZwGNgoUr9qjl1DQx4=\ngithub.com/redis/go-redis/v9 v9.17.3/go.mod h1:u410H11HMLoB+TP67dz8rL9s6QW2j76l0//kSOd3370=\ngithub.com/rodaine/protogofakeit v0.1.1 h1:ZKouljuRM3A+TArppfBqnH8tGZHOwM/pjvtXe9DaXH8=\ngithub.com/rodaine/protogofakeit v0.1.1/go.mod h1:pXn/AstBYMaSfc1/RqH3N82pBuxtWgejz1AlYpY1mI0=\ngithub.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=\ngithub.com/rogpeppe/go-internal v1.14.1/go.mod 
h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=\ngithub.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8=\ngithub.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM=\ngithub.com/shoenig/test v1.7.0 h1:eWcHtTXa6QLnBvm0jgEabMRN/uJ4DMV3M8xUGgRkZmk=\ngithub.com/shoenig/test v1.7.0/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI=\ngithub.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=\ngithub.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=\ngithub.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo=\ngithub.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs=\ngithub.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=\ngithub.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=\ngithub.com/twitchtv/twirp v8.1.3+incompatible h1:+F4TdErPgSUbMZMwp13Q/KgDVuI7HJXP61mNV3/7iuU=\ngithub.com/twitchtv/twirp v8.1.3+incompatible/go.mod h1:RRJoFSAmTEh2weEqWtpPE3vFK5YBhA6bqp2l1kfCC5A=\ngithub.com/urfave/cli/v3 v3.3.9 h1:54roEDJcTWuucl6MSQ3B+pQqt1ePh/xOQokhEYl5Gfs=\ngithub.com/urfave/cli/v3 v3.3.9/go.mod h1:FJSKtM/9AiiTOJL4fJ6TbMUkxBXn7GO9guZqoZtpYpo=\ngithub.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU=\ngithub.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=\ngithub.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=\ngithub.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=\ngithub.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 
h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=\ngithub.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=\ngithub.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=\ngithub.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=\ngithub.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=\ngithub.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ=\ngithub.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=\ngithub.com/zeebo/xxh3 v1.1.0 h1:s7DLGDK45Dyfg7++yxI0khrfwq9661w9EN78eP/UZVs=\ngithub.com/zeebo/xxh3 v1.1.0/go.mod h1:IisAie1LELR4xhVinxWS5+zf1lA4p0MW4T+w+W07F5s=\ngo.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=\ngo.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=\ngo.opentelemetry.io/contrib/detectors/gcp v1.39.0 h1:kWRNZMsfBHZ+uHjiH4y7Etn2FK26LAGkNFw7RHv1DhE=\ngo.opentelemetry.io/contrib/detectors/gcp v1.39.0/go.mod h1:t/OGqzHBa5v6RHZwrDBJ2OirWc+4q/w2fTbLZwAKjTk=\ngo.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo=\ngo.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ=\ngo.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=\ngo.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=\ngo.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms=\ngo.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=\ngo.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 
h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY=\ngo.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw=\ngo.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g=\ngo.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc=\ngo.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8=\ngo.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE=\ngo.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw=\ngo.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg=\ngo.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw=\ngo.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=\ngo.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=\ngo.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=\ngo.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=\ngo.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=\ngo.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=\ngo.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=\ngo.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc=\ngo.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=\ngo.uber.org/zap/exp v0.3.0 h1:6JYzdifzYkGmTdRR59oYH+Ng7k49H9qVpWwNSsGJj3U=\ngo.uber.org/zap/exp v0.3.0/go.mod h1:5I384qq7XGxYyByIhHm6jg5CHkGY0nsTfbDLgDDlgJQ=\ngo.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=\ngo.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=\ngo.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=\ngo.yaml.in/yaml/v3 v3.0.4/go.mod 
h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=\ngolang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=\ngolang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=\ngolang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=\ngolang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=\ngolang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=\ngolang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts=\ngolang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos=\ngolang.org/x/exp v0.0.0-20260212183809-81e46e3db34a h1:ovFr6Z0MNmU7nH8VaX5xqw+05ST2uO1exVfZPVqRC5o=\ngolang.org/x/exp v0.0.0-20260212183809-81e46e3db34a/go.mod h1:K79w1Vqn7PoiZn+TkNpx3BUWUQksGO3JcVX6qIjytmA=\ngolang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=\ngolang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=\ngolang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8=\ngolang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w=\ngolang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=\ngolang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=\ngolang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=\ngolang.org/x/net v0.6.0/go.mod 
h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=\ngolang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=\ngolang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60=\ngolang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM=\ngolang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=\ngolang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=\ngolang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=\ngolang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=\ngolang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.8.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=\ngolang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo=\ngolang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=\ngolang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=\ngolang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=\ngolang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=\ngolang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=\ngolang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=\ngolang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=\ngolang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\ngolang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\ngolang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=\ngolang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=\ngolang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=\ngolang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=\ngolang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=\ngolang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=\ngolang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=\ngolang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=\ngolang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.1.12/go.mod 
h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=\ngolang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=\ngolang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k=\ngolang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0=\ngolang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=\ngonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=\ngoogle.golang.org/api v0.238.0 h1:+EldkglWIg/pWjkq97sd+XxH7PxakNYoe/rkSTbnvOs=\ngoogle.golang.org/api v0.238.0/go.mod h1:cOVEm2TpdAGHL2z+UwyS+kmlGr3bVWQQ6sYEqkKje50=\ngoogle.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78=\ngoogle.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk=\ngoogle.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 h1:JLQynH/LBHfCTSbDWl+py8C+Rg/k1OVH3xfcaiANuF0=\ngoogle.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:kSJwQxqmFXeo79zOmbrALdflXQeAYcUbgS7PbpMknCY=\ngoogle.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 h1:mWPCjDEyshlQYzBpMNHaEof6UX1PmHcaUODUywQ0uac=\ngoogle.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=\ngoogle.golang.org/grpc v1.79.3 h1:sybAEdRIEtvcD68Gx7dmnwjZKlyfuc61Dyo9pGXXkKE=\ngoogle.golang.org/grpc v1.79.3/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ=\ngoogle.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=\ngoogle.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c 
h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=\ngopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=\ngopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=\ngopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=\ngopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=\ngopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\n"
  },
  {
    "path": "magefile.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n//go:build mage\n\npackage main\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"runtime\"\n\t\"strings\"\n\n\t\"github.com/livekit/egress/version\"\n\t\"github.com/livekit/mageutil\"\n)\n\nconst (\n\tgstVersion      = \"1.24.12\"\n\tlibniceVersion  = \"0.1.21\"\n\tchromiumVersion = \"146.0.7680.177-1\"\n\tdockerBuild     = \"docker build\"\n\tdockerBuildX    = \"docker buildx build --push --platform linux/amd64,linux/arm64\"\n)\n\ntype packageInfo struct {\n\tDir string\n}\n\nfunc Proto() error {\n\tctx := context.Background()\n\tfmt.Println(\"generating protobuf\")\n\n\t// parse go mod output\n\tpkgOut, err := mageutil.Out(ctx, \"go list -json -m github.com/livekit/protocol\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tpi := packageInfo{}\n\tif err = json.Unmarshal(pkgOut, &pi); err != nil {\n\t\treturn err\n\t}\n\n\tpsrpcOut, err := mageutil.Out(ctx, \"go list -json -m github.com/livekit/psrpc\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tpsrpcInfo := packageInfo{}\n\tif err = json.Unmarshal(psrpcOut, &psrpcInfo); err != nil {\n\t\treturn err\n\t}\n\n\t_, err = mageutil.GetToolPath(\"protoc\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tprotocGoPath, err := mageutil.GetToolPath(\"protoc-gen-go\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tprotocGrpcGoPath, err := mageutil.GetToolPath(\"protoc-gen-go-grpc\")\n\tif 
err != nil {\n\t\treturn err\n\t}\n\n\t// generate grpc-related protos\n\treturn mageutil.RunDir(ctx, \"pkg/ipc\", fmt.Sprintf(\n\t\t\"protoc\"+\n\t\t\t\" --go_out .\"+\n\t\t\t\" --go-grpc_out .\"+\n\t\t\t\" --go_opt=paths=source_relative\"+\n\t\t\t\" --go-grpc_opt=paths=source_relative\"+\n\t\t\t\" --plugin=go=%s\"+\n\t\t\t\" --plugin=go-grpc=%s\"+\n\t\t\t\" -I%s -I%s -I=. ipc.proto\",\n\t\tprotocGoPath, protocGrpcGoPath, pi.Dir+\"/protobufs\", psrpcInfo.Dir+\"/protoc-gen-psrpc/options\",\n\t))\n}\n\nfunc EnsureMediaSamples() error {\n\tctx := context.Background()\n\n\tconst script = \"build/test/fetch-media-samples.sh\"\n\tif _, err := os.Stat(script); err != nil {\n\t\treturn fmt.Errorf(\"missing %s: %w\", script, err)\n\t}\n\n\tif err := mageutil.Run(ctx, script); err != nil {\n\t\treturn err\n\t}\n\n\tif entries, _ := os.ReadDir(\"media-samples\"); len(entries) == 0 {\n\t\treturn fmt.Errorf(\"media-samples is empty after %s\", script)\n\t}\n\treturn nil\n}\n\nfunc Integration(configFile string) error {\n\tif err := EnsureMediaSamples(); err != nil {\n\t\treturn err\n\t}\n\n\tctx := context.Background()\n\tos.Setenv(\"DOCKER_BUILDKIT\", \"1\")\n\tdefer os.Unsetenv(\"DOCKER_BUILDKIT\")\n\n\tif err := mageutil.Run(ctx,\n\t\tfmt.Sprintf(\"docker build --build-arg TEMPLATE_TAG=%s --build-arg DEADLOCK=1 -t egress-test -f build/test/Dockerfile .\", version.TemplateVersion),\n\t); err != nil {\n\t\treturn err\n\t}\n\treturn Retest(configFile)\n}\n\nfunc Retest(configFile string) error {\n\tif configFile != \"\" {\n\t\tif strings.HasPrefix(configFile, \"test/\") {\n\t\t\tconfigFile = configFile[5:]\n\t\t} else {\n\t\t\toldLocation := configFile\n\t\t\tidx := strings.LastIndex(configFile, \"/\")\n\t\t\tif idx != -1 {\n\t\t\t\tconfigFile = configFile[idx+1:]\n\t\t\t}\n\t\t\tif err := os.Rename(oldLocation, \"test/\"+configFile); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tconfigFile = \"/out/\" + configFile\n\t}\n\n\tdefer Dotfiles()\n\tdefer func() {\n\t\t// 
for some reason, these can't be deleted from within the docker container\n\t\tfiles, _ := os.ReadDir(\"test/output\")\n\t\tfor _, file := range files {\n\t\t\tif file.IsDir() {\n\t\t\t\td, _ := os.ReadDir(path.Join(\"test/output\", file.Name()))\n\t\t\t\tif len(d) == 0 {\n\t\t\t\t\t_ = os.RemoveAll(path.Join(\"test/output\", file.Name()))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\tdir, err := os.Getwd()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn mageutil.Run(context.Background(),\n\t\tfmt.Sprintf(\"docker run --rm -e EGRESS_CONFIG_FILE=%s -v %s/test:/out egress-test\", configFile, dir),\n\t)\n}\n\nfunc Build() error {\n\treturn mageutil.Run(context.Background(),\n\t\tfmt.Sprintf(\"docker pull livekit/chrome-installer:%s\", chromiumVersion),\n\t\tfmt.Sprintf(\"docker pull livekit/gstreamer:%s-dev\", gstVersion),\n\t\tfmt.Sprintf(\"docker build -t livekit/egress:latest --build-arg TEMPLATE_TAG=%s -f build/egress/Dockerfile .\", version.TemplateVersion),\n\t)\n}\n\nfunc BuildTemplate() error {\n\treturn mageutil.Run(context.Background(),\n\t\t\"docker pull ubuntu:24.04\",\n\t\t\"docker build -t livekit/egress-templates -f ./build/template/Dockerfile .\",\n\t)\n}\n\nfunc BuildGStreamer() error {\n\treturn buildGstreamer(dockerBuild)\n}\n\nfunc buildGstreamer(cmd string) error {\n\tcommands := []string{\"docker pull ubuntu:23.10\"}\n\tfor _, build := range []string{\"base\", \"dev\", \"prod\", \"prod-rs\"} {\n\t\tcommands = append(commands, fmt.Sprintf(\"%s\"+\n\t\t\t\" --build-arg GSTREAMER_VERSION=%s\"+\n\t\t\t\" --build-arg LIBNICE_VERSION=%s\"+\n\t\t\t\" -t livekit/gstreamer:%s-%s\"+\n\t\t\t\" -t livekit/gstreamer:%s-%s-%s\"+\n\t\t\t\" -f build/gstreamer/Dockerfile-%s\"+\n\t\t\t\" ./build/gstreamer\",\n\t\t\tcmd, gstVersion, libniceVersion, gstVersion, build, gstVersion, build, runtime.GOARCH, build,\n\t\t))\n\t}\n\n\treturn mageutil.Run(context.Background(), commands...)\n}\n\nfunc Dotfiles() error {\n\tfiles, err := os.ReadDir(\"test/output\")\n\tif err != 
nil {\n\t\treturn err\n\t}\n\n\tdots := make(map[string]bool)\n\tpngs := make(map[string]bool)\n\tfor _, file := range files {\n\t\tname := file.Name()\n\t\tif strings.HasSuffix(name, \".dot\") {\n\t\t\tdots[name[:len(name)-4]] = true\n\t\t} else if strings.HasSuffix(file.Name(), \".png\") {\n\t\t\tpngs[name[:len(name)-4]] = true\n\t\t}\n\t}\n\n\tfor name := range dots {\n\t\tif !pngs[name] {\n\t\t\tif err := mageutil.Run(context.Background(), fmt.Sprintf(\n\t\t\t\t\"dot -Tpng test/output/%s.dot -o test/output/%s.png\",\n\t\t\t\tname, name,\n\t\t\t)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/config/base.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage config\n\nimport (\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/livekit/protocol/logger\"\n\t\"github.com/livekit/protocol/logger/medialogutils\"\n\t\"github.com/livekit/protocol/redis\"\n\tlksdk \"github.com/livekit/server-sdk-go/v2\"\n\n\t\"github.com/livekit/egress/pkg/types\"\n)\n\nconst TmpDir = \"/home/egress/tmp\"\n\ntype BaseConfig struct {\n\tNodeID string // do not supply - will be overwritten\n\n\t// required\n\tRedis     *redis.RedisConfig `yaml:\"redis\"`      // redis config\n\tApiKey    string             `yaml:\"api_key\"`    // (env LIVEKIT_API_KEY)\n\tApiSecret string             `yaml:\"api_secret\"` // (env LIVEKIT_API_SECRET)\n\tWsUrl     string             `yaml:\"ws_url\"`     // (env LIVEKIT_WS_URL)\n\n\t// optional\n\tLogging              *logger.Config `yaml:\"logging\"`                // logging config\n\tTemplateBase         string         `yaml:\"template_base\"`          // custom template base url\n\tClusterID            string         `yaml:\"cluster_id\"`             // cluster this instance belongs to\n\tEnableChromeSandbox  bool           `yaml:\"enable_chrome_sandbox\"`  // enable Chrome sandbox, requires extra docker configuration\n\tMaxUploadQueue       int            `yaml:\"max_upload_queue\"`       // maximum upload queue size, in minutes\n\tDisallowLocalStorage bool           
`yaml:\"disallow_local_storage\"` // require an upload config for all requests\n\tIOCreateTimeout      time.Duration  `yaml:\"io_create_timeout\"`      // timeout for CreateEgress calls\n\tIOUpdateTimeout      time.Duration  `yaml:\"io_update_timeout\"`      // timeout for UpdateEgress calls\n\tIOSelectionTimeout   time.Duration  `yaml:\"io_selection_timeout\"`   // timeout for affinity stage of IO RPC\n\tIOWorkers            int            `yaml:\"io_workers\"`             // number of IO update workers\n\n\tSessionLimits          `yaml:\"session_limits\"` // session duration limits\n\tStorageConfig          *StorageConfig          `yaml:\"storage,omitempty\"`          // storage config\n\tBackupConfig           *StorageConfig          `yaml:\"backup,omitempty\"`           // backup config, for storage failures\n\tS3AssumeRoleKey        string                  `yaml:\"s3_assume_role_key\"`         // if set, this key is used for S3 uploads to assume the role defined in the assume_role_arn field of the S3 config\n\tS3AssumeRoleSecret     string                  `yaml:\"s3_assume_role_secret\"`      // if set, this secret is used for S3 uploads to assume the role defined in the assume_role_arn field of the S3 config\n\tS3AssumeRoleArn        string                  `yaml:\"s3_assume_role_arn\"`         // if set, this arn is used by default for S3 uploads\n\tS3AssumeRoleExternalID string                  `yaml:\"s3_assume_role_external_id\"` // if set, this external ID is used by default for S3 uploads\n\n\t// advanced\n\tInsecure                      bool                                `yaml:\"insecure\"`                           // allow chrome to connect to an insecure websocket, bypasses chrome LNA checks\n\tDebug                         DebugConfig                         `yaml:\"debug\"`                              // create dot file on internal error\n\tChromeFlags                   map[string]interface{}              `yaml:\"chrome_flags\"`                 
      // additional flags to pass to Chrome\n\tLatency                       LatencyConfig                       `yaml:\"latency\"`                            // gstreamer latencies, modifying these may break the service\n\tLatencyOverrides              map[types.RequestType]LatencyConfig `yaml:\"latency_overrides\"`                  // latency overrides for different request types, experimental only, will be removed\n\tEnableOneShotSenderReportSync bool                                `yaml:\"enable_one_shot_sender_report_sync\"` // temporary rollout flag enabling one-shot sender report correction for room composite / track requests that previously used audio PTS adjustment disabling\n\tAudioTempoController          AudioTempoController                `yaml:\"audio_tempo_controller\"`             // audio tempo controller\n\tTestOverrides                 TestOverrides                       `yaml:\"test_overrides\"`                     // set of config overrides for testing purposes\n}\n\ntype SessionLimits struct {\n\tFileOutputMaxDuration    time.Duration `yaml:\"file_output_max_duration\"`\n\tFileOutputMaxSize        int64         `yaml:\"file_output_max_size\"` // max on-disk size in bytes before stopping; 0 to disable\n\tStreamOutputMaxDuration  time.Duration `yaml:\"stream_output_max_duration\"`\n\tSegmentOutputMaxDuration time.Duration `yaml:\"segment_output_max_duration\"`\n\tImageOutputMaxDuration   time.Duration `yaml:\"image_output_max_duration\"`\n}\n\ntype DebugConfig struct {\n\tEnableProfiling     bool             `yaml:\"enable_profiling\"`      // create dot file and pprof on internal error\n\tEnableTrackLogging  bool             `yaml:\"enable_track_logging\"`  // log packets and keyframes for each track\n\tEnableStreamLogging bool             `yaml:\"enable_stream_logging\"` // log bytes and keyframes for each stream\n\tEnableChromeLogging bool             `yaml:\"enable_chrome_logging\"` // log all chrome console events\n\tStorageConfig       
`yaml:\",inline\"` // upload config (S3, Azure, GCP, or AliOSS)\n}\n\ntype LatencyConfig struct {\n\tJitterBufferLatency             time.Duration `yaml:\"jitter_buffer_latency\"`                         // jitter buffer max latency for sdk egress\n\tAudioMixerLatency               time.Duration `yaml:\"audio_mixer_latency\"`                           // audio mixer latency, must be greater than jitter buffer latency\n\tPipelineLatency                 time.Duration `yaml:\"pipeline_latency\"`                              // max latency for the entire pipeline\n\tRTPMaxAllowedTsDiff             time.Duration `yaml:\"rtp_max_allowed_ts_diff\"`                       // max allowed PTS discont. for a RTP stream, before applying PTS alignment\n\tRTPMaxDriftAdjustment           time.Duration `yaml:\"rtp_max_drift_adjustment,omitempty\"`            // max allowed drift adjustment for a RTP stream\n\tRTPDriftAdjustmentWindowPercent float64       `yaml:\"rtp_drift_adjustment_window_percent,omitempty\"` // how much to throttle drift adjustment, 0.0 disables it\n\tOldPacketThreshold              time.Duration `yaml:\"old_packet_threshold,omitempty\"`                // synchronizer drops packets older than this, 0 to disable packet drops\n}\n\ntype AudioTempoController struct {\n\tEnabled        bool    `yaml:\"enabled\"`         // enable audio tempo adjustments for compensating PTS drift\n\tAdjustmentRate float64 `yaml:\"adjustment_rate\"` // rate at which to adjust the tempo to compensate for PTS drift\n}\n\nfunc (c *BaseConfig) initLogger(values ...interface{}) error {\n\t_, exists := os.LookupEnv(\"GST_DEBUG\")\n\n\t// If GST_DEBUG is not set, use pre-defined values based on logging level\n\tif !exists {\n\t\tvar gstDebug []string\n\t\tswitch c.Logging.Level {\n\t\tcase \"debug\":\n\t\t\tgstDebug = []string{\"3\"}\n\t\tcase \"info\", \"warn\":\n\t\t\tgstDebug = []string{\"2\"}\n\t\tcase \"error\":\n\t\t\tgstDebug = []string{\"1\"}\n\t\t}\n\t\tgstDebug = 
append(gstDebug,\n\t\t\t\"rtmpclient:4\",\n\t\t\t\"srtlib:1\",\n\t\t)\n\n\t\tif err := os.Setenv(\"GST_DEBUG\", strings.Join(gstDebug, \",\")); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tzl, err := logger.NewZapLogger(c.Logging)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tl := zl.WithValues(values...)\n\n\tlogger.SetLogger(l, \"egress\")\n\tlksdk.SetLogger(medialogutils.NewOverrideLogger(l.WithComponent(\"lksdk\")))\n\treturn nil\n}\n\nfunc (c *BaseConfig) getLatencyConfig(requestType types.RequestType) LatencyConfig {\n\tif override, ok := c.LatencyOverrides[requestType]; ok {\n\t\treturn override\n\t}\n\treturn c.Latency\n}\n"
  },
  {
    "path": "pkg/config/config_test.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage config\n\nimport (\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/livekit\"\n)\n\nfunc TestSegmentNaming(t *testing.T) {\n\tt.Cleanup(func() {\n\t\t_ = os.RemoveAll(\"conf_test/\")\n\t})\n\n\tfor _, test := range []struct {\n\t\tfilenamePrefix               string\n\t\tplaylistName                 string\n\t\tlivePlaylistName             string\n\t\texpectedStorageDir           string\n\t\texpectedPlaylistFilename     string\n\t\texpectedLivePlaylistFilename string\n\t\texpectedSegmentPrefix        string\n\t}{\n\t\t{\n\t\t\tfilenamePrefix: \"\", playlistName: \"playlist\", livePlaylistName: \"\",\n\t\t\texpectedStorageDir: \"\", expectedPlaylistFilename: \"playlist.m3u8\", expectedLivePlaylistFilename: \"\", expectedSegmentPrefix: \"playlist\",\n\t\t},\n\t\t{\n\t\t\tfilenamePrefix: \"\", playlistName: \"conf_test/playlist\", livePlaylistName: \"conf_test/live_playlist\",\n\t\t\texpectedStorageDir: \"conf_test/\", expectedPlaylistFilename: \"playlist.m3u8\", expectedLivePlaylistFilename: \"live_playlist.m3u8\", expectedSegmentPrefix: \"playlist\",\n\t\t},\n\t\t{\n\t\t\tfilenamePrefix: \"filename\", playlistName: \"\", livePlaylistName: \"live_playlist2.m3u8\",\n\t\t\texpectedStorageDir: \"\", expectedPlaylistFilename: \"filename.m3u8\", 
expectedLivePlaylistFilename: \"live_playlist2.m3u8\", expectedSegmentPrefix: \"filename\",\n\t\t},\n\t\t{\n\t\t\tfilenamePrefix: \"filename\", playlistName: \"playlist\", livePlaylistName: \"\",\n\t\t\texpectedStorageDir: \"\", expectedPlaylistFilename: \"playlist.m3u8\", expectedLivePlaylistFilename: \"\", expectedSegmentPrefix: \"filename\",\n\t\t},\n\t\t{\n\t\t\tfilenamePrefix: \"filename\", playlistName: \"conf_test/\", livePlaylistName: \"\",\n\t\t\texpectedStorageDir: \"conf_test/\", expectedPlaylistFilename: \"filename.m3u8\", expectedLivePlaylistFilename: \"\", expectedSegmentPrefix: \"filename\",\n\t\t},\n\t\t{\n\t\t\tfilenamePrefix: \"filename\", playlistName: \"conf_test/playlist\", livePlaylistName: \"\",\n\t\t\texpectedStorageDir: \"conf_test/\", expectedPlaylistFilename: \"playlist.m3u8\", expectedLivePlaylistFilename: \"\", expectedSegmentPrefix: \"filename\",\n\t\t},\n\t\t{\n\t\t\tfilenamePrefix: \"conf_test/\", playlistName: \"playlist\", livePlaylistName: \"\",\n\t\t\texpectedStorageDir: \"conf_test/\", expectedPlaylistFilename: \"playlist.m3u8\", expectedLivePlaylistFilename: \"\", expectedSegmentPrefix: \"playlist\",\n\t\t},\n\t\t{\n\t\t\tfilenamePrefix: \"conf_test/filename\", playlistName: \"playlist\", livePlaylistName: \"\",\n\t\t\texpectedStorageDir: \"conf_test/\", expectedPlaylistFilename: \"playlist.m3u8\", expectedLivePlaylistFilename: \"\", expectedSegmentPrefix: \"filename\",\n\t\t},\n\t\t{\n\t\t\tfilenamePrefix: \"conf_test/filename\", playlistName: \"conf_test/playlist\", livePlaylistName: \"\",\n\t\t\texpectedStorageDir: \"conf_test/\", expectedPlaylistFilename: \"playlist.m3u8\", expectedLivePlaylistFilename: \"\", expectedSegmentPrefix: \"filename\",\n\t\t},\n\t\t{\n\t\t\tfilenamePrefix: \"conf_test_2/filename\", playlistName: \"conf_test/playlist\", livePlaylistName: \"\",\n\t\t\texpectedStorageDir: \"conf_test/\", expectedPlaylistFilename: \"playlist.m3u8\", expectedLivePlaylistFilename: \"\", expectedSegmentPrefix: 
\"conf_test_2/filename\",\n\t\t},\n\t} {\n\t\tp := &PipelineConfig{Info: &livekit.EgressInfo{EgressId: \"egress_ID\"}}\n\t\tseg := &livekit.SegmentedFileOutput{\n\t\t\tFilenamePrefix:   test.filenamePrefix,\n\t\t\tPlaylistName:     test.playlistName,\n\t\t\tLivePlaylistName: test.livePlaylistName,\n\t\t}\n\t\to, err := p.getSegmentConfig(seg, seg)\n\t\trequire.NoError(t, err)\n\n\t\trequire.Equal(t, test.expectedStorageDir, o.StorageDir)\n\t\trequire.Equal(t, test.expectedPlaylistFilename, o.PlaylistFilename)\n\t\trequire.Equal(t, test.expectedLivePlaylistFilename, o.LivePlaylistFilename)\n\t\trequire.Equal(t, test.expectedSegmentPrefix, o.SegmentPrefix)\n\t}\n}\n\nfunc TestValidateAndUpdateOutputParamsRejectsHLSMP3(t *testing.T) {\n\tp := &PipelineConfig{\n\t\tOutputs: map[types.EgressType][]OutputConfig{\n\t\t\ttypes.EgressTypeSegments: {\n\t\t\t\t&SegmentConfig{outputConfig: outputConfig{OutputType: types.OutputTypeHLS}},\n\t\t\t},\n\t\t},\n\t}\n\n\tp.AudioEnabled = true\n\tp.VideoEnabled = false\n\tp.AudioOutCodec = types.MimeTypeMP3\n\tp.Info = &livekit.EgressInfo{}\n\n\terr := p.validateAndUpdateOutputParams()\n\trequire.Error(t, err)\n\trequire.ErrorContains(t, err, \"format application/x-mpegurl incompatible with codec audio/mpeg\")\n}\n\nfunc TestValidateAndUpdateOutputParamsRejectsVideoFileMP3(t *testing.T) {\n\tp := &PipelineConfig{\n\t\tOutputs: map[types.EgressType][]OutputConfig{\n\t\t\ttypes.EgressTypeFile: {\n\t\t\t\t&FileConfig{outputConfig: outputConfig{OutputType: types.OutputTypeMP3}},\n\t\t\t},\n\t\t},\n\t}\n\n\tp.AudioEnabled = true\n\tp.VideoEnabled = true\n\tp.AudioOutCodec = types.MimeTypeMP3\n\tp.VideoOutCodec = types.MimeTypeH264\n\tp.Info = &livekit.EgressInfo{}\n\n\terr := p.validateAndUpdateOutputParams()\n\trequire.Error(t, err)\n\trequire.ErrorContains(t, err, \"format audio/mpeg incompatible with codec video/h264\")\n}\n"
  },
  {
    "path": "pkg/config/encoding.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage config\n\nimport (\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/livekit\"\n)\n\nfunc (p *PipelineConfig) applyPreset(preset livekit.EncodingOptionsPreset) {\n\tswitch preset {\n\tcase livekit.EncodingOptionsPreset_H264_720P_30:\n\t\tp.Width = 1280\n\t\tp.Height = 720\n\t\tp.Framerate = 30\n\t\tp.VideoBitrate = 3000\n\n\tcase livekit.EncodingOptionsPreset_H264_720P_60:\n\t\tp.Width = 1280\n\t\tp.Height = 720\n\t\tp.Framerate = 60\n\t\tp.VideoBitrate = 4500\n\n\tcase livekit.EncodingOptionsPreset_H264_1080P_30:\n\t\tp.Width = 1920\n\t\tp.Height = 1080\n\t\tp.Framerate = 30\n\t\tp.VideoBitrate = 4500\n\n\tcase livekit.EncodingOptionsPreset_H264_1080P_60:\n\t\tp.Width = 1920\n\t\tp.Height = 1080\n\t\tp.Framerate = 60\n\t\tp.VideoBitrate = 6000\n\n\tcase livekit.EncodingOptionsPreset_PORTRAIT_H264_720P_30:\n\t\tp.Width = 720\n\t\tp.Height = 1280\n\t\tp.Framerate = 30\n\t\tp.VideoBitrate = 3000\n\n\tcase livekit.EncodingOptionsPreset_PORTRAIT_H264_720P_60:\n\t\tp.Width = 720\n\t\tp.Height = 1280\n\t\tp.Framerate = 60\n\t\tp.VideoBitrate = 4500\n\n\tcase livekit.EncodingOptionsPreset_PORTRAIT_H264_1080P_30:\n\t\tp.Width = 1080\n\t\tp.Height = 1920\n\t\tp.Framerate = 30\n\t\tp.VideoBitrate = 4500\n\n\tcase livekit.EncodingOptionsPreset_PORTRAIT_H264_1080P_60:\n\t\tp.Width = 
1080\n\t\tp.Height = 1920\n\t\tp.Framerate = 60\n\t\tp.VideoBitrate = 6000\n\t}\n}\n\nfunc (p *PipelineConfig) applyAdvanced(advanced *livekit.EncodingOptions) error {\n\t// audio\n\tswitch advanced.AudioCodec {\n\tcase livekit.AudioCodec_OPUS:\n\t\tp.AudioOutCodec = types.MimeTypeOpus\n\tcase livekit.AudioCodec_AAC:\n\t\tp.AudioOutCodec = types.MimeTypeAAC\n\tcase livekit.AudioCodec_AC_MP3:\n\t\tp.AudioOutCodec = types.MimeTypeMP3\n\t}\n\n\tif advanced.AudioBitrate != 0 {\n\t\tp.AudioBitrate = advanced.AudioBitrate\n\t}\n\tif advanced.AudioFrequency != 0 {\n\t\tp.AudioFrequency = advanced.AudioFrequency\n\t}\n\n\t// video\n\tswitch advanced.VideoCodec {\n\tcase livekit.VideoCodec_H264_BASELINE:\n\t\tp.VideoOutCodec = types.MimeTypeH264\n\t\tp.VideoProfile = types.ProfileBaseline\n\n\tcase livekit.VideoCodec_H264_MAIN:\n\t\tp.VideoOutCodec = types.MimeTypeH264\n\n\tcase livekit.VideoCodec_H264_HIGH:\n\t\tp.VideoOutCodec = types.MimeTypeH264\n\t\tp.VideoProfile = types.ProfileHigh\n\t}\n\n\tif advanced.Width > 0 {\n\t\tif advanced.Width < 16 || advanced.Width%2 == 1 {\n\t\t\treturn errors.ErrInvalidInput(\"width\")\n\t\t}\n\t\tp.Width = advanced.Width\n\t}\n\n\tif advanced.Height > 0 {\n\t\tif advanced.Height < 16 || advanced.Height%2 == 1 {\n\t\t\treturn errors.ErrInvalidInput(\"height\")\n\t\t}\n\t\tp.Height = advanced.Height\n\t}\n\n\tswitch advanced.Depth {\n\tcase 0:\n\tcase 8, 16, 24:\n\t\tp.Depth = advanced.Depth\n\tdefault:\n\t\treturn errors.ErrInvalidInput(\"depth\")\n\t}\n\n\tif advanced.Framerate != 0 {\n\t\tp.Framerate = advanced.Framerate\n\t}\n\tif advanced.VideoBitrate != 0 {\n\t\tp.VideoBitrate = advanced.VideoBitrate\n\t}\n\tif advanced.KeyFrameInterval != 0 {\n\t\tp.KeyFrameInterval = advanced.KeyFrameInterval\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/config/manifest.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage config\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"time\"\n\n\t\"github.com/linkdata/deadlock\"\n)\n\ntype Manifest struct {\n\tEgressID          string `json:\"egress_id,omitempty\"`\n\tRoomID            string `json:\"room_id,omitempty\"`\n\tRoomName          string `json:\"room_name,omitempty\"`\n\tUrl               string `json:\"url,omitempty\"`\n\tStartedAt         int64  `json:\"started_at,omitempty\"`\n\tEndedAt           int64  `json:\"ended_at,omitempty\"`\n\tPublisherIdentity string `json:\"publisher_identity,omitempty\"`\n\tTrackID           string `json:\"track_id,omitempty\"`\n\tTrackKind         string `json:\"track_kind,omitempty\"`\n\tTrackSource       string `json:\"track_source,omitempty\"`\n\tAudioTrackID      string `json:\"audio_track_id,omitempty\"`\n\tVideoTrackID      string `json:\"video_track_id,omitempty\"`\n\n\tmu        deadlock.Mutex\n\tFiles     []*File     `json:\"files,omitempty\"`\n\tPlaylists []*Playlist `json:\"playlists,omitempty\"`\n\tImages    []*Image    `json:\"images,omitempty\"`\n}\n\ntype File struct {\n\tFilename string `json:\"filename,omitempty\"`\n\tLocation string `json:\"location,omitempty\"`\n}\n\ntype Playlist struct {\n\tmu       deadlock.Mutex\n\tLocation string     `json:\"location,omitempty\"`\n\tSegments []*Segment `json:\"segments,omitempty\"`\n}\n\ntype Segment struct 
{\n\tFilename string `json:\"filename,omitempty\"`\n\tLocation string `json:\"location,omitempty\"`\n}\n\ntype Image struct {\n\tFilename  string    `json:\"filename,omitempty\"`\n\tTimestamp time.Time `json:\"timestamp,omitempty\"`\n\tLocation  string    `json:\"location,omitempty\"`\n}\n\nfunc (p *PipelineConfig) initManifest() {\n\tif p.shouldCreateManifest() {\n\t\tp.Manifest = &Manifest{\n\t\t\tEgressID:          p.Info.EgressId,\n\t\t\tRoomID:            p.Info.RoomId,\n\t\t\tRoomName:          p.Info.RoomName,\n\t\t\tUrl:               p.WebUrl,\n\t\t\tStartedAt:         p.Info.StartedAt,\n\t\t\tPublisherIdentity: p.Identity,\n\t\t\tTrackID:           p.TrackID,\n\t\t\tTrackKind:         p.TrackKind,\n\t\t\tTrackSource:       p.TrackSource,\n\t\t\tAudioTrackID:      p.AudioTrackID,\n\t\t\tVideoTrackID:      p.VideoTrackID,\n\t\t}\n\t}\n}\n\nfunc (p *PipelineConfig) shouldCreateManifest() bool {\n\tif p.BackupConfig != nil {\n\t\treturn true\n\t}\n\tif fc := p.GetFileConfig(); fc != nil && !fc.DisableManifest {\n\t\treturn true\n\t}\n\tif sc := p.GetSegmentConfig(); sc != nil && !sc.DisableManifest {\n\t\treturn true\n\t}\n\tfor _, ic := range p.GetImageConfigs() {\n\t\tif !ic.DisableManifest {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (m *Manifest) AddFile(filename, location string) {\n\tm.mu.Lock()\n\tm.Files = append(m.Files, &File{\n\t\tFilename: filename,\n\t\tLocation: location,\n\t})\n\tm.mu.Unlock()\n}\n\nfunc (m *Manifest) AddPlaylist() *Playlist {\n\tp := &Playlist{}\n\n\tm.mu.Lock()\n\tm.Playlists = append(m.Playlists, p)\n\tm.mu.Unlock()\n\n\treturn p\n}\n\nfunc (p *Playlist) UpdateLocation(location string) {\n\tp.mu.Lock()\n\tp.Location = location\n\tp.mu.Unlock()\n}\n\nfunc (p *Playlist) AddSegment(filename, location string) {\n\tp.mu.Lock()\n\tp.Segments = append(p.Segments, &Segment{\n\t\tFilename: filename,\n\t\tLocation: location,\n\t})\n\tp.mu.Unlock()\n}\n\nfunc (m *Manifest) AddImage(filename string, ts time.Time, location 
string) {\n\tm.mu.Lock()\n\tm.Images = append(m.Images, &Image{\n\t\tFilename:  filename,\n\t\tTimestamp: ts,\n\t\tLocation:  location,\n\t})\n\tm.mu.Unlock()\n}\n\nfunc (m *Manifest) Close(endedAt int64) ([]byte, error) {\n\tm.EndedAt = endedAt\n\n\tbuf := bytes.NewBuffer(nil)\n\tenc := json.NewEncoder(buf)\n\tenc.SetEscapeHTML(false)\n\tif err := enc.Encode(m); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}\n"
  },
  {
    "path": "pkg/config/output.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage config\n\nimport (\n\t\"net/url\"\n\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/egress\"\n\t\"github.com/livekit/protocol/livekit\"\n)\n\nconst StreamKeyframeInterval = 4.0\n\ntype OutputConfig interface {\n\tGetOutputType() types.OutputType\n}\n\ntype outputConfig struct {\n\ttypes.OutputType\n}\n\nfunc (o outputConfig) GetOutputType() types.OutputType {\n\treturn o.OutputType\n}\n\nfunc (p *PipelineConfig) updateEncodedOutputs(req egress.EncodedOutput) error {\n\tfiles := req.GetFileOutputs()\n\tstreams := req.GetStreamOutputs()\n\tsegments := req.GetSegmentOutputs()\n\timages := req.GetImageOutputs()\n\n\t// file output\n\tvar file *livekit.EncodedFileOutput\n\tswitch len(files) {\n\tcase 0:\n\t\tif r, ok := req.(egress.EncodedOutputDeprecated); ok {\n\t\t\tfile = r.GetFile()\n\t\t}\n\tcase 1:\n\t\tfile = files[0]\n\tdefault:\n\t\treturn errors.ErrInvalidInput(\"multiple file outputs\")\n\t}\n\tif file != nil {\n\t\tconf, err := p.getEncodedFileConfig(file)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tp.Outputs[types.EgressTypeFile] = []OutputConfig{conf}\n\t\tp.OutputCount.Inc()\n\t\tp.FinalizationRequired = true\n\t\tif p.VideoEnabled {\n\t\t\tp.VideoEncoding = true\n\t\t}\n\n\t\tp.Info.FileResults = []*livekit.FileInfo{conf.FileInfo}\n\t\tif 
len(streams)+len(segments)+len(images) == 0 {\n\t\t\tp.Info.Result = &livekit.EgressInfo_File{File: conf.FileInfo}\n\t\t}\n\t}\n\n\t// stream output\n\tvar stream *livekit.StreamOutput\n\tswitch len(streams) {\n\tcase 0:\n\t\tif r, ok := req.(egress.EncodedOutputDeprecated); ok {\n\t\t\tstream = r.GetStream()\n\t\t}\n\tcase 1:\n\t\tstream = streams[0]\n\tdefault:\n\t\treturn errors.ErrInvalidInput(\"multiple stream outputs\")\n\t}\n\tif stream != nil {\n\t\tvar outputType types.OutputType\n\t\tswitch stream.Protocol {\n\t\tcase livekit.StreamProtocol_DEFAULT_PROTOCOL:\n\t\t\tif len(stream.Urls) == 0 {\n\t\t\t\treturn errors.ErrInvalidInput(\"stream protocol\")\n\t\t\t}\n\n\t\t\tparsed, err := url.Parse(stream.Urls[0])\n\t\t\tif err != nil {\n\t\t\t\treturn errors.ErrInvalidUrl(stream.Urls[0], err.Error())\n\t\t\t}\n\n\t\t\tvar ok bool\n\t\t\toutputType, ok = types.StreamOutputTypes[parsed.Scheme]\n\t\t\tif !ok {\n\t\t\t\treturn errors.ErrInvalidUrl(stream.Urls[0], \"invalid protocol\")\n\t\t\t}\n\n\t\tcase livekit.StreamProtocol_RTMP:\n\t\t\toutputType = types.OutputTypeRTMP\n\n\t\tcase livekit.StreamProtocol_SRT:\n\t\t\toutputType = types.OutputTypeSRT\n\t\t}\n\n\t\tconf, err := p.getStreamConfig(outputType, stream.Urls)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tp.Outputs[types.EgressTypeStream] = []OutputConfig{conf}\n\t\tp.OutputCount.Add(int32(len(stream.Urls)))\n\t\tif p.VideoEnabled {\n\t\t\tp.VideoEncoding = true\n\t\t}\n\n\t\tstreamInfoList := make([]*livekit.StreamInfo, 0, len(stream.Urls))\n\t\tconf.Streams.Range(func(_, stream any) bool {\n\t\t\tstreamInfoList = append(streamInfoList, stream.(*Stream).StreamInfo)\n\t\t\treturn true\n\t\t})\n\t\tp.Info.StreamResults = streamInfoList\n\n\t\tif len(files)+len(segments)+len(images) == 0 {\n\t\t\t// empty stream output only valid in combination with other outputs\n\t\t\tif len(stream.Urls) == 0 {\n\t\t\t\treturn errors.ErrInvalidInput(\"stream url\")\n\t\t\t}\n\n\t\t\tp.Info.Result = 
&livekit.EgressInfo_Stream{Stream: &livekit.StreamInfoList{Info: streamInfoList}} //nolint:staticcheck // keep deprecated field for older clients\n\t\t}\n\t}\n\n\t// segment output\n\tvar segment *livekit.SegmentedFileOutput\n\tswitch len(segments) {\n\tcase 0:\n\t\tif r, ok := req.(egress.EncodedOutputDeprecated); ok {\n\t\t\tsegment = r.GetSegments()\n\t\t}\n\tcase 1:\n\t\tsegment = segments[0]\n\tdefault:\n\t\treturn errors.ErrInvalidInput(\"multiple segmented file outputs\")\n\t}\n\tif segment != nil {\n\t\tconf, err := p.getSegmentConfig(segment, segment)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tp.Outputs[types.EgressTypeSegments] = []OutputConfig{conf}\n\t\tp.OutputCount.Inc()\n\t\tp.FinalizationRequired = true\n\t\tif p.VideoEnabled {\n\t\t\tp.VideoEncoding = true\n\t\t}\n\n\t\tp.Info.SegmentResults = []*livekit.SegmentsInfo{conf.SegmentsInfo}\n\t\tif len(streams)+len(files)+len(images) == 0 {\n\t\t\tp.Info.Result = &livekit.EgressInfo_Segments{Segments: conf.SegmentsInfo}\n\t\t}\n\t}\n\n\tif segmentConf := p.Outputs[types.EgressTypeSegments]; segmentConf != nil {\n\t\tif stream != nil && p.KeyFrameInterval > 0 {\n\t\t\t// segment duration must match keyframe interval - use the lower of the two\n\t\t\tconf := segmentConf[0].(*SegmentConfig)\n\t\t\tconf.SegmentDuration = min(int(p.KeyFrameInterval), conf.SegmentDuration)\n\t\t}\n\t\tp.KeyFrameInterval = 0\n\t} else if p.KeyFrameInterval == 0 && p.Outputs[types.EgressTypeStream] != nil {\n\t\t// default 4s for streams\n\t\tp.KeyFrameInterval = StreamKeyframeInterval\n\t}\n\n\t// image output\n\tif len(images) > 0 {\n\t\tif !p.VideoEnabled {\n\t\t\treturn errors.ErrInvalidInput(\"audio_only images\")\n\t\t}\n\n\t\tif len(p.Outputs) == 0 {\n\t\t\t// enforce video only\n\t\t\tp.AudioEnabled = false\n\t\t\tp.AudioTrackID = \"\"\n\t\t\tp.AudioTranscoding = false\n\t\t}\n\n\t\tfor _, img := range images {\n\t\t\tconf, err := p.getImageConfig(img, img)\n\t\t\tif err != nil {\n\t\t\t\treturn 
err\n\t\t\t}\n\n\t\t\tp.Outputs[types.EgressTypeImages] = append(p.Outputs[types.EgressTypeImages], conf)\n\t\t\tp.OutputCount.Inc()\n\t\t\tp.FinalizationRequired = true\n\n\t\t\tp.Info.ImageResults = append(p.Info.ImageResults, conf.ImagesInfo)\n\t\t}\n\t}\n\n\tif p.OutputCount.Load() == 0 {\n\t\treturn errors.ErrInvalidInput(\"output\")\n\t}\n\n\treturn nil\n}\n\nfunc (p *PipelineConfig) updateOutputs(req *livekit.ExportReplayRequest) error {\n\tif len(req.Outputs) == 0 {\n\t\treturn errors.ErrInvalidInput(\"output\")\n\t}\n\n\t// Non-live pipelines produce data faster than realtime. Stream outputs\n\t// (RTMP, WebSocket) cannot ingest faster than 1x playback speed.\n\tif !p.Live {\n\t\tfor _, output := range req.Outputs {\n\t\t\tif _, ok := output.Config.(*livekit.Output_Stream); ok {\n\t\t\t\treturn errors.ErrNotSupported(\"stream output for non-live pipeline\")\n\t\t\t}\n\t\t}\n\t}\n\n\tvar hasFile, hasStream, hasSegments bool\n\tvar fileCount, streamCount, segmentCount int\n\n\tfor _, output := range req.Outputs {\n\t\tstorage := resolveStorageConfig(output.Storage, req.Storage)\n\n\t\tswitch o := output.Config.(type) {\n\t\tcase *livekit.Output_File:\n\t\t\tfileCount++\n\t\t\tif fileCount > 1 {\n\t\t\t\treturn errors.ErrInvalidInput(\"multiple file outputs\")\n\t\t\t}\n\t\t\thasFile = true\n\n\t\t\tconf, err := p.getFileConfig(fileTypeToOutputType(o.File.FileType), o.File.GetFilepath(), o.File.GetDisableManifest(), storage)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tp.Outputs[types.EgressTypeFile] = []OutputConfig{conf}\n\t\t\tp.OutputCount.Inc()\n\t\t\tp.FinalizationRequired = true\n\t\t\tif p.VideoEnabled {\n\t\t\t\tp.VideoEncoding = true\n\t\t\t}\n\n\t\t\tp.Info.FileResults = []*livekit.FileInfo{conf.FileInfo}\n\n\t\tcase *livekit.Output_Stream:\n\t\t\tstream := o.Stream\n\t\t\tstreamCount++\n\t\t\tif streamCount > 1 {\n\t\t\t\treturn errors.ErrInvalidInput(\"multiple stream outputs\")\n\t\t\t}\n\t\t\thasStream = true\n\n\t\t\tvar 
outputType types.OutputType\n\t\t\tvar egressType types.EgressType\n\n\t\t\tswitch stream.Protocol {\n\t\t\tcase livekit.StreamProtocol_DEFAULT_PROTOCOL:\n\t\t\t\tif len(stream.Urls) == 0 {\n\t\t\t\t\treturn errors.ErrInvalidInput(\"stream protocol\")\n\t\t\t\t}\n\t\t\t\tparsed, err := url.Parse(stream.Urls[0])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn errors.ErrInvalidUrl(stream.Urls[0], err.Error())\n\t\t\t\t}\n\t\t\t\tvar ok bool\n\t\t\t\toutputType, ok = types.StreamOutputTypes[parsed.Scheme]\n\t\t\t\tif !ok {\n\t\t\t\t\treturn errors.ErrInvalidUrl(stream.Urls[0], \"invalid protocol\")\n\t\t\t\t}\n\t\t\t\tif parsed.Scheme == \"ws\" || parsed.Scheme == \"wss\" {\n\t\t\t\t\tegressType = types.EgressTypeWebsocket\n\t\t\t\t} else {\n\t\t\t\t\tegressType = types.EgressTypeStream\n\t\t\t\t}\n\n\t\t\tcase livekit.StreamProtocol_RTMP:\n\t\t\t\toutputType = types.OutputTypeRTMP\n\t\t\t\tegressType = types.EgressTypeStream\n\n\t\t\tcase livekit.StreamProtocol_SRT:\n\t\t\t\toutputType = types.OutputTypeSRT\n\t\t\t\tegressType = types.EgressTypeStream\n\n\t\t\tcase livekit.StreamProtocol_WEBSOCKET:\n\t\t\t\toutputType = types.OutputTypeRaw\n\t\t\t\tegressType = types.EgressTypeWebsocket\n\t\t\t}\n\n\t\t\t// websocket is audio-only\n\t\t\tif egressType == types.EgressTypeWebsocket && p.VideoEnabled && p.AudioEnabled {\n\t\t\t\tp.VideoEnabled = false\n\t\t\t\tp.VideoDecoding = false\n\t\t\t}\n\n\t\t\tconf, err := p.getStreamConfig(outputType, stream.Urls)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tp.Outputs[egressType] = []OutputConfig{conf}\n\t\t\tp.OutputCount.Add(int32(len(stream.Urls)))\n\t\t\tif p.VideoEnabled {\n\t\t\t\tp.VideoEncoding = true\n\t\t\t}\n\n\t\t\tstreamInfoList := make([]*livekit.StreamInfo, 0, len(stream.Urls))\n\t\t\tconf.Streams.Range(func(_, s any) bool {\n\t\t\t\tstreamInfoList = append(streamInfoList, s.(*Stream).StreamInfo)\n\t\t\t\treturn true\n\t\t\t})\n\t\t\tp.Info.StreamResults = streamInfoList\n\n\t\tcase 
*livekit.Output_Segments:\n\t\t\tsegmentCount++\n\t\t\tif segmentCount > 1 {\n\t\t\t\treturn errors.ErrInvalidInput(\"multiple segmented file outputs\")\n\t\t\t}\n\t\t\thasSegments = true\n\n\t\t\tconf, err := p.getSegmentConfig(o.Segments, storage)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tp.Outputs[types.EgressTypeSegments] = []OutputConfig{conf}\n\t\t\tp.OutputCount.Inc()\n\t\t\tp.FinalizationRequired = true\n\t\t\tif p.VideoEnabled {\n\t\t\t\tp.VideoEncoding = true\n\t\t\t}\n\n\t\t\tp.Info.SegmentResults = []*livekit.SegmentsInfo{conf.SegmentsInfo}\n\n\t\tcase *livekit.Output_Images:\n\t\t\tif !p.VideoEnabled {\n\t\t\t\treturn errors.ErrInvalidInput(\"audio_only images\")\n\t\t\t}\n\n\t\t\tconf, err := p.getImageConfig(o.Images, storage)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tp.Outputs[types.EgressTypeImages] = append(p.Outputs[types.EgressTypeImages], conf)\n\t\t\tp.OutputCount.Inc()\n\t\t\tp.FinalizationRequired = true\n\n\t\t\tp.Info.ImageResults = append(p.Info.ImageResults, conf.ImagesInfo)\n\n\t\tdefault:\n\t\t\treturn errors.ErrInvalidInput(\"output config\")\n\t\t}\n\t}\n\n\tif p.OutputCount.Load() == 0 {\n\t\treturn errors.ErrInvalidInput(\"output\")\n\t}\n\n\t// image-only: enforce video only\n\tif !hasFile && !hasStream && !hasSegments && len(p.Outputs[types.EgressTypeImages]) > 0 {\n\t\tp.AudioEnabled = false\n\t\tp.AudioTranscoding = false\n\t}\n\n\t// populate deprecated single-result field for older clients\n\tif hasFile && !hasStream && !hasSegments && len(p.Outputs[types.EgressTypeImages]) == 0 {\n\t\tif fc := p.GetFileConfig(); fc != nil {\n\t\t\tp.Info.Result = &livekit.EgressInfo_File{File: fc.FileInfo}\n\t\t}\n\t} else if hasStream && !hasFile && !hasSegments && len(p.Outputs[types.EgressTypeImages]) == 0 {\n\t\tif len(p.Info.StreamResults) > 0 {\n\t\t\tp.Info.Result = &livekit.EgressInfo_Stream{Stream: &livekit.StreamInfoList{Info: p.Info.StreamResults}} //nolint:staticcheck\n\t\t}\n\t} else if 
hasSegments && !hasFile && !hasStream && len(p.Outputs[types.EgressTypeImages]) == 0 {\n\t\tif sc := p.GetSegmentConfig(); sc != nil {\n\t\t\tp.Info.Result = &livekit.EgressInfo_Segments{Segments: sc.SegmentsInfo}\n\t\t}\n\t}\n\n\t// keyframe interval handling\n\tif segmentConf := p.Outputs[types.EgressTypeSegments]; segmentConf != nil {\n\t\tif hasStream && p.KeyFrameInterval > 0 {\n\t\t\tconf := segmentConf[0].(*SegmentConfig)\n\t\t\tconf.SegmentDuration = min(int(p.KeyFrameInterval), conf.SegmentDuration)\n\t\t}\n\t\tp.KeyFrameInterval = 0\n\t} else if p.KeyFrameInterval == 0 && p.Outputs[types.EgressTypeStream] != nil {\n\t\tp.KeyFrameInterval = StreamKeyframeInterval\n\t}\n\n\treturn nil\n}\n\nfunc (p *PipelineConfig) updateDirectOutput(req *livekit.TrackEgressRequest) error {\n\tswitch o := req.Output.(type) {\n\tcase *livekit.TrackEgressRequest_File:\n\t\tconf, err := p.getDirectFileConfig(o.File)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tp.Info.FileResults = []*livekit.FileInfo{conf.FileInfo}\n\t\tp.Info.Result = &livekit.EgressInfo_File{File: conf.FileInfo}\n\n\t\tp.Outputs[types.EgressTypeFile] = []OutputConfig{conf}\n\t\tp.OutputCount.Inc()\n\t\tp.FinalizationRequired = true\n\n\tcase *livekit.TrackEgressRequest_WebsocketUrl:\n\t\tconf, err := p.getStreamConfig(types.OutputTypeRaw, []string{o.WebsocketUrl})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tstreamInfoList := make([]*livekit.StreamInfo, 0, 1)\n\t\tconf.Streams.Range(func(_, stream any) bool {\n\t\t\tstreamInfoList = append(streamInfoList, stream.(*Stream).StreamInfo)\n\t\t\treturn true\n\t\t})\n\n\t\tp.Info.StreamResults = streamInfoList\n\t\tp.Info.Result = &livekit.EgressInfo_Stream{Stream: &livekit.StreamInfoList{Info: streamInfoList}} //nolint:staticcheck // keep deprecated field for older clients\n\n\t\tp.Outputs[types.EgressTypeWebsocket] = []OutputConfig{conf}\n\t\tp.OutputCount.Inc()\n\n\tdefault:\n\t\treturn errors.ErrInvalidInput(\"output\")\n\t}\n\n\treturn 
nil\n}\n"
  },
  {
    "path": "pkg/config/output_file.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage config\n\nimport (\n\t\"fmt\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/egress\"\n\t\"github.com/livekit/protocol/livekit\"\n)\n\ntype FileConfig struct {\n\toutputConfig\n\n\tFileInfo        *livekit.FileInfo\n\tLocalFilepath   string\n\tStorageFilepath string\n\n\tDisableManifest bool\n\tStorageConfig   *StorageConfig\n}\n\nfunc (p *PipelineConfig) GetFileConfig() *FileConfig {\n\to, ok := p.Outputs[types.EgressTypeFile]\n\tif !ok || len(o) == 0 {\n\t\treturn nil\n\t}\n\treturn o[0].(*FileConfig)\n}\n\nfunc (p *PipelineConfig) getEncodedFileConfig(file *livekit.EncodedFileOutput) (*FileConfig, error) {\n\treturn p.getFileConfig(fileTypeToOutputType(file.FileType), file.GetFilepath(), file.GetDisableManifest(), file)\n}\n\nfunc (p *PipelineConfig) getDirectFileConfig(file *livekit.DirectFileOutput) (*FileConfig, error) {\n\treturn p.getFileConfig(types.OutputTypeUnknownFile, file.GetFilepath(), file.GetDisableManifest(), file)\n}\n\nfunc fileTypeToOutputType(ft livekit.EncodedFileType) types.OutputType {\n\tswitch ft {\n\tcase livekit.EncodedFileType_MP4:\n\t\treturn types.OutputTypeMP4\n\tcase livekit.EncodedFileType_OGG:\n\t\treturn types.OutputTypeOGG\n\tcase livekit.EncodedFileType_MP3:\n\t\treturn 
types.OutputTypeMP3\n\tdefault:\n\t\treturn types.OutputTypeUnknownFile\n\t}\n}\n\nfunc (p *PipelineConfig) getFileConfig(outputType types.OutputType, filepath string, disableManifest bool, upload egress.UploadRequest) (*FileConfig, error) {\n\tsc, err := p.getStorageConfig(upload)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfilepath = clean(filepath)\n\n\t// On retry, explicit paths must include {retry} to avoid overwriting previous attempt.\n\t// Empty or directory-only paths are auto-generated with retry count appended.\n\tif p.Info.RetryCount > 0 && filepath != \"\" && !strings.HasSuffix(filepath, \"/\") && !strings.Contains(filepath, \"{retry}\") {\n\t\treturn nil, errors.ErrNonRetryableOutput\n\t}\n\n\tconf := &FileConfig{\n\t\toutputConfig:    outputConfig{OutputType: outputType},\n\t\tFileInfo:        &livekit.FileInfo{},\n\t\tStorageFilepath: filepath,\n\t\tDisableManifest: disableManifest,\n\t\tStorageConfig:   sc,\n\t}\n\n\t// filename\n\tidentifier, replacements := p.getFilenameInfo()\n\tif conf.OutputType != types.OutputTypeUnknownFile {\n\t\terr := conf.updateFilepath(p, identifier, replacements)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tconf.StorageFilepath = stringReplace(conf.StorageFilepath, replacements)\n\t}\n\n\treturn conf, nil\n}\n\nfunc (p *PipelineConfig) getFilenameInfo() (string, map[string]string) {\n\tnow := time.Now()\n\tutc := fmt.Sprintf(\"%s%03d\", now.Format(\"20060102150405\"), now.UnixMilli()%1000)\n\n\treplacements := make(map[string]string)\n\tif p.Info.RetryCount > 0 {\n\t\treplacements[\"{retry}\"] = fmt.Sprintf(\"%d\", p.Info.RetryCount)\n\t}\n\tif p.Info.RoomName != \"\" {\n\t\treplacements[\"{room_name}\"] = p.Info.RoomName\n\t\treplacements[\"{room_id}\"] = p.Info.RoomId\n\t\treplacements[\"{time}\"] = now.Format(\"2006-01-02T150405\")\n\t\treplacements[\"{utc}\"] = utc\n\t\treturn p.Info.RoomName, replacements\n\t}\n\n\treplacements[\"{time}\"] = 
now.Format(\"2006-01-02T150405\")\n\treplacements[\"{utc}\"] = utc\n\n\treturn \"web\", replacements\n}\n\nfunc (o *FileConfig) updateFilepath(p *PipelineConfig, identifier string, replacements map[string]string) error {\n\to.StorageFilepath = stringReplace(o.StorageFilepath, replacements)\n\n\t// get file extension\n\text := types.FileExtensionForOutputType[o.OutputType]\n\n\tif o.StorageFilepath == \"\" || strings.HasSuffix(o.StorageFilepath, \"/\") {\n\t\t// generate filepath\n\t\tbaseName := fmt.Sprintf(\"%s-%s\", identifier, time.Now().Format(\"2006-01-02T150405\"))\n\t\tif p.Info.RetryCount > 0 {\n\t\t\tbaseName = fmt.Sprintf(\"%s-%d\", baseName, p.Info.RetryCount)\n\t\t}\n\t\to.StorageFilepath = fmt.Sprintf(\"%s%s%s\", o.StorageFilepath, baseName, ext)\n\t} else if !strings.HasSuffix(o.StorageFilepath, string(ext)) {\n\t\t// check for existing (incorrect) extension\n\t\tif extIdx := strings.LastIndex(o.StorageFilepath, \".\"); extIdx > -1 {\n\t\t\texistingExt := types.FileExtension(o.StorageFilepath[extIdx:])\n\t\t\tif _, ok := types.FileExtensions[existingExt]; ok {\n\t\t\t\to.StorageFilepath = o.StorageFilepath[:extIdx]\n\t\t\t}\n\t\t}\n\n\t\t// add file extension\n\t\to.StorageFilepath = o.StorageFilepath + string(ext)\n\t}\n\n\t// update filename\n\to.FileInfo.Filename = o.StorageFilepath\n\n\t// get local filepath\n\t_, filename := path.Split(o.StorageFilepath)\n\n\t// write to tmp dir\n\to.LocalFilepath = path.Join(p.TmpDir, filename)\n\n\treturn nil\n}\n\nfunc clean(filepath string) string {\n\thasEndingSlash := strings.HasSuffix(filepath, \"/\")\n\tfilepath = path.Clean(filepath)\n\tfor strings.HasPrefix(filepath, \"../\") {\n\t\tfilepath = filepath[3:]\n\t}\n\tif filepath == \"\" || filepath == \".\" || filepath == \"..\" {\n\t\treturn \"\"\n\t}\n\tif hasEndingSlash {\n\t\treturn filepath + \"/\"\n\t}\n\treturn filepath\n}\n"
  },
  {
    "path": "pkg/config/output_image.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage config\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"time\"\n\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/egress\"\n\t\"github.com/livekit/protocol/livekit\"\n\t\"github.com/livekit/protocol/utils\"\n)\n\ntype ImageConfig struct {\n\toutputConfig\n\n\tId string // Used internally to map a gst Bin/element back to a sink and as part of the path\n\n\tImagesInfo     *livekit.ImagesInfo\n\tLocalDir       string\n\tStorageDir     string\n\tImagePrefix    string\n\tImageSuffix    livekit.ImageFileSuffix\n\tImageExtension types.FileExtension\n\n\tDisableManifest bool\n\tStorageConfig   *StorageConfig\n\n\tCaptureInterval uint32\n\tWidth           int32\n\tHeight          int32\n\tImageOutCodec   types.MimeType\n}\n\nfunc (p *PipelineConfig) GetImageConfigs() []*ImageConfig {\n\to := p.Outputs[types.EgressTypeImages]\n\n\tvar configs []*ImageConfig\n\tfor _, c := range o {\n\t\tconfigs = append(configs, c.(*ImageConfig))\n\t}\n\n\treturn configs\n}\n\nfunc (p *PipelineConfig) getImageConfig(images *livekit.ImageOutput, upload egress.UploadRequest) (*ImageConfig, error) {\n\toutCodec, outputType, err := getMimeTypes(images.ImageCodec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsc, err := p.getStorageConfig(upload)\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\n\tfilenamePrefix := clean(images.FilenamePrefix)\n\tconf := &ImageConfig{\n\t\toutputConfig: outputConfig{\n\t\t\tOutputType: outputType,\n\t\t},\n\n\t\tId: utils.NewGuid(\"\"),\n\t\tImagesInfo: &livekit.ImagesInfo{\n\t\t\tFilenamePrefix: filenamePrefix,\n\t\t},\n\t\tImagePrefix:     filenamePrefix,\n\t\tImageSuffix:     images.FilenameSuffix,\n\t\tDisableManifest: images.DisableManifest,\n\t\tStorageConfig:   sc,\n\t\tCaptureInterval: images.CaptureInterval,\n\t\tWidth:           images.Width,\n\t\tHeight:          images.Height,\n\t\tImageOutCodec:   outCodec,\n\t}\n\n\tif conf.CaptureInterval == 0 {\n\t\t// 10s by default\n\t\tconf.CaptureInterval = 10\n\t}\n\n\t// Set default dimensions for RoomComposite and Web. For all SDKs input, default will be\n\t// set from the track dimensions\n\tswitch req := p.Info.Request.(type) {\n\tcase *livekit.EgressInfo_RoomComposite, *livekit.EgressInfo_Web:\n\t\tif conf.Width == 0 {\n\t\t\tconf.Width = p.Width\n\t\t}\n\t\tif conf.Height == 0 {\n\t\t\tconf.Height = p.Height\n\t\t}\n\tcase *livekit.EgressInfo_Replay:\n\t\tswitch req.Replay.Source.(type) {\n\t\tcase *livekit.ExportReplayRequest_Template, *livekit.ExportReplayRequest_Web:\n\t\t\tif conf.Width == 0 {\n\t\t\t\tconf.Width = p.Width\n\t\t\t}\n\t\t\tif conf.Height == 0 {\n\t\t\t\tconf.Height = p.Height\n\t\t\t}\n\t\t}\n\t}\n\n\t// filename\n\terr = conf.updatePrefix(p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn conf, nil\n}\n\nfunc (o *ImageConfig) updatePrefix(p *PipelineConfig) error {\n\tidentifier, replacements := p.getFilenameInfo()\n\n\to.ImagePrefix = stringReplace(o.ImagePrefix, replacements)\n\to.ImagesInfo.FilenamePrefix = stringReplace(o.ImagesInfo.FilenamePrefix, replacements)\n\to.ImageExtension = types.FileExtensionForOutputType[o.OutputType]\n\n\timagesDir, imagesPrefix := path.Split(o.ImagePrefix)\n\to.StorageDir = imagesDir\n\n\t// ensure playlistName\n\tif imagesPrefix == \"\" {\n\t\timagesPrefix = fmt.Sprintf(\"%s-%s\", 
identifier, time.Now().Format(\"2006-01-02T150405\"))\n\t\tif p.Info.RetryCount > 0 {\n\t\t\timagesPrefix = fmt.Sprintf(\"%s-%d\", imagesPrefix, p.Info.RetryCount)\n\t\t}\n\t}\n\n\t// update config\n\to.ImagePrefix = imagesPrefix\n\n\t// Prepend the configuration base directory and the egress Id, and slug to prevent conflict if\n\t// there is more than one image output\n\t// os.ModeDir creates a directory with mode 000 when mapping the directory outside the container\n\to.LocalDir = path.Join(p.TmpDir, o.Id)\n\treturn os.MkdirAll(o.LocalDir, 0755)\n}\n\nfunc getMimeTypes(imageCodec livekit.ImageCodec) (types.MimeType, types.OutputType, error) {\n\tswitch imageCodec {\n\tcase livekit.ImageCodec_IC_DEFAULT, livekit.ImageCodec_IC_JPEG:\n\t\treturn types.MimeTypeJPEG, types.OutputTypeJPEG, nil\n\tdefault:\n\t\treturn \"\", \"\", errors.ErrNoCompatibleCodec\n\t}\n}\n"
  },
  {
    "path": "pkg/config/output_segment.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage config\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/egress\"\n\t\"github.com/livekit/protocol/livekit\"\n)\n\ntype SegmentConfig struct {\n\toutputConfig\n\n\tSegmentsInfo         *livekit.SegmentsInfo\n\tLocalDir             string\n\tStorageDir           string\n\tPlaylistFilename     string\n\tLivePlaylistFilename string\n\tSegmentPrefix        string\n\tSegmentSuffix        livekit.SegmentedFileSuffix\n\tSegmentDuration      int\n\n\tDisableManifest bool\n\tStorageConfig   *StorageConfig\n}\n\nfunc (p *PipelineConfig) GetSegmentConfig() *SegmentConfig {\n\to, ok := p.Outputs[types.EgressTypeSegments]\n\tif !ok || len(o) == 0 {\n\t\treturn nil\n\t}\n\treturn o[0].(*SegmentConfig)\n}\n\n// segments should always be added last, so we can check keyframe interval from file/stream\nfunc (p *PipelineConfig) getSegmentConfig(segments *livekit.SegmentedFileOutput, upload egress.UploadRequest) (*SegmentConfig, error) {\n\tsc, err := p.getStorageConfig(upload)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tprefix := clean(segments.FilenamePrefix)\n\tplaylist := clean(segments.PlaylistName)\n\n\t// On retry, segment filenames are \"{prefix}_{index}.ts\" so prefix must contain {retry}\n\t// to avoid 
overwriting. When prefix is empty it derives from playlist name, so playlist\n\t// must contain {retry}. When both are empty, names are auto-generated with retry count.\n\tif p.Info.RetryCount > 0 {\n\t\tif prefix != \"\" && !strings.Contains(prefix, \"{retry}\") {\n\t\t\treturn nil, errors.ErrNonRetryableOutput\n\t\t}\n\t\tif prefix == \"\" && playlist != \"\" && !strings.Contains(playlist, \"{retry}\") {\n\t\t\treturn nil, errors.ErrNonRetryableOutput\n\t\t}\n\t}\n\n\tconf := &SegmentConfig{\n\t\tSegmentsInfo:         &livekit.SegmentsInfo{},\n\t\tSegmentPrefix:        prefix,\n\t\tSegmentSuffix:        segments.FilenameSuffix,\n\t\tPlaylistFilename:     playlist,\n\t\tLivePlaylistFilename: clean(segments.LivePlaylistName),\n\t\tSegmentDuration:      int(segments.SegmentDuration),\n\t\tDisableManifest:      segments.DisableManifest,\n\t\tStorageConfig:        sc,\n\t}\n\n\tif conf.SegmentDuration == 0 {\n\t\tconf.SegmentDuration = 4\n\t}\n\n\tswitch segments.Protocol {\n\tcase livekit.SegmentedFileProtocol_DEFAULT_SEGMENTED_FILE_PROTOCOL,\n\t\tlivekit.SegmentedFileProtocol_HLS_PROTOCOL:\n\t\tconf.OutputType = types.OutputTypeHLS\n\t}\n\n\t// filename\n\tif err = conf.updatePrefixAndPlaylist(p); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn conf, nil\n}\n\nfunc removeKnownExtension(filename string) string {\n\tif extIdx := strings.LastIndex(filename, \".\"); extIdx > -1 {\n\t\texistingExt := types.FileExtension(filename[extIdx:])\n\t\tif _, ok := types.FileExtensions[existingExt]; ok {\n\t\t\tfilename = filename[:extIdx]\n\t\t}\n\t}\n\n\treturn filename\n}\n\nfunc (o *SegmentConfig) updatePrefixAndPlaylist(p *PipelineConfig) error {\n\tidentifier, replacements := p.getFilenameInfo()\n\n\to.SegmentPrefix = stringReplace(o.SegmentPrefix, replacements)\n\to.PlaylistFilename = stringReplace(o.PlaylistFilename, replacements)\n\to.LivePlaylistFilename = stringReplace(o.LivePlaylistFilename, replacements)\n\n\text := 
types.FileExtensionForOutputType[o.OutputType]\n\n\tplaylistDir, playlistName := path.Split(o.PlaylistFilename)\n\tlivePlaylistDir, livePlaylistName := path.Split(o.LivePlaylistFilename)\n\tsegmentDir, segmentPrefix := path.Split(o.SegmentPrefix)\n\n\t// force live playlist to be in the same directory as the main playlist\n\tif livePlaylistDir != \"\" && livePlaylistDir != playlistDir {\n\t\treturn errors.ErrInvalidInput(\"live_playlist_name must be in same directory as playlist_name\")\n\t}\n\n\t// remove extension from playlist name\n\tplaylistName = removeKnownExtension(playlistName)\n\tlivePlaylistName = removeKnownExtension(livePlaylistName)\n\n\t// only keep segmentDir if it is a subdirectory of playlistDir\n\tif segmentDir != \"\" {\n\t\tswitch playlistDir {\n\t\tcase segmentDir:\n\t\t\tsegmentDir = \"\"\n\t\tcase \"\":\n\t\t\tplaylistDir = segmentDir\n\t\t\tsegmentDir = \"\"\n\t\t}\n\t}\n\to.StorageDir = playlistDir\n\n\t// ensure playlistName\n\tif playlistName == \"\" {\n\t\tif segmentPrefix != \"\" {\n\t\t\tplaylistName = segmentPrefix\n\t\t} else {\n\t\t\tplaylistName = fmt.Sprintf(\"%s-%s\", identifier, time.Now().Format(\"2006-01-02T150405\"))\n\t\t\tif p.Info.RetryCount > 0 {\n\t\t\t\tplaylistName = fmt.Sprintf(\"%s-%d\", playlistName, p.Info.RetryCount)\n\t\t\t}\n\t\t}\n\t}\n\t// live playlist disabled by default\n\n\t// ensure filePrefix\n\tif segmentPrefix == \"\" {\n\t\tsegmentPrefix = playlistName\n\t}\n\n\t// update config\n\to.StorageDir = playlistDir\n\to.PlaylistFilename = fmt.Sprintf(\"%s%s\", playlistName, ext)\n\tif livePlaylistName != \"\" {\n\t\to.LivePlaylistFilename = fmt.Sprintf(\"%s%s\", livePlaylistName, ext)\n\t}\n\to.SegmentPrefix = fmt.Sprintf(\"%s%s\", segmentDir, segmentPrefix)\n\n\tif o.PlaylistFilename == o.LivePlaylistFilename {\n\t\treturn errors.ErrInvalidInput(\"live_playlist_name cannot be identical to playlist_name\")\n\t}\n\n\t// Prepend the configuration base directory and the egress Id\n\t// os.ModeDir creates a 
directory with mode 000 when mapping the directory outside the container\n\to.LocalDir = p.TmpDir\n\tif segmentDir != \"\" {\n\t\tif err := os.MkdirAll(path.Join(o.LocalDir, segmentDir), 0755); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\to.SegmentsInfo.PlaylistName = path.Join(o.StorageDir, o.PlaylistFilename)\n\tif o.LivePlaylistFilename != \"\" {\n\t\to.SegmentsInfo.LivePlaylistName = path.Join(o.StorageDir, o.LivePlaylistFilename)\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/config/output_stream.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage config\n\nimport (\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/livekit\"\n\t\"github.com/livekit/protocol/logger\"\n)\n\ntype StreamConfig struct {\n\toutputConfig\n\n\t// url -> Stream\n\tStreams sync.Map\n\n\ttwitchTemplate string\n}\n\ntype Stream struct {\n\tName        string // gstreamer stream ID\n\tParsedUrl   string // parsed/validated url\n\tRedactedUrl string // url with stream key removed\n\tStreamID    string // stream ID used by rtmpconnection\n\tStreamInfo  *livekit.StreamInfo\n\n\tlastRetryUpdate atomic.Int64\n}\n\nfunc (p *PipelineConfig) GetStreamConfig() *StreamConfig {\n\to, ok := p.Outputs[types.EgressTypeStream]\n\tif !ok || len(o) == 0 {\n\t\treturn nil\n\t}\n\treturn o[0].(*StreamConfig)\n}\n\nfunc (p *PipelineConfig) GetWebsocketConfig() *StreamConfig {\n\to, ok := p.Outputs[types.EgressTypeWebsocket]\n\tif !ok || len(o) == 0 {\n\t\treturn nil\n\t}\n\treturn o[0].(*StreamConfig)\n}\n\nfunc (p *PipelineConfig) getStreamConfig(outputType types.OutputType, urls []string) (*StreamConfig, error) {\n\tconf := &StreamConfig{\n\t\toutputConfig: outputConfig{OutputType: outputType},\n\t}\n\n\tfor _, rawUrl := range urls {\n\t\t_, err := conf.AddStream(rawUrl, outputType)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tswitch outputType 
{\n\tcase types.OutputTypeRTMP, types.OutputTypeSRT:\n\t\tp.AudioOutCodec = types.MimeTypeAAC\n\t\tp.VideoOutCodec = types.MimeTypeH264\n\n\tcase types.OutputTypeRaw:\n\t\tp.AudioOutCodec = types.MimeTypeRawAudio\n\t}\n\n\treturn conf, nil\n}\n\nfunc (s *Stream) UpdateEndTime(endedAt int64) {\n\ts.StreamInfo.EndedAt = endedAt\n\tif s.StreamInfo.StartedAt == 0 {\n\t\tif s.StreamInfo.Status != livekit.StreamInfo_FAILED {\n\t\t\tlogger.Warnw(\"stream missing start time\", nil, \"url\", s.RedactedUrl)\n\t\t}\n\t\ts.StreamInfo.StartedAt = endedAt\n\t} else {\n\t\ts.StreamInfo.Duration = endedAt - s.StreamInfo.StartedAt\n\t}\n}\n\nfunc (s *Stream) ShouldSendRetryUpdate(now time.Time, minInterval time.Duration) bool {\n\tlast := s.lastRetryUpdate.Load()\n\tif last == 0 || now.UnixNano()-last >= int64(minInterval) {\n\t\ts.lastRetryUpdate.Store(now.UnixNano())\n\t\treturn true\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "pkg/config/pipeline.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage config\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net/url\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/go-gst/go-gst/gst/app\"\n\t\"github.com/pion/webrtc/v4\"\n\t\"go.opentelemetry.io/otel\"\n\t\"go.uber.org/atomic\"\n\t\"google.golang.org/protobuf/proto\"\n\t\"gopkg.in/yaml.v3\"\n\n\t\"github.com/livekit/protocol/egress\"\n\t\"github.com/livekit/protocol/livekit\"\n\t\"github.com/livekit/protocol/logger\"\n\t\"github.com/livekit/protocol/rpc\"\n\tlksdk \"github.com/livekit/server-sdk-go/v2\"\n\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/pipeline/tempo\"\n\t\"github.com/livekit/egress/pkg/types\"\n)\n\ntype PipelineConfig struct {\n\tBaseConfig `yaml:\",inline\"`\n\n\tHandlerID string `yaml:\"handler_id\"`\n\tTmpDir    string `yaml:\"tmp_dir\"`\n\n\ttypes.RequestType `yaml:\"-\"`\n\tSourceConfig      `yaml:\"-\"`\n\tAudioConfig       `yaml:\"-\"`\n\tVideoConfig       `yaml:\"-\"`\n\n\tOutputs              map[types.EgressType][]OutputConfig `yaml:\"-\"`\n\tOutputCount          atomic.Int32                        `yaml:\"-\"`\n\tFinalizationRequired bool                                `yaml:\"-\"`\n\n\tInfo            *livekit.EgressInfo `yaml:\"-\"`\n\tManifest        *Manifest           `yaml:\"-\"`\n\tLive            bool                `yaml:\"-\"`\n\tStorageObserver StorageObserver     
`yaml:\"-\"`\n}\n\n// IsReplay returns true when this is a replay/export pipeline. Use this for\n// replay-specific integration points (IPC calls, storage access). For generic\n// pipeline behavior (is-live, leaky queues, backpressure) use the Live field.\nfunc (p *PipelineConfig) IsReplay() bool {\n\treturn !p.Live\n}\n\ntype StorageObserver interface {\n\tOnStorageEvent(egressID, operation, path string, size, lifetimeDays int64)\n}\n\nvar (\n\ttracer = otel.Tracer(\"github.com/livekit/egress/pkg/config\")\n)\n\ntype SourceConfig struct {\n\tSourceType types.SourceType\n\tWebSourceParams\n\tSDKSourceParams\n}\n\ntype WebSourceParams struct {\n\tAwaitStartSignal bool\n\tDisplay          string\n\tLayout           string\n\tToken            string\n\tBaseUrl          string\n\tWebUrl           string\n}\n\ntype SDKSourceParams struct {\n\tTrackID      string\n\tAudioTrackID string\n\tVideoTrackID string\n\tIdentity     string\n\tTrackSource  string\n\tTrackKind    string\n\tScreenShare  bool\n\tVideoInCodec types.MimeType\n\tAudioTracks  []*TrackSource\n\tVideoTrack   *TrackSource\n\tAudioRoutes  []AudioRouteConfig\n}\n\ntype AudioRouteConfig struct {\n\tMatch   AudioRouteMatch\n\tChannel livekit.AudioChannel\n}\n\ntype AudioRouteMatch struct {\n\tTrackID             string\n\tParticipantIdentity string\n\tParticipantKind     *lksdk.ParticipantKind\n}\n\ntype TrackSource struct {\n\tTrackID            string\n\tTrackKind          lksdk.TrackKind\n\tParticipantKind    lksdk.ParticipantKind\n\tAudioChannel       *livekit.AudioChannel\n\tAppSrc             *app.Source\n\tMimeType           types.MimeType\n\tPayloadType        webrtc.PayloadType\n\tClockRate          uint32\n\tTempoController    *tempo.Controller\n\tOnKeyframeRequired func()\n}\n\ntype AudioConfig struct {\n\tAudioEnabled     bool\n\tAudioTranscoding bool\n\tAudioOutCodec    types.MimeType\n\tAudioBitrate     int32\n\tAudioFrequency   int32\n\tAudioMixing      livekit.AudioMixing\n}\n\ntype VideoConfig 
struct {\n\tVideoEnabled     bool\n\tVideoDecoding    bool\n\tVideoEncoding    bool\n\tVideoOutCodec    types.MimeType\n\tVideoProfile     types.Profile\n\tWidth            int32\n\tHeight           int32\n\tDepth            int32\n\tFramerate        int32\n\tVideoBitrate     int32\n\tKeyFrameInterval float64\n}\n\nfunc NewPipelineConfig(confString string, req *rpc.StartEgressRequest) (*PipelineConfig, error) {\n\tp := &PipelineConfig{\n\t\tBaseConfig: BaseConfig{\n\t\t\tLogging: &logger.Config{\n\t\t\t\tLevel: \"info\",\n\t\t\t},\n\t\t},\n\t\tOutputs: make(map[types.EgressType][]OutputConfig),\n\t\tLive:    true,\n\t}\n\n\tif err := yaml.Unmarshal([]byte(confString), p); err != nil {\n\t\treturn nil, errors.ErrCouldNotParseConfig(err)\n\t}\n\n\tif err := p.initLogger(\n\t\t\"nodeID\", p.NodeID,\n\t\t\"handlerID\", p.HandlerID,\n\t\t\"clusterID\", p.ClusterID,\n\t\t\"egressID\", req.EgressId,\n\t); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn p, p.Update(req)\n}\n\nfunc GetValidatedPipelineConfig(conf *ServiceConfig, req *rpc.StartEgressRequest) (*PipelineConfig, error) {\n\t_, span := tracer.Start(context.Background(), \"config.GetValidatedPipelineConfig\")\n\tdefer span.End()\n\n\tp := &PipelineConfig{\n\t\tBaseConfig: conf.BaseConfig,\n\t\tTmpDir:     path.Join(TmpDir, req.EgressId),\n\t\tOutputs:    make(map[types.EgressType][]OutputConfig),\n\t\tLive:       true,\n\t}\n\n\treturn p, p.Update(req)\n}\n\nfunc (p *PipelineConfig) Update(request *rpc.StartEgressRequest) error {\n\tif request.EgressId == \"\" {\n\t\treturn errors.ErrInvalidInput(\"egressID\")\n\t}\n\n\t// start with defaults\n\tnow := time.Now().UnixNano()\n\tp.Info = &livekit.EgressInfo{\n\t\tEgressId:   request.EgressId,\n\t\tRoomId:     request.RoomId,\n\t\tRoomName:   request.RoomName,\n\t\tStatus:     livekit.EgressStatus_EGRESS_STARTING,\n\t\tStartedAt:  now,\n\t\tUpdatedAt:  now,\n\t\tRetryCount: request.RetryCount,\n\t}\n\n\tp.AudioConfig = AudioConfig{\n\t\tAudioBitrate:   
128,\n\t\tAudioFrequency: 44100,\n\t}\n\tp.VideoConfig = VideoConfig{\n\t\tVideoProfile: types.ProfileMain,\n\t\tWidth:        1280,\n\t\tHeight:       720,\n\t\tDepth:        24,\n\t\tFramerate:    30,\n\t\tVideoBitrate: 3000,\n\t}\n\n\tconnectionInfoRequired := true\n\tswitch req := request.Request.(type) {\n\tcase *rpc.StartEgressRequest_RoomComposite:\n\t\tp.RequestType = types.RequestTypeRoomComposite\n\t\tclone := proto.Clone(req.RoomComposite).(*livekit.RoomCompositeEgressRequest)\n\t\tp.Info.Request = &livekit.EgressInfo_RoomComposite{\n\t\t\tRoomComposite: clone,\n\t\t}\n\t\tegress.RedactEncodedOutputs(clone)\n\n\t\tif ShouldUseSDKSource(req.RoomComposite) {\n\t\t\tp.AudioMixing = req.RoomComposite.AudioMixing\n\t\t\tp.SourceType = types.SourceTypeSDK\n\t\t} else {\n\t\t\tp.SourceType = types.SourceTypeWeb\n\t\t}\n\n\t\tp.AwaitStartSignal = true\n\n\t\tp.Info.RoomName = req.RoomComposite.RoomName\n\t\tp.Layout = req.RoomComposite.Layout\n\t\tif req.RoomComposite.CustomBaseUrl != \"\" {\n\t\t\tp.BaseUrl = req.RoomComposite.CustomBaseUrl\n\t\t} else {\n\t\t\tp.BaseUrl = p.TemplateBase\n\t\t}\n\t\tbaseUrl, err := url.Parse(p.BaseUrl)\n\t\tif err != nil || !isHttp(baseUrl) {\n\t\t\treturn errors.ErrInvalidInput(\"template base url\")\n\t\t}\n\n\t\tif !req.RoomComposite.VideoOnly {\n\t\t\tp.AudioEnabled = true\n\t\t\tp.AudioTranscoding = true\n\t\t}\n\t\tif !req.RoomComposite.AudioOnly {\n\t\t\tp.VideoEnabled = true\n\t\t\tp.VideoInCodec = types.MimeTypeRawVideo\n\t\t\tp.VideoDecoding = true\n\t\t}\n\t\tif !p.AudioEnabled && !p.VideoEnabled {\n\t\t\treturn errors.ErrInvalidInput(\"audio_only and video_only\")\n\t\t}\n\n\t\t// encoding options\n\t\tswitch opts := req.RoomComposite.Options.(type) {\n\t\tcase *livekit.RoomCompositeEgressRequest_Preset:\n\t\t\tp.applyPreset(opts.Preset)\n\n\t\tcase *livekit.RoomCompositeEgressRequest_Advanced:\n\t\t\tif err = p.applyAdvanced(opts.Advanced); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t// output 
params\n\t\tif err = p.updateEncodedOutputs(req.RoomComposite); err != nil {\n\t\t\treturn err\n\t\t}\n\n\tcase *rpc.StartEgressRequest_Web:\n\t\tp.RequestType = types.RequestTypeWeb\n\t\tclone := proto.Clone(req.Web).(*livekit.WebEgressRequest)\n\t\tp.Info.Request = &livekit.EgressInfo_Web{\n\t\t\tWeb: clone,\n\t\t}\n\t\tegress.RedactEncodedOutputs(clone)\n\n\t\tconnectionInfoRequired = false\n\t\tp.SourceType = types.SourceTypeWeb\n\t\tp.AwaitStartSignal = req.Web.AwaitStartSignal\n\n\t\tp.WebUrl = req.Web.Url\n\t\twebUrl, err := url.Parse(p.WebUrl)\n\t\tif err != nil || !isHttp(webUrl) {\n\t\t\treturn errors.ErrInvalidInput(\"web url\")\n\t\t}\n\n\t\tif !req.Web.VideoOnly {\n\t\t\tp.AudioEnabled = true\n\t\t\tp.AudioTranscoding = true\n\t\t}\n\t\tif !req.Web.AudioOnly {\n\t\t\tp.VideoEnabled = true\n\t\t\tp.VideoInCodec = types.MimeTypeRawVideo\n\t\t\tp.VideoDecoding = true\n\t\t}\n\t\tif !p.AudioEnabled && !p.VideoEnabled {\n\t\t\treturn errors.ErrInvalidInput(\"audio_only and video_only\")\n\t\t}\n\n\t\t// encoding options\n\t\tswitch opts := req.Web.Options.(type) {\n\t\tcase *livekit.WebEgressRequest_Preset:\n\t\t\tp.applyPreset(opts.Preset)\n\n\t\tcase *livekit.WebEgressRequest_Advanced:\n\t\t\tif err = p.applyAdvanced(opts.Advanced); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t// output params\n\t\tif err = p.updateEncodedOutputs(req.Web); err != nil {\n\t\t\treturn err\n\t\t}\n\n\tcase *rpc.StartEgressRequest_Participant:\n\t\tp.RequestType = types.RequestTypeParticipant\n\t\tclone := proto.Clone(req.Participant).(*livekit.ParticipantEgressRequest)\n\t\tp.Info.Request = &livekit.EgressInfo_Participant{\n\t\t\tParticipant: clone,\n\t\t}\n\t\tegress.RedactEncodedOutputs(clone)\n\n\t\tp.SourceType = types.SourceTypeSDK\n\n\t\tp.Info.RoomName = req.Participant.RoomName\n\t\tp.AudioEnabled = true\n\t\tp.AudioTranscoding = true\n\t\tp.VideoEnabled = true\n\t\tp.VideoDecoding = true\n\t\tp.Identity = req.Participant.Identity\n\t\tp.ScreenShare = 
req.Participant.ScreenShare\n\t\tif p.Identity == \"\" {\n\t\t\treturn errors.ErrInvalidInput(\"identity\")\n\t\t}\n\n\t\t// encoding options\n\t\tswitch opts := req.Participant.Options.(type) {\n\t\tcase *livekit.ParticipantEgressRequest_Preset:\n\t\t\tp.applyPreset(opts.Preset)\n\n\t\tcase *livekit.ParticipantEgressRequest_Advanced:\n\t\t\tif err := p.applyAdvanced(opts.Advanced); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t// output params\n\t\tif err := p.updateEncodedOutputs(req.Participant); err != nil {\n\t\t\treturn err\n\t\t}\n\n\tcase *rpc.StartEgressRequest_TrackComposite:\n\t\tp.RequestType = types.RequestTypeTrackComposite\n\t\tclone := proto.Clone(req.TrackComposite).(*livekit.TrackCompositeEgressRequest)\n\t\tp.Info.Request = &livekit.EgressInfo_TrackComposite{\n\t\t\tTrackComposite: clone,\n\t\t}\n\t\tegress.RedactEncodedOutputs(clone)\n\n\t\tp.SourceType = types.SourceTypeSDK\n\n\t\tp.Info.RoomName = req.TrackComposite.RoomName\n\t\tif audioTrackID := req.TrackComposite.AudioTrackId; audioTrackID != \"\" {\n\t\t\tp.AudioEnabled = true\n\t\t\tp.AudioTrackID = audioTrackID\n\t\t\tp.AudioTranscoding = true\n\t\t}\n\t\tif videoTrackID := req.TrackComposite.VideoTrackId; videoTrackID != \"\" {\n\t\t\tp.VideoEnabled = true\n\t\t\tp.VideoTrackID = videoTrackID\n\t\t\tp.VideoDecoding = true\n\t\t}\n\t\tif !p.AudioEnabled && !p.VideoEnabled {\n\t\t\treturn errors.ErrInvalidInput(\"audio_track_id or video_track_id\")\n\t\t}\n\n\t\t// encoding options\n\t\tswitch opts := req.TrackComposite.Options.(type) {\n\t\tcase *livekit.TrackCompositeEgressRequest_Preset:\n\t\t\tp.applyPreset(opts.Preset)\n\n\t\tcase *livekit.TrackCompositeEgressRequest_Advanced:\n\t\t\tif err := p.applyAdvanced(opts.Advanced); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t// output params\n\t\tif err := p.updateEncodedOutputs(req.TrackComposite); err != nil {\n\t\t\treturn err\n\t\t}\n\n\tcase *rpc.StartEgressRequest_Track:\n\t\tp.RequestType = 
types.RequestTypeTrack\n\t\tclone := proto.Clone(req.Track).(*livekit.TrackEgressRequest)\n\t\tp.Info.Request = &livekit.EgressInfo_Track{\n\t\t\tTrack: clone,\n\t\t}\n\t\tegress.RedactDirectOutputs(clone)\n\n\t\tp.SourceType = types.SourceTypeSDK\n\n\t\tp.Info.RoomName = req.Track.RoomName\n\t\tp.TrackID = req.Track.TrackId\n\t\tif p.TrackID == \"\" {\n\t\t\treturn errors.ErrInvalidInput(\"track_id\")\n\t\t}\n\n\t\tif err := p.updateDirectOutput(req.Track); err != nil {\n\t\t\treturn err\n\t\t}\n\n\tcase *rpc.StartEgressRequest_Replay:\n\t\treplayReq := req.Replay\n\t\tclone := proto.Clone(replayReq).(*livekit.ExportReplayRequest)\n\t\tp.Info.Request = &livekit.EgressInfo_Replay{\n\t\t\tReplay: clone,\n\t\t}\n\t\tegress.RedactStartEgressRequest(clone)\n\n\t\tswitch source := replayReq.Source.(type) {\n\t\tcase *livekit.ExportReplayRequest_Template:\n\t\t\ttmpl := source.Template\n\t\t\tp.RequestType = types.RequestTypeTemplate\n\n\t\t\tif ShouldUseSDKSource(tmpl) {\n\t\t\t\tp.SourceType = types.SourceTypeSDK\n\t\t\t} else {\n\t\t\t\tp.SourceType = types.SourceTypeWeb\n\t\t\t}\n\t\t\tp.AwaitStartSignal = true\n\n\t\t\tp.Layout = tmpl.Layout\n\t\t\tif tmpl.CustomBaseUrl != \"\" {\n\t\t\t\tp.BaseUrl = tmpl.CustomBaseUrl\n\t\t\t} else {\n\t\t\t\tp.BaseUrl = p.TemplateBase\n\t\t\t}\n\t\t\tbaseUrl, err := url.Parse(p.BaseUrl)\n\t\t\tif err != nil || !isHttp(baseUrl) {\n\t\t\t\treturn errors.ErrInvalidInput(\"template base url\")\n\t\t\t}\n\n\t\t\tif !tmpl.VideoOnly {\n\t\t\t\tp.AudioEnabled = true\n\t\t\t\tp.AudioTranscoding = true\n\t\t\t}\n\t\t\tif !tmpl.AudioOnly {\n\t\t\t\tp.VideoEnabled = true\n\t\t\t\tp.VideoInCodec = types.MimeTypeRawVideo\n\t\t\t\tp.VideoDecoding = true\n\t\t\t}\n\t\t\tif !p.AudioEnabled && !p.VideoEnabled {\n\t\t\t\treturn errors.ErrInvalidInput(\"audio_only and video_only\")\n\t\t\t}\n\n\t\tcase *livekit.ExportReplayRequest_Web:\n\t\t\tweb := source.Web\n\t\t\tp.RequestType = types.RequestTypeWeb\n\t\t\tconnectionInfoRequired = 
false\n\t\t\tp.SourceType = types.SourceTypeWeb\n\t\t\tp.AwaitStartSignal = web.AwaitStartSignal\n\n\t\t\tp.WebUrl = web.Url\n\t\t\twebUrl, err := url.Parse(p.WebUrl)\n\t\t\tif err != nil || !isHttp(webUrl) {\n\t\t\t\treturn errors.ErrInvalidInput(\"web url\")\n\t\t\t}\n\n\t\t\tif !web.VideoOnly {\n\t\t\t\tp.AudioEnabled = true\n\t\t\t\tp.AudioTranscoding = true\n\t\t\t}\n\t\t\tif !web.AudioOnly {\n\t\t\t\tp.VideoEnabled = true\n\t\t\t\tp.VideoInCodec = types.MimeTypeRawVideo\n\t\t\t\tp.VideoDecoding = true\n\t\t\t}\n\t\t\tif !p.AudioEnabled && !p.VideoEnabled {\n\t\t\t\treturn errors.ErrInvalidInput(\"audio_only and video_only\")\n\t\t\t}\n\n\t\tcase *livekit.ExportReplayRequest_Media:\n\t\t\tmedia := source.Media\n\t\t\tp.RequestType = types.RequestTypeMedia\n\t\t\tp.SourceType = types.SourceTypeSDK\n\n\t\t\t// data config not yet supported\n\t\t\tif media.Data != nil {\n\t\t\t\treturn errors.ErrFeatureDisabled(\"data track egress\")\n\t\t\t}\n\n\t\t\t// video\n\t\t\tswitch v := media.Video.(type) {\n\t\t\tcase *livekit.MediaSource_VideoTrackId:\n\t\t\t\tp.VideoEnabled = true\n\t\t\t\tp.VideoDecoding = true\n\t\t\t\tp.VideoTrackID = v.VideoTrackId\n\t\t\tcase *livekit.MediaSource_ParticipantVideo:\n\t\t\t\tp.VideoEnabled = true\n\t\t\t\tp.VideoDecoding = true\n\t\t\t\tp.Identity = v.ParticipantVideo.Identity\n\t\t\t\tp.ScreenShare = v.ParticipantVideo.PreferScreenShare\n\t\t\t}\n\n\t\t\t// audio\n\t\t\tif media.Audio != nil {\n\t\t\t\tp.AudioEnabled = true\n\t\t\t\tp.AudioTranscoding = true\n\t\t\t\tfor _, route := range media.Audio.Routes {\n\t\t\t\t\tarc := AudioRouteConfig{\n\t\t\t\t\t\tChannel: route.Channel,\n\t\t\t\t\t}\n\t\t\t\t\tswitch m := route.Match.(type) {\n\t\t\t\t\tcase *livekit.AudioRoute_TrackId:\n\t\t\t\t\t\tarc.Match.TrackID = m.TrackId\n\t\t\t\t\tcase *livekit.AudioRoute_ParticipantIdentity:\n\t\t\t\t\t\tarc.Match.ParticipantIdentity = m.ParticipantIdentity\n\t\t\t\t\tcase *livekit.AudioRoute_ParticipantKind:\n\t\t\t\t\t\tkind := 
lksdk.ParticipantKind(m.ParticipantKind)\n\t\t\t\t\t\tarc.Match.ParticipantKind = &kind\n\t\t\t\t\t}\n\t\t\t\t\tp.AudioRoutes = append(p.AudioRoutes, arc)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !p.AudioEnabled && !p.VideoEnabled {\n\t\t\t\treturn errors.ErrInvalidInput(\"audio or video\")\n\t\t\t}\n\n\t\tdefault:\n\t\t\treturn errors.ErrInvalidInput(\"source\")\n\t\t}\n\n\t\t// encoding options\n\t\tswitch opts := replayReq.Encoding.(type) {\n\t\tcase *livekit.ExportReplayRequest_Preset:\n\t\t\tp.applyPreset(opts.Preset)\n\t\tcase *livekit.ExportReplayRequest_Advanced:\n\t\t\tif err := p.applyAdvanced(opts.Advanced); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t// output params\n\t\tif err := p.updateOutputs(replayReq); err != nil {\n\t\t\treturn err\n\t\t}\n\n\tdefault:\n\t\treturn errors.ErrInvalidInput(\"request\")\n\t}\n\n\tswitch p.SourceType {\n\tcase types.SourceTypeWeb:\n\t\tp.Info.SourceType = livekit.EgressSourceType_EGRESS_SOURCE_TYPE_WEB\n\tcase types.SourceTypeSDK:\n\t\tp.Info.SourceType = livekit.EgressSourceType_EGRESS_SOURCE_TYPE_SDK\n\t}\n\n\t// connection info\n\tif connectionInfoRequired {\n\t\t// token\n\t\tif request.Token != \"\" {\n\t\t\tp.Token = request.Token\n\t\t} else if p.ApiKey != \"\" && p.ApiSecret != \"\" && p.Info.RoomName != \"\" {\n\t\t\ttoken, err := egress.BuildEgressToken(p.Info.EgressId, p.ApiKey, p.ApiSecret, p.Info.RoomName)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tp.Token = token\n\t\t} else {\n\t\t\treturn errors.ErrInvalidInput(\"token or api key/secret\")\n\t\t}\n\n\t\t// url\n\t\tif request.WsUrl != \"\" {\n\t\t\tp.WsUrl = request.WsUrl\n\t\t} else if p.WsUrl == \"\" {\n\t\t\treturn errors.ErrInvalidInput(\"ws_url\")\n\t\t}\n\t}\n\n\tp.Latency = p.getLatencyConfig(p.RequestType)\n\tapplyLatencyDefaults(&p.Latency)\n\n\tif p.RequestType != types.RequestTypeTrack {\n\t\terr := p.validateAndUpdateOutputParams()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tp.initManifest()\n\treturn 
nil\n}\n\nfunc ShouldUseSDKSource(req interface {\n\tGetLayout() string\n\tGetAudioOnly() bool\n\tGetCustomBaseUrl() string\n}) bool {\n\treturn req.GetAudioOnly() && req.GetLayout() == \"\" && req.GetCustomBaseUrl() == \"\"\n}\n\nfunc (p *PipelineConfig) validateAndUpdateOutputParams() error {\n\tcompatibleAudioCodecs, compatibleVideoCodecs, err := p.validateAndUpdateOutputCodecs()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Find a compatible file format if not set\n\terr = p.updateOutputType(compatibleAudioCodecs, compatibleVideoCodecs)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Select a codec compatible with all outputs\n\tif p.AudioEnabled {\n\t\tfor _, o := range p.GetEncodedOutputs() {\n\n\t\t\tif compatibleAudioCodecs[types.DefaultAudioCodecs[o.GetOutputType()]] {\n\t\t\t\tp.AudioOutCodec = types.DefaultAudioCodecs[o.GetOutputType()]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif p.AudioOutCodec == \"\" {\n\t\t\t// No default codec found. Pick a random compatible one\n\t\t\tfor k := range compatibleAudioCodecs {\n\t\t\t\tp.AudioOutCodec = k\n\t\t\t}\n\t\t}\n\t}\n\n\tif p.VideoEnabled {\n\t\tfor _, o := range p.GetEncodedOutputs() {\n\t\t\tif compatibleVideoCodecs[types.DefaultVideoCodecs[o.GetOutputType()]] {\n\t\t\t\tp.VideoOutCodec = types.DefaultVideoCodecs[o.GetOutputType()]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif p.VideoOutCodec == \"\" {\n\t\t\t// No default codec found. 
Pick a random compatible one\n\t\t\tfor k := range compatibleVideoCodecs {\n\t\t\t\tp.VideoOutCodec = k\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (p *PipelineConfig) validateAndUpdateOutputCodecs() (compatibleAudioCodecs map[types.MimeType]bool, compatibleVideoCodecs map[types.MimeType]bool, err error) {\n\tcompatibleAudioCodecs = make(map[types.MimeType]bool)\n\tcompatibleVideoCodecs = make(map[types.MimeType]bool)\n\n\t// Find video and audio codecs compatible with all outputs\n\tif p.AudioEnabled {\n\t\tif p.AudioOutCodec == \"\" {\n\t\t\tcompatibleAudioCodecs = types.AllOutputAudioCodecs\n\t\t} else {\n\t\t\tcompatibleAudioCodecs[p.AudioOutCodec] = true\n\t\t}\n\n\t\tfor _, o := range p.GetEncodedOutputs() {\n\t\t\tcompatibleAudioCodecs = types.GetMapIntersection(compatibleAudioCodecs, types.CodecCompatibility[o.GetOutputType()])\n\t\t\tif len(compatibleAudioCodecs) == 0 {\n\t\t\t\tif p.AudioOutCodec == \"\" {\n\t\t\t\t\treturn nil, nil, errors.ErrNoCompatibleCodec\n\t\t\t\t}\n\t\t\t\t// Return a more specific error if a codec was provided\n\t\t\t\treturn nil, nil, errors.ErrIncompatible(o.GetOutputType(), p.AudioOutCodec)\n\n\t\t\t}\n\t\t}\n\t}\n\n\tif p.VideoEnabled {\n\t\tif p.VideoOutCodec == \"\" {\n\t\t\tcompatibleVideoCodecs = types.AllOutputVideoCodecs\n\t\t} else {\n\t\t\tcompatibleVideoCodecs[p.VideoOutCodec] = true\n\t\t}\n\n\t\tfor _, o := range p.GetEncodedOutputs() {\n\t\t\tcompatibleVideoCodecs = types.GetMapIntersection(compatibleVideoCodecs, types.CodecCompatibility[o.GetOutputType()])\n\t\t\tif len(compatibleVideoCodecs) == 0 {\n\t\t\t\tif p.AudioOutCodec == \"\" {\n\t\t\t\t\treturn nil, nil, errors.ErrNoCompatibleCodec\n\t\t\t\t}\n\t\t\t\t// Return a more specific error if a codec was provided\n\t\t\t\treturn nil, nil, errors.ErrIncompatible(o.GetOutputType(), p.VideoOutCodec)\n\n\t\t\t}\n\t\t}\n\t}\n\treturn compatibleAudioCodecs, compatibleVideoCodecs, nil\n}\n\nfunc (p *PipelineConfig) updateOutputType(compatibleAudioCodecs 
map[types.MimeType]bool, compatibleVideoCodecs map[types.MimeType]bool) error {\n\to := p.GetFileConfig()\n\tif o == nil || o.GetOutputType() != types.OutputTypeUnknownFile {\n\t\treturn nil\n\t}\n\n\tif !p.VideoEnabled {\n\t\tot := types.GetOutputTypeCompatibleWithCodecs(types.AudioOnlyFileOutputTypes, compatibleAudioCodecs, nil)\n\t\tif ot == types.OutputTypeUnknownFile {\n\t\t\treturn errors.ErrNoCompatibleFileOutputType\n\t\t}\n\t\to.OutputType = ot\n\t} else if !p.AudioEnabled {\n\t\tot := types.GetOutputTypeCompatibleWithCodecs(types.VideoOnlyFileOutputTypes, nil, compatibleVideoCodecs)\n\t\tif ot == types.OutputTypeUnknownFile {\n\t\t\treturn errors.ErrNoCompatibleFileOutputType\n\t\t}\n\t\to.OutputType = ot\n\t} else {\n\t\tot := types.GetOutputTypeCompatibleWithCodecs(types.AudioVideoFileOutputTypes, compatibleAudioCodecs, compatibleVideoCodecs)\n\t\tif ot == types.OutputTypeUnknownFile {\n\t\t\treturn errors.ErrNoCompatibleFileOutputType\n\t\t}\n\t\to.OutputType = ot\n\t}\n\n\tidentifier, replacements := p.getFilenameInfo()\n\terr := o.updateFilepath(p, identifier, replacements)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// UpdateInfoFromSDK - updates the pipeline config with the identifier, replacements, width, and height\nfunc (p *PipelineConfig) UpdateInfoFromSDK(identifier string, replacements map[string]string, w, h uint32) error {\n\tif p.Info.RetryCount > 0 {\n\t\treplacements[\"{retry}\"] = fmt.Sprintf(\"%d\", p.Info.RetryCount)\n\t}\n\tvar err error\n\tfor egressType, c := range p.Outputs {\n\t\tif len(c) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tswitch egressType {\n\t\tcase types.EgressTypeFile:\n\t\t\terr = c[0].(*FileConfig).updateFilepath(p, identifier, replacements)\n\n\t\tcase types.EgressTypeSegments:\n\t\t\to := c[0].(*SegmentConfig)\n\t\t\to.LocalDir = stringReplace(o.LocalDir, replacements)\n\t\t\to.StorageDir = stringReplace(o.StorageDir, replacements)\n\t\t\to.PlaylistFilename = stringReplace(o.PlaylistFilename, 
replacements)\n\t\t\to.LivePlaylistFilename = stringReplace(o.LivePlaylistFilename, replacements)\n\t\t\to.SegmentPrefix = stringReplace(o.SegmentPrefix, replacements)\n\t\t\to.SegmentsInfo.PlaylistName = stringReplace(o.SegmentsInfo.PlaylistName, replacements)\n\t\t\to.SegmentsInfo.LivePlaylistName = stringReplace(o.SegmentsInfo.LivePlaylistName, replacements)\n\n\t\tcase types.EgressTypeImages:\n\t\t\tfor _, ci := range c {\n\t\t\t\to := ci.(*ImageConfig)\n\t\t\t\to.LocalDir = stringReplace(o.LocalDir, replacements)\n\t\t\t\to.StorageDir = stringReplace(o.StorageDir, replacements)\n\t\t\t\to.ImagePrefix = stringReplace(o.ImagePrefix, replacements)\n\t\t\t\to.ImagesInfo.FilenamePrefix = stringReplace(o.ImagesInfo.FilenamePrefix, replacements)\n\t\t\t\tif o.Width == 0 {\n\t\t\t\t\tif w != 0 {\n\t\t\t\t\t\to.Width = int32(w)\n\t\t\t\t\t} else {\n\t\t\t\t\t\to.Width = p.Width\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif o.Height == 0 {\n\t\t\t\t\tif h != 0 {\n\t\t\t\t\t\to.Height = int32(h)\n\t\t\t\t\t} else {\n\t\t\t\t\t\to.Height = p.Height\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn err\n}\n\nfunc (p *PipelineConfig) GetEncodedOutputs() []OutputConfig {\n\tret := make([]OutputConfig, 0)\n\n\tfor _, k := range []types.EgressType{types.EgressTypeFile, types.EgressTypeSegments, types.EgressTypeStream, types.EgressTypeWebsocket} {\n\t\tret = append(ret, p.Outputs[k]...)\n\t}\n\n\treturn ret\n}\n\nfunc isHttp(parsedUrl *url.URL) bool {\n\treturn parsedUrl.Scheme == \"http\" || parsedUrl.Scheme == \"https\"\n}\n\nfunc stringReplace(s string, replacements map[string]string) string {\n\tfor template, value := range replacements {\n\t\ts = strings.ReplaceAll(s, template, value)\n\t}\n\treturn s\n}\n"
  },
  {
    "path": "pkg/config/retry_test.go",
    "content": "// Copyright 2026 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage config\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/livekit\"\n)\n\nfunc TestFileOutputRetrySafety(t *testing.T) {\n\tfor _, test := range []struct {\n\t\tname       string\n\t\tretryCount int32\n\t\tfilepath   string\n\t\texpectErr  bool\n\t}{\n\t\t{\n\t\t\tname:       \"first attempt with explicit path\",\n\t\t\tretryCount: 0,\n\t\t\tfilepath:   \"recordings/my-file.mp4\",\n\t\t\texpectErr:  false,\n\t\t},\n\t\t{\n\t\t\tname:       \"retry with empty path (auto-generated)\",\n\t\t\tretryCount: 1,\n\t\t\tfilepath:   \"\",\n\t\t\texpectErr:  false,\n\t\t},\n\t\t{\n\t\t\tname:       \"retry with directory path (auto-generated)\",\n\t\t\tretryCount: 1,\n\t\t\tfilepath:   \"recordings/\",\n\t\t\texpectErr:  false,\n\t\t},\n\t\t{\n\t\t\tname:       \"retry with {retry} placeholder\",\n\t\t\tretryCount: 1,\n\t\t\tfilepath:   \"recordings/my-file-{retry}.mp4\",\n\t\t\texpectErr:  false,\n\t\t},\n\t\t{\n\t\t\tname:       \"retry with explicit path missing {retry}\",\n\t\t\tretryCount: 1,\n\t\t\tfilepath:   \"recordings/my-file.mp4\",\n\t\t\texpectErr:  true,\n\t\t},\n\t\t{\n\t\t\tname:       \"retry with {retry} in directory part\",\n\t\t\tretryCount: 1,\n\t\t\tfilepath:   
\"recordings/{retry}/my-file.mp4\",\n\t\t\texpectErr:  false,\n\t\t},\n\t\t{\n\t\t\tname:       \"second retry with {retry} placeholder\",\n\t\t\tretryCount: 2,\n\t\t\tfilepath:   \"recordings/my-file-{retry}.mp4\",\n\t\t\texpectErr:  false,\n\t\t},\n\t\t{\n\t\t\tname:       \"second retry with explicit path missing {retry}\",\n\t\t\tretryCount: 2,\n\t\t\tfilepath:   \"recordings/my-file.mp4\",\n\t\t\texpectErr:  true,\n\t\t},\n\t} {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tp := &PipelineConfig{\n\t\t\t\tInfo:    &livekit.EgressInfo{RoomName: \"test-room\", RetryCount: test.retryCount},\n\t\t\t\tTmpDir:  t.TempDir(),\n\t\t\t\tOutputs: make(map[types.EgressType][]OutputConfig),\n\t\t\t}\n\n\t\t\t_, err := p.getEncodedFileConfig(&livekit.EncodedFileOutput{\n\t\t\t\tFileType: livekit.EncodedFileType_MP4,\n\t\t\t\tFilepath: test.filepath,\n\t\t\t})\n\n\t\t\tif test.expectErr {\n\t\t\t\trequire.ErrorIs(t, err, errors.ErrNonRetryableOutput)\n\t\t\t} else {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestSegmentOutputRetrySafety(t *testing.T) {\n\tfor _, test := range []struct {\n\t\tname       string\n\t\tretryCount int32\n\t\tprefix     string\n\t\tplaylist   string\n\t\texpectErr  bool\n\t}{\n\t\t{\n\t\t\tname:       \"first attempt with explicit prefix\",\n\t\t\tretryCount: 0,\n\t\t\tprefix:     \"segments/my-stream\",\n\t\t\tplaylist:   \"segments/playlist\",\n\t\t\texpectErr:  false,\n\t\t},\n\t\t{\n\t\t\tname:       \"retry with both empty (auto-generated)\",\n\t\t\tretryCount: 1,\n\t\t\tprefix:     \"\",\n\t\t\tplaylist:   \"\",\n\t\t\texpectErr:  false,\n\t\t},\n\t\t{\n\t\t\tname:       \"retry with {retry} in prefix only\",\n\t\t\tretryCount: 1,\n\t\t\tprefix:     \"segments/my-stream-{retry}\",\n\t\t\tplaylist:   \"segments/playlist\",\n\t\t\texpectErr:  false,\n\t\t},\n\t\t{\n\t\t\tname:       \"retry with {retry} in playlist only (prefix explicit)\",\n\t\t\tretryCount: 1,\n\t\t\tprefix:     
\"segments/my-stream\",\n\t\t\tplaylist:   \"segments/playlist-{retry}\",\n\t\t\texpectErr:  true,\n\t\t},\n\t\t{\n\t\t\tname:       \"retry with {retry} in both\",\n\t\t\tretryCount: 1,\n\t\t\tprefix:     \"segments/my-stream-{retry}\",\n\t\t\tplaylist:   \"segments/playlist-{retry}\",\n\t\t\texpectErr:  false,\n\t\t},\n\t\t{\n\t\t\tname:       \"retry with explicit prefix missing {retry}\",\n\t\t\tretryCount: 1,\n\t\t\tprefix:     \"segments/my-stream\",\n\t\t\tplaylist:   \"\",\n\t\t\texpectErr:  true,\n\t\t},\n\t\t{\n\t\t\tname:       \"retry with {retry} in playlist only (prefix empty, derives from playlist)\",\n\t\t\tretryCount: 1,\n\t\t\tprefix:     \"\",\n\t\t\tplaylist:   \"segments/playlist-{retry}\",\n\t\t\texpectErr:  false,\n\t\t},\n\t\t{\n\t\t\tname:       \"retry with explicit playlist missing {retry}\",\n\t\t\tretryCount: 1,\n\t\t\tprefix:     \"\",\n\t\t\tplaylist:   \"segments/playlist\",\n\t\t\texpectErr:  true,\n\t\t},\n\t\t{\n\t\t\tname:       \"retry with both explicit and neither has {retry}\",\n\t\t\tretryCount: 1,\n\t\t\tprefix:     \"segments/my-stream\",\n\t\t\tplaylist:   \"segments/playlist\",\n\t\t\texpectErr:  true,\n\t\t},\n\t} {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tp := &PipelineConfig{\n\t\t\t\tInfo:    &livekit.EgressInfo{EgressId: \"test_egress\", RoomName: \"test-room\", RetryCount: test.retryCount},\n\t\t\t\tTmpDir:  t.TempDir(),\n\t\t\t\tOutputs: make(map[types.EgressType][]OutputConfig),\n\t\t\t}\n\n\t\t\tseg := &livekit.SegmentedFileOutput{\n\t\t\t\tFilenamePrefix: test.prefix,\n\t\t\t\tPlaylistName:   test.playlist,\n\t\t\t}\n\t\t\t_, err := p.getSegmentConfig(seg, seg)\n\n\t\t\tif test.expectErr {\n\t\t\t\trequire.ErrorIs(t, err, errors.ErrNonRetryableOutput)\n\t\t\t} else {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t}\n\t\t})\n\t}\n}"
  },
  {
    "path": "pkg/config/service.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage config\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"gopkg.in/yaml.v3\"\n\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/protocol/logger\"\n\t\"github.com/livekit/protocol/rpc\"\n\t\"github.com/livekit/protocol/utils\"\n)\n\nconst (\n\troomCompositeCpuCost      = 4\n\taudioRoomCompositeCpuCost = 1\n\twebCpuCost                = 4\n\taudioWebCpuCost           = 1\n\tparticipantCpuCost        = 2\n\ttrackCompositeCpuCost     = 1\n\ttrackCpuCost              = 0.5\n\tmaxCpuUtilization         = 0.8\n\tmaxUploadQueue            = 60\n\n\tdefaultTemplatePort         = 7980\n\tdefaultTemplateBaseTemplate = \"http://localhost:%d/\"\n\n\tdefaultIOCreateTimeout = time.Second * 15\n\tdefaultIOUpdateTimeout = time.Second * 30\n\tdefaultIOWorkers       = 5\n\n\tdefaultJitterBufferLatency   = time.Second * 2\n\tdefaultAudioMixerLatency     = time.Millisecond * 2750\n\tdefaultPipelineLatency       = time.Second * 3\n\tdefaultRTPMaxDriftAdjustment = time.Millisecond * 5\n\tdefaultOldPacketThreshold    = 2200 * time.Millisecond\n\tdefaultRTPMaxAllowedTsDiff   = time.Second * 5\n\n\tdefaultAudioTempoControllerAdjustmentRate = 0.05\n\n\tdefaultMaxPulseClients = 60\n)\n\ntype ServiceConfig struct {\n\tBaseConfig `yaml:\",inline\"`\n\n\tHealthPort       int `yaml:\"health_port\"`      
  // health check port\n\tTemplatePort     int `yaml:\"template_port\"`      // room composite template server port\n\tPrometheusPort   int `yaml:\"prometheus_port\"`    // prometheus handler port\n\tDebugHandlerPort int `yaml:\"debug_handler_port\"` // egress debug handler port\n\n\t*CPUCostConfig `yaml:\"cpu_cost\"` // CPU costs for the different egress types\n}\n\n// MemorySource defines how memory usage is measured for admission and kill decisions.\ntype MemorySource string\n\nconst (\n\t// MemorySourceProcRSS uses per-process RSS sum from hwstats (existing behavior).\n\tMemorySourceProcRSS MemorySource = \"proc_rss\"\n\t// MemorySourceCgroup uses cgroup-aware memory usage (working set).\n\tMemorySourceCgroup MemorySource = \"cgroup\"\n)\n\ntype CPUCostConfig struct {\n\tMaxCpuUtilization         float64 `yaml:\"max_cpu_utilization\"` // maximum allowed CPU utilization when deciding to accept a request. Default to 80%\n\tMaxMemory                 float64 `yaml:\"max_memory\"`          // maximum allowed memory usage in GB. 
0 to disable\n\tMemoryCost                float64 `yaml:\"memory_cost\"`         // minimum memory in GB\n\tRoomCompositeCpuCost      float64 `yaml:\"room_composite_cpu_cost\"`\n\tAudioRoomCompositeCpuCost float64 `yaml:\"audio_room_composite_cpu_cost\"`\n\tWebCpuCost                float64 `yaml:\"web_cpu_cost\"`\n\tAudioWebCpuCost           float64 `yaml:\"audio_web_cpu_cost\"`\n\tParticipantCpuCost        float64 `yaml:\"participant_cpu_cost\"`\n\tTrackCompositeCpuCost     float64 `yaml:\"track_composite_cpu_cost\"`\n\tTrackCpuCost              float64 `yaml:\"track_cpu_cost\"`\n\tMaxPulseClients           int     `yaml:\"max_pulse_clients\"` // pulse client limit for launching chrome\n\n\t// Memory source configuration (cgroup-aware memory accounting)\n\tMemorySource       MemorySource `yaml:\"memory_source\"`         // memory measurement source: proc_rss, cgroup\n\tMemoryKillGraceSec int          `yaml:\"memory_kill_grace_sec\"` // grace period in update cycles before kill (0 = immediate)\n}\n\nfunc NewServiceConfig(confString string) (*ServiceConfig, error) {\n\tconf := &ServiceConfig{\n\t\tBaseConfig: BaseConfig{\n\t\t\tLogging: &logger.Config{\n\t\t\t\tLevel: \"info\",\n\t\t\t},\n\t\t\tApiKey:    os.Getenv(\"LIVEKIT_API_KEY\"),\n\t\t\tApiSecret: os.Getenv(\"LIVEKIT_API_SECRET\"),\n\t\t\tWsUrl:     os.Getenv(\"LIVEKIT_WS_URL\"),\n\t\t},\n\t\tCPUCostConfig: &CPUCostConfig{},\n\t}\n\tif confString != \"\" {\n\t\tif err := yaml.Unmarshal([]byte(confString), conf); err != nil {\n\t\t\treturn nil, errors.ErrCouldNotParseConfig(err)\n\t\t}\n\t}\n\n\t// always create a new node ID\n\tconf.NodeID = utils.NewGuid(\"NE_\")\n\tconf.InitDefaults()\n\n\trpc.InitPSRPCStats(prometheus.Labels{\"node_id\": conf.NodeID, \"node_type\": \"EGRESS\"})\n\n\tif err := conf.initLogger(\"nodeID\", conf.NodeID, \"clusterID\", conf.ClusterID); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn conf, nil\n}\n\nfunc (c *ServiceConfig) InitDefaults() {\n\tif c.CPUCostConfig == nil 
{\n\t\tc.CPUCostConfig = new(CPUCostConfig)\n\t}\n\n\tif c.TemplatePort == 0 {\n\t\tc.TemplatePort = defaultTemplatePort\n\t}\n\tif c.TemplateBase == \"\" {\n\t\tc.TemplateBase = fmt.Sprintf(defaultTemplateBaseTemplate, c.TemplatePort)\n\t}\n\n\tif c.IOCreateTimeout == 0 {\n\t\tc.IOCreateTimeout = defaultIOCreateTimeout\n\t}\n\tif c.IOUpdateTimeout == 0 {\n\t\tc.IOUpdateTimeout = defaultIOUpdateTimeout\n\t}\n\tif c.IOWorkers <= 0 {\n\t\tc.IOWorkers = defaultIOWorkers\n\t}\n\n\t// Setting CPU costs from config. Ensure that CPU costs are positive\n\tif c.MaxCpuUtilization <= 0 || c.MaxCpuUtilization > 1 {\n\t\tc.MaxCpuUtilization = maxCpuUtilization\n\t}\n\tif c.RoomCompositeCpuCost <= 0 {\n\t\tc.RoomCompositeCpuCost = roomCompositeCpuCost\n\t}\n\tif c.AudioRoomCompositeCpuCost <= 0 {\n\t\tc.AudioRoomCompositeCpuCost = audioRoomCompositeCpuCost\n\t}\n\tif c.WebCpuCost <= 0 {\n\t\tc.WebCpuCost = webCpuCost\n\t}\n\tif c.AudioWebCpuCost <= 0 {\n\t\tc.AudioWebCpuCost = audioWebCpuCost\n\t}\n\tif c.ParticipantCpuCost <= 0 {\n\t\tc.ParticipantCpuCost = participantCpuCost\n\t}\n\tif c.TrackCompositeCpuCost <= 0 {\n\t\tc.TrackCompositeCpuCost = trackCompositeCpuCost\n\t}\n\tif c.TrackCpuCost <= 0 {\n\t\tc.TrackCpuCost = trackCpuCost\n\t}\n\tif c.MaxPulseClients == 0 {\n\t\tc.MaxPulseClients = defaultMaxPulseClients\n\t}\n\n\t// Memory source defaults to proc_rss (preserves existing behavior)\n\tif c.MemorySource == \"\" {\n\t\tc.MemorySource = MemorySourceProcRSS\n\t}\n\t// Validate memory source\n\tswitch c.MemorySource {\n\tcase MemorySourceProcRSS, MemorySourceCgroup:\n\t\t// valid\n\tdefault:\n\t\tlogger.Warnw(\"unknown memory_source, falling back to proc_rss\", nil, \"memorySource\", c.MemorySource)\n\t\tc.MemorySource = MemorySourceProcRSS\n\t}\n\n\tif c.MaxUploadQueue <= 0 {\n\t\tc.MaxUploadQueue = maxUploadQueue\n\t}\n\n\tapplyLatencyDefaults(&c.Latency)\n\n\tif c.AudioTempoController.Enabled {\n\t\tif c.AudioTempoController.AdjustmentRate > 0.2 || 
c.AudioTempoController.AdjustmentRate <= 0 {\n\t\t\tc.AudioTempoController.AdjustmentRate = defaultAudioTempoControllerAdjustmentRate\n\t\t}\n\t}\n}\n\nfunc applyLatencyDefaults(latency *LatencyConfig) {\n\tif latency.JitterBufferLatency == 0 {\n\t\tlatency.JitterBufferLatency = defaultJitterBufferLatency\n\t}\n\tif latency.AudioMixerLatency == 0 {\n\t\tlatency.AudioMixerLatency = defaultAudioMixerLatency\n\t}\n\tif latency.PipelineLatency == 0 {\n\t\tlatency.PipelineLatency = defaultPipelineLatency\n\t}\n\tif latency.RTPMaxAllowedTsDiff == 0 {\n\t\tlatency.RTPMaxAllowedTsDiff = defaultRTPMaxAllowedTsDiff\n\t}\n\tif latency.RTPMaxAllowedTsDiff < latency.JitterBufferLatency {\n\t\t// RTP max allowed ts diff must be equal or greater than jitter buffer latency to absorb the jitter buffer burst\n\t\tlatency.RTPMaxAllowedTsDiff = latency.JitterBufferLatency\n\t}\n\tif latency.RTPMaxDriftAdjustment == 0 {\n\t\tlatency.RTPMaxDriftAdjustment = defaultRTPMaxDriftAdjustment\n\t}\n\tif latency.OldPacketThreshold == 0 {\n\t\tlatency.OldPacketThreshold = defaultOldPacketThreshold\n\t}\n}\n"
  },
  {
    "path": "pkg/config/storage.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage config\n\nimport (\n\t\"time\"\n\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/protocol/egress\"\n\t\"github.com/livekit/protocol/livekit\"\n\t\"github.com/livekit/storage\"\n)\n\ntype StorageConfig struct {\n\tPrefix               string `yaml:\"prefix\"` // prefix applied to all filenames\n\tGeneratePresignedUrl bool   `yaml:\"generate_presigned_url\"`\n\n\tS3     *storage.S3Config     `yaml:\"s3\"`     // upload to s3\n\tAzure  *storage.AzureConfig  `yaml:\"azure\"`  // upload to azure\n\tGCP    *storage.GCPConfig    `yaml:\"gcp\"`    // upload to gcp\n\tAliOSS *storage.AliOSSConfig `yaml:\"alioss\"` // upload to aliyun\n}\n\nfunc (p *PipelineConfig) getStorageConfig(req egress.UploadRequest) (*StorageConfig, error) {\n\tsc := &StorageConfig{}\n\tif p.StorageConfig != nil {\n\t\tsc.Prefix = p.StorageConfig.Prefix\n\t\tsc.GeneratePresignedUrl = p.StorageConfig.GeneratePresignedUrl\n\t}\n\n\tif s3 := req.GetS3(); s3 != nil {\n\t\tsc.S3 = &storage.S3Config{\n\t\t\tAccessKey:            s3.AccessKey,\n\t\t\tSecret:               s3.Secret,\n\t\t\tSessionToken:         s3.SessionToken,\n\t\t\tAssumeRoleArn:        s3.AssumeRoleArn,\n\t\t\tAssumeRoleExternalId: s3.AssumeRoleExternalId,\n\t\t\tRegion:               s3.Region,\n\t\t\tEndpoint:             s3.Endpoint,\n\t\t\tBucket:               
s3.Bucket,\n\t\t\tForcePathStyle:       s3.ForcePathStyle,\n\t\t\tMetadata:             s3.Metadata,\n\t\t\tTagging:              s3.Tagging,\n\t\t\tContentDisposition:   s3.ContentDisposition,\n\t\t}\n\t\tif p.StorageConfig != nil && p.StorageConfig.S3 != nil {\n\t\t\tsc.S3.MaxRetries = p.StorageConfig.S3.MaxRetries\n\t\t\tsc.S3.MaxRetryDelay = p.StorageConfig.S3.MaxRetryDelay\n\t\t\tsc.S3.MinRetryDelay = p.StorageConfig.S3.MinRetryDelay\n\t\t}\n\n\t\tif sc.S3.AssumeRoleArn == \"\" {\n\t\t\tsc.S3.AssumeRoleArn = p.S3AssumeRoleArn\n\t\t\tsc.S3.AssumeRoleExternalId = p.S3AssumeRoleExternalID\n\t\t}\n\n\t\tif sc.S3.AssumeRoleArn != \"\" && sc.S3.AccessKey == \"\" {\n\t\t\tif p.S3AssumeRoleKey == \"\" {\n\t\t\t\treturn nil, errors.ErrFeatureDisabled(\"S3 upload using AssumeRole\")\n\t\t\t}\n\t\t\t// If an AssumedRole is set but not any AccessKey, default to using the one from conf. This is useful for uploading to S3\n\t\t\t// using an external account.\n\t\t\tsc.S3.AccessKey = p.S3AssumeRoleKey\n\t\t\tsc.S3.Secret = p.S3AssumeRoleSecret\n\t\t}\n\n\t\tif s3.Proxy != nil {\n\t\t\tsc.S3.ProxyConfig = &storage.ProxyConfig{\n\t\t\t\tUrl:      s3.Proxy.Url,\n\t\t\t\tUsername: s3.Proxy.Username,\n\t\t\t\tPassword: s3.Proxy.Password,\n\t\t\t}\n\t\t}\n\t\tif sc.S3.MaxRetries == 0 {\n\t\t\tsc.S3.MaxRetries = 5\n\t\t}\n\t\tif sc.S3.MaxRetryDelay == 0 {\n\t\t\tsc.S3.MaxRetryDelay = time.Second * 5\n\t\t}\n\t\tif sc.S3.MinRetryDelay == 0 {\n\t\t\tsc.S3.MinRetryDelay = time.Millisecond * 100\n\t\t}\n\t\treturn sc, nil\n\t}\n\n\tif gcp := req.GetGcp(); gcp != nil {\n\t\tsc.GCP = &storage.GCPConfig{\n\t\t\tCredentialsJSON: gcp.Credentials,\n\t\t\tBucket:          gcp.Bucket,\n\t\t}\n\t\tif gcp.Proxy != nil {\n\t\t\tsc.GCP.ProxyConfig = &storage.ProxyConfig{\n\t\t\t\tUrl:      gcp.Proxy.Url,\n\t\t\t\tUsername: gcp.Proxy.Username,\n\t\t\t\tPassword: gcp.Proxy.Password,\n\t\t\t}\n\t\t}\n\t\treturn sc, nil\n\t}\n\n\tif azure := req.GetAzure(); azure != nil {\n\t\tsc.Azure = 
&storage.AzureConfig{\n\t\t\tAccountName:   azure.AccountName,\n\t\t\tAccountKey:    azure.AccountKey,\n\t\t\tContainerName: azure.ContainerName,\n\t\t}\n\t\treturn sc, nil\n\t}\n\n\tif ali := req.GetAliOSS(); ali != nil {\n\t\tsc.AliOSS = &storage.AliOSSConfig{\n\t\t\tAccessKey: ali.AccessKey,\n\t\t\tSecret:    ali.Secret,\n\t\t\tEndpoint:  ali.Endpoint,\n\t\t\tBucket:    ali.Bucket,\n\t\t}\n\t\treturn sc, nil\n\t}\n\n\tsc = p.StorageConfig\n\tif p.DisallowLocalStorage && (sc == nil || sc.IsLocal()) {\n\t\treturn nil, errors.ErrInvalidInput(\"output\")\n\t}\n\n\treturn sc, nil\n}\n\nfunc (c *StorageConfig) IsLocal() bool {\n\treturn c.S3 == nil && c.GCP == nil && c.Azure == nil && c.AliOSS == nil\n}\n\n// resolveStorageConfig returns the first non-nil StorageConfig from the chain:\n// per-output override -> request-level default.\n// Server config fallback is handled by getStorageConfig when result is nil.\nfunc resolveStorageConfig(outputStorage, requestStorage *livekit.StorageConfig) *livekit.StorageConfig {\n\tif outputStorage != nil {\n\t\treturn outputStorage\n\t}\n\treturn requestStorage\n}\n"
  },
  {
    "path": "pkg/config/test_overrides.go",
    "content": "package config\n\n// TestOverrides is used to override the default configuration for testing purposes.\ntype TestOverrides struct {\n\t// inject failure for rooms containing this substring, useful for testing failure conditions\n\tFailureInjectionRoom string `yaml:\"failure_injection_room\"`\n}\n"
  },
  {
    "path": "pkg/config/urls.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage config\n\nimport (\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/go-jose/go-jose/v4/json\"\n\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/livekit\"\n\t\"github.com/livekit/protocol/utils\"\n)\n\n// rtmp urls must be of format rtmp(s)://{host}(/{path})/{app}/{stream_key}( live=1)\nvar (\n\trtmpRegexp     = regexp.MustCompile(`^(rtmps?://)(.*/)(.*/)(\\S*)( live=1)?$`)\n\ttwitchEndpoint = regexp.MustCompile(`^rtmps?://.*\\.contribute\\.live-video\\.net/app/(.*)( live=1)?$`)\n)\n\nfunc (o *StreamConfig) AddStream(rawUrl string, outputType types.OutputType) (*Stream, error) {\n\tparsed, redacted, streamID, err := o.ValidateUrl(rawUrl, outputType)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstream := &Stream{\n\t\tParsedUrl:   parsed,\n\t\tRedactedUrl: redacted,\n\t\tStreamID:    streamID,\n\t\tStreamInfo: &livekit.StreamInfo{\n\t\t\tUrl:    redacted,\n\t\t\tStatus: livekit.StreamInfo_ACTIVE,\n\t\t},\n\t}\n\tif outputType != types.OutputTypeRTMP {\n\t\tstream.StreamInfo.StartedAt = time.Now().UnixNano()\n\t}\n\to.Streams.Store(parsed, stream)\n\n\treturn stream, nil\n}\n\nfunc (o *StreamConfig) ValidateUrl(rawUrl string, outputType types.OutputType) (\n\tparsed string, redacted string, streamID string, err error,\n) 
{\n\tparsedUrl, err := url.Parse(rawUrl)\n\tif err != nil {\n\t\terr = errors.ErrInvalidUrl(rawUrl, err.Error())\n\t\treturn\n\t}\n\tif types.StreamOutputTypes[parsedUrl.Scheme] != outputType {\n\t\terr = errors.ErrInvalidUrl(rawUrl, \"invalid scheme\")\n\t\treturn\n\t}\n\n\tswitch outputType {\n\tcase types.OutputTypeRTMP:\n\t\tif parsedUrl.Scheme == \"mux\" {\n\t\t\tparsed = fmt.Sprintf(\"rtmps://global-live.mux.com:443/app/%s\", parsedUrl.Host)\n\t\t} else if parsedUrl.Scheme == \"twitch\" {\n\t\t\tparsed, err = o.updateTwitchURL(parsedUrl.Host)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t} else if match := twitchEndpoint.FindStringSubmatch(rawUrl); len(match) > 0 {\n\t\t\tif updated, err := o.updateTwitchURL(match[1]); err == nil {\n\t\t\t\tparsed = updated\n\t\t\t}\n\t\t} else {\n\t\t\tparsed = rawUrl\n\t\t}\n\n\t\tvar ok bool\n\t\tredacted, streamID, ok = redactStreamKey(parsed)\n\t\tif !ok {\n\t\t\terr = errors.ErrInvalidUrl(rawUrl, \"rtmp urls must be of format rtmp(s)://{host}(/{path})/{app}/{stream_key}( live=1)\")\n\t\t}\n\t\treturn\n\n\tcase types.OutputTypeSRT, types.OutputTypeRaw:\n\t\tparsed = rawUrl\n\t\tredacted = rawUrl\n\t\treturn\n\n\tdefault:\n\t\terr = errors.ErrInvalidInput(\"stream output type\")\n\t\treturn\n\t}\n}\n\nfunc (o *StreamConfig) GetStream(rawUrl string) (*Stream, error) {\n\tparsedUrl, err := url.Parse(rawUrl)\n\tif err != nil {\n\t\treturn nil, errors.ErrInvalidUrl(rawUrl, err.Error())\n\t}\n\n\tvar parsed string\n\tif parsedUrl.Scheme == \"mux\" {\n\t\tparsed = fmt.Sprintf(\"rtmps://global-live.mux.com:443/app/%s\", parsedUrl.Host)\n\t} else if parsedUrl.Scheme == \"twitch\" {\n\t\tparsed, err = o.updateTwitchURL(parsedUrl.Host)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else if match := twitchEndpoint.FindStringSubmatch(rawUrl); len(match) > 0 {\n\t\tparsed, err = o.updateTwitchURL(match[1])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tparsed = rawUrl\n\t}\n\n\tstream, ok := 
o.Streams.Load(parsed)\n\tif !ok {\n\t\treturn nil, errors.ErrStreamNotFound(rawUrl)\n\t}\n\treturn stream.(*Stream), nil\n}\n\nfunc (o *StreamConfig) updateTwitchURL(key string) (string, error) {\n\tif err := o.updateTwitchTemplate(); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn strings.ReplaceAll(o.twitchTemplate, \"{stream_key}\", key), nil\n}\n\nfunc (o *StreamConfig) updateTwitchTemplate() error {\n\tif o.twitchTemplate != \"\" {\n\t\treturn nil\n\t}\n\n\tresp, err := http.Get(\"https://ingest.twitch.tv/ingests\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tvar body struct {\n\t\tIngests []struct {\n\t\t\tName              string `json:\"name\"`\n\t\t\tURLTemplate       string `json:\"url_template\"`\n\t\t\tURLTemplateSecure string `json:\"url_template_secure\"`\n\t\t\tPriority          int    `json:\"priority\"`\n\t\t} `json:\"ingests\"`\n\t}\n\tif err = json.NewDecoder(resp.Body).Decode(&body); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, ingest := range body.Ingests {\n\t\tif ingest.URLTemplateSecure != \"\" {\n\t\t\to.twitchTemplate = ingest.URLTemplateSecure\n\t\t\treturn nil\n\t\t} else if ingest.URLTemplate != \"\" {\n\t\t\to.twitchTemplate = ingest.URLTemplate\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn errors.New(\"no ingest found\")\n}\n\nfunc redactStreamKey(url string) (string, string, bool) {\n\tmatch := rtmpRegexp.FindStringSubmatch(url)\n\tif len(match) != 6 {\n\t\treturn url, \"\", false\n\t}\n\n\tstreamID := match[4]\n\tmatch[4] = utils.RedactIdentifier(match[4])\n\treturn strings.Join(match[1:], \"\"), streamID, true\n}\n"
  },
  {
    "path": "pkg/config/urls_test.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage config\n\nimport (\n\t\"regexp\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/livekit/egress/pkg/types\"\n)\n\nfunc TestValidateUrl(t *testing.T) {\n\tvar twitchUpdated = regexp.MustCompile(\"rtmps://(.*).contribute.live-video.net/app/streamkey\")\n\tvar twitchRedacted = regexp.MustCompile(`rtmps://(.*).contribute.live-video.net/app/\\{str\\.\\.\\.key}`)\n\n\to := &StreamConfig{}\n\n\tfor _, test := range []struct {\n\t\turl      string\n\t\ttwitch   bool\n\t\tparsed   string\n\t\tredacted string\n\t}{\n\t\t{\n\t\t\turl:      \"mux://streamkey\",\n\t\t\tparsed:   \"rtmps://global-live.mux.com:443/app/streamkey\",\n\t\t\tredacted: \"rtmps://global-live.mux.com:443/app/{str...key}\",\n\t\t},\n\t\t{\n\t\t\turl:    \"twitch://streamkey\",\n\t\t\ttwitch: true,\n\t\t},\n\t\t{\n\t\t\turl:    \"rtmp://fake.contribute.live-video.net/app/streamkey\",\n\t\t\ttwitch: true,\n\t\t},\n\t\t{\n\t\t\turl:      \"rtmp://localhost:1935/live/streamkey\",\n\t\t\tparsed:   \"rtmp://localhost:1935/live/streamkey\",\n\t\t\tredacted: \"rtmp://localhost:1935/live/{str...key}\",\n\t\t},\n\t\t{\n\t\t\turl:      \"rtmps://localhost:1935/live/streamkey\",\n\t\t\tparsed:   \"rtmps://localhost:1935/live/streamkey\",\n\t\t\tredacted: \"rtmps://localhost:1935/live/{str...key}\",\n\t\t},\n\t} {\n\t\tparsed, redacted, streamID, err 
:= o.ValidateUrl(test.url, types.OutputTypeRTMP)\n\t\trequire.NoError(t, err)\n\t\trequire.NotEmpty(t, streamID)\n\n\t\tif test.twitch {\n\t\t\trequire.NotEmpty(t, twitchUpdated.FindString(parsed), parsed)\n\t\t\trequire.NotEmpty(t, twitchRedacted.FindString(redacted), redacted)\n\t\t} else {\n\t\t\trequire.Equal(t, test.parsed, parsed)\n\t\t\trequire.Equal(t, test.redacted, redacted)\n\t\t}\n\t}\n}\n\nfunc TestGetUrl(t *testing.T) {\n\to := &StreamConfig{}\n\trequire.NoError(t, o.updateTwitchTemplate())\n\n\tparsedTwitchUrl := strings.ReplaceAll(o.twitchTemplate, \"{stream_key}\", \"streamkey\")\n\turls := []string{\n\t\t\"rtmps://global-live.mux.com:443/app/streamkey\",\n\t\tparsedTwitchUrl,\n\t\tparsedTwitchUrl,\n\t\t\"rtmp://localhost:1935/live/streamkey\",\n\t}\n\n\tfor _, url := range []string{urls[0], urls[1], urls[3]} {\n\t\t_, err := o.AddStream(url, types.OutputTypeRTMP)\n\t\trequire.NoError(t, err)\n\t}\n\n\tfor i, rawUrl := range []string{\n\t\t\"mux://streamkey\",\n\t\t\"twitch://streamkey\",\n\t\t\"rtmp://any.contribute.live-video.net/app/streamkey\",\n\t\t\"rtmp://localhost:1935/live/streamkey\",\n\t} {\n\t\tstream, err := o.GetStream(rawUrl)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, urls[i], stream.ParsedUrl)\n\t}\n}\n"
  },
  {
    "path": "pkg/errors/errors.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage errors\n\nimport (\n\t\"errors\"\n\t\"strings\"\n\n\t\"github.com/livekit/psrpc\"\n)\n\nfunc New(err string) error {\n\treturn errors.New(err)\n}\n\nfunc Is(err, target error) bool {\n\treturn errors.Is(err, target)\n}\n\nfunc As(err error, target any) bool {\n\treturn errors.As(err, target)\n}\n\ntype ErrArray struct {\n\terrs []error\n}\n\nfunc (e *ErrArray) AppendErr(err error) {\n\te.errs = append(e.errs, err)\n}\n\nfunc (e *ErrArray) Check(err error) {\n\tif err != nil {\n\t\te.errs = append(e.errs, err)\n\t}\n}\n\nfunc (e *ErrArray) ToError() psrpc.Error {\n\tif len(e.errs) == 0 {\n\t\treturn nil\n\t}\n\n\tcode := psrpc.Unknown\n\tvar errStr []string\n\n\t// Return the code for the first error of type psrpc.Error\n\tfor _, err := range e.errs {\n\t\tvar psrpcErr psrpc.Error\n\n\t\tif code == psrpc.Unknown && errors.As(err, &psrpcErr) {\n\t\t\tcode = psrpcErr.Code()\n\t\t}\n\n\t\terrStr = append(errStr, err.Error())\n\t}\n\n\treturn psrpc.NewErrorf(code, \"%s\", strings.Join(errStr, \"\\n\"))\n}\n\n// internal errors\n\nvar (\n\tErrNoConfig        = psrpc.NewErrorf(psrpc.Internal, \"missing config\")\n\tErrGhostPadFailed  = psrpc.NewErrorf(psrpc.Internal, \"failed to add ghost pad to bin\")\n\tErrBinAlreadyAdded = psrpc.NewErrorf(psrpc.Internal, \"bin already added to pipeline\")\n\tErrWrongHierarchy  = psrpc.NewErrorf(psrpc.Internal, 
\"pipeline can contain bins or elements, not both\")\n\tErrPipelineFrozen  = psrpc.NewErrorf(psrpc.Internal, \"pipeline frozen\")\n\tErrSinkNotFound    = psrpc.NewErrorf(psrpc.Internal, \"sink not found\")\n)\n\nfunc ErrPadLinkFailed(src, sink, status string) error {\n\treturn psrpc.NewErrorf(psrpc.Internal, \"failed to link %s to %s: %s\", src, sink, status)\n}\n\nfunc ErrGstPipelineError(err error) error {\n\treturn psrpc.NewError(psrpc.Internal, err)\n}\n\nfunc ErrProcessFailed(process string, err error) error {\n\treturn psrpc.NewErrorf(psrpc.Internal, \"failed to launch %s: %v\", process, err)\n}\n\nfunc ChromeError(err error) error {\n\treturn psrpc.NewError(psrpc.Internal, err)\n}\n\n// other errors\n\nvar (\n\tErrNonStreamingPipeline       = psrpc.NewErrorf(psrpc.InvalidArgument, \"UpdateStream called on non-streaming egress\")\n\tErrNoCompatibleCodec          = psrpc.NewErrorf(psrpc.InvalidArgument, \"no supported codec is compatible with all outputs\")\n\tErrNoCompatibleFileOutputType = psrpc.NewErrorf(psrpc.InvalidArgument, \"no supported file output type is compatible with the selected codecs\")\n\tErrEgressNotFound             = psrpc.NewErrorf(psrpc.NotFound, \"egress not found\")\n\tErrEgressAlreadyExists        = psrpc.NewErrorf(psrpc.AlreadyExists, \"egress already exists\")\n\tErrSubscriptionFailed         = psrpc.NewErrorf(psrpc.Unavailable, \"failed to subscribe to track\")\n\tErrNotEnoughCPU               = psrpc.NewErrorf(psrpc.Unavailable, \"not enough CPU\")\n\tErrShuttingDown               = psrpc.NewErrorf(psrpc.Unavailable, \"server is shutting down\")\n\tErrNonRetryableOutput         = psrpc.NewErrorf(psrpc.FailedPrecondition, \"output configuration does not support retry\")\n\tErrHandlerFailedToStart       = psrpc.NewErrorf(psrpc.Internal, \"handler failed to start\")\n)\n\nfunc PageLoadError(err string) error {\n\terr = strings.TrimPrefix(err, \"page load error \")\n\treturn psrpc.NewErrorf(psrpc.InvalidArgument, \"page load error: 
%s\", err)\n}\n\nfunc TemplateError(err string) error {\n\treturn psrpc.NewErrorf(psrpc.InvalidArgument, \"template error: %s\", err)\n}\n\nfunc ErrCouldNotParseConfig(err error) error {\n\treturn psrpc.NewErrorf(psrpc.InvalidArgument, \"could not parse config: %v\", err)\n}\n\nfunc ErrNotSupported(feature string) error {\n\treturn psrpc.NewErrorf(psrpc.InvalidArgument, \"%s is not yet supported\", feature)\n}\n\nfunc ErrIncompatible(format, codec interface{}) error {\n\treturn psrpc.NewErrorf(psrpc.InvalidArgument, \"format %v incompatible with codec %v\", format, codec)\n}\n\nfunc ErrInvalidInput(field string) error {\n\treturn psrpc.NewErrorf(psrpc.InvalidArgument, \"request has missing or invalid field: %s\", field)\n}\n\nfunc ErrInvalidUrl(url string, reason string) error {\n\treturn psrpc.NewErrorf(psrpc.InvalidArgument, \"invalid url %s: %s\", url, reason)\n}\n\nfunc ErrUploadFailed(location string, err error) error {\n\treturn psrpc.NewErrorf(psrpc.InvalidArgument, \"%s upload failed: %v\", location, err)\n}\n\nfunc ErrParticipantNotFound(identity string) error {\n\treturn psrpc.NewErrorf(psrpc.NotFound, \"participant %s not found\", identity)\n}\n\nfunc ErrStreamNotFound(url string) error {\n\treturn psrpc.NewErrorf(psrpc.NotFound, \"stream %s not found\", url)\n}\n\nfunc ErrTrackNotFound(trackID string) error {\n\treturn psrpc.NewErrorf(psrpc.NotFound, \"track %s not found\", trackID)\n}\n\nfunc ErrFeatureDisabled(feature string) error {\n\treturn psrpc.NewErrorf(psrpc.PermissionDenied, \"%s is disabled for this account\", feature)\n}\n\nfunc ErrCPUExhausted(usage float64) error {\n\treturn psrpc.NewErrorf(psrpc.PermissionDenied, \"CPU exhausted: %.2f cores used\", usage)\n}\n\nfunc ErrOOM(usage float64) error {\n\treturn psrpc.NewErrorf(psrpc.PermissionDenied, \"OOM: %.2f GB used\", usage)\n}\n"
  },
  {
    "path": "pkg/gstreamer/bin.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage gstreamer\n\nimport (\n\t\"fmt\"\n\t\"slices\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/go-gst/go-glib/glib\"\n\t\"github.com/go-gst/go-gst/gst\"\n\t\"github.com/linkdata/deadlock\"\n\t\"go.uber.org/atomic\"\n\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/protocol/logger\"\n)\n\nconst (\n\tremoveSourceBinTimeout = 3 * time.Second\n)\n\n// Locking rules for Bin/StateManager (maintainer reference):\n//  1. if both state and bin data are needed, take StateManager lock first\n//     (LockState/LockStateShared), then Bin.mu.\n//  2. for multi-bin operations, take the \"owner\"/parent bin mutex before peer\n//     bin mutexes (for example: b.mu -> src.mu -> sink.mu).\n//  3. do not introduce paths that acquire locks in the reverse order\n//     (peer/child -> parent), or AB-BA deadlocks are possible.\n//  4. for work executed later on the GLib loop (IdleAdd callbacks), snapshot\n//     fields while holding the lock that protects them:\n//     - State under StateManager lock.\n//     - Bin fields (`srcs`, `sinks`, `elements`, `pads`, etc.) under `b.mu`.\n//     Then unlock before scheduling the callback. 
Avoid holding these locks\n//     while waiting for the callback to run on the GLib loop.\n//     Exception: ForceRemoveSourceBin intentionally holds a shared state lock\n//     across the wait to prevent state-mutation races from concurrent Stop/\n//     state-transition calls while forced detach is in progress.\n//\n\n// Bin is designed to hold a single stream, with any number of sources and sinks\ntype Bin struct {\n\t*Callbacks\n\t*StateManager\n\n\tpipeline *gst.Pipeline\n\tmu       deadlock.Mutex\n\tbin      *gst.Bin\n\tlatency  time.Duration\n\n\tlinkFunc   func([]*gst.Element) error\n\tshouldLink func(string) bool\n\teosFunc    func() bool\n\tgetSrcPad  func(string) *gst.Pad\n\tgetSinkPad func(string) *gst.Pad\n\n\tadded    bool\n\tsrcs     []*Bin                   // source bins\n\telements []*gst.Element           // elements within this bin\n\tqueues   map[string]*gst.Element  // used with BinTypeMultiStream\n\tpads     map[string]*gst.GhostPad // ghost pads by bin name\n\teosSeen  map[string]*atomic.Bool  // downstream EOS seen per peer bin name\n\tsinks    []*Bin                   // sink bins\n}\n\nfunc (b *Bin) NewBin(name string) *Bin {\n\treturn &Bin{\n\t\tCallbacks:    b.Callbacks,\n\t\tStateManager: b.StateManager,\n\t\tpipeline:     b.pipeline,\n\t\tbin:          gst.NewBin(name),\n\t\tpads:         make(map[string]*gst.GhostPad),\n\t\teosSeen:      make(map[string]*atomic.Bool),\n\t}\n}\n\nfunc (b *Bin) GetName() string {\n\treturn b.bin.GetName()\n}\n\n// AddSourceBin - adds src as a source of b. This should only be called once for each source bin\nfunc (b *Bin) AddSourceBin(src *Bin) error {\n\tlogger.Debugw(fmt.Sprintf(\"adding src %s to %s\", src.bin.GetName(), b.bin.GetName()))\n\treturn b.addBin(src, gst.PadDirectionSource)\n}\n\n// AddSinkBin - adds sink as a sink of b. 
This should only be called once for each sink bin\nfunc (b *Bin) AddSinkBin(sink *Bin) error {\n\tlogger.Debugw(fmt.Sprintf(\"adding sink %s to %s\", sink.bin.GetName(), b.bin.GetName()))\n\treturn b.addBin(sink, gst.PadDirectionSink)\n}\n\nfunc (b *Bin) addBin(bin *Bin, direction gst.PadDirection) error {\n\tbin.mu.Lock()\n\talreadyAdded := bin.added\n\tbin.added = true\n\tbin.mu.Unlock()\n\tif alreadyAdded {\n\t\treturn errors.ErrBinAlreadyAdded\n\t}\n\n\tb.LockStateShared()\n\tdefer b.UnlockStateShared()\n\n\tstate := b.GetStateLocked()\n\tif state > StateRunning {\n\t\treturn nil\n\t}\n\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\n\tif direction == gst.PadDirectionSource {\n\t\tb.srcs = append(b.srcs, bin)\n\t} else {\n\t\tb.sinks = append(b.sinks, bin)\n\t}\n\n\tif err := b.pipeline.Add(bin.bin.Element); err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\n\tif state == StateBuilding {\n\t\treturn nil\n\t}\n\n\tif err := bin.link(); err != nil {\n\t\treturn err\n\t}\n\n\tvar err error\n\tbin.mu.Lock()\n\tif direction == gst.PadDirectionSource {\n\t\terr = linkPeersLocked(bin, b)\n\t} else {\n\t\terr = linkPeersLocked(b, bin)\n\t}\n\tbin.mu.Unlock()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// AddElement - adds element to the bin. Elements will be linked in the order they are added\nfunc (b *Bin) AddElement(e *gst.Element) error {\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\n\tb.elements = append(b.elements, e)\n\tif err := b.bin.Add(e); err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\n\treturn nil\n}\n\n// AddElements - adds elements to the bin. 
Elements will be linked in the order they are added\nfunc (b *Bin) AddElements(elements ...*gst.Element) error {\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\n\tb.elements = append(b.elements, elements...)\n\tif err := b.bin.AddMany(elements...); err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\treturn nil\n}\n\n// ForceRemoveSourceBin synchronously removes a source bin without waiting for EOS.\n// This is used for FlowFlushing recovery where EOS will never propagate from a stuck appsrc.\n// The removal runs on the GLib main loop thread via glib.IdleAdd and blocks until complete.\nfunc (b *Bin) ForceRemoveSourceBin(name string) error {\n\tlogger.Infow(\"force removing source bin\", \"src\", name, \"from\", b.bin.GetName())\n\n\tb.LockStateShared()\n\tdefer b.UnlockStateShared()\n\n\tstate := b.GetStateLocked()\n\tif state > StateRunning {\n\t\treturn nil\n\t}\n\n\tb.mu.Lock()\n\n\tidx := slices.IndexFunc(b.srcs, func(s *Bin) bool { return s.bin.GetName() == name })\n\tif idx == -1 {\n\t\tb.mu.Unlock()\n\t\treturn nil\n\t}\n\tsrc := b.srcs[idx]\n\n\tsrc.mu.Lock()\n\tsrcGhostPad, sinkGhostPad, ok := deleteGhostPadsLocked(src, b)\n\tsrc.mu.Unlock()\n\tif !ok {\n\t\tb.mu.Unlock()\n\t\treturn errors.New(\"ghost pads not found for force removal\")\n\t}\n\n\t// Now safe to remove from the tracking slice\n\tb.srcs = slices.Delete(b.srcs, idx, idx+1)\n\n\t// Capture references before releasing the lock.\n\t// These fields are set during construction and never modified, so safe to use after unlock.\n\tpeerElement := b.elements[0]\n\tparentBin := b.bin\n\tpipeline := b.pipeline\n\n\tb.mu.Unlock()\n\n\t// Execute removal synchronously on the GLib main loop thread\n\tdone := make(chan error, 1)\n\tif _, err := glib.IdleAdd(func() bool {\n\t\tlogger.Debugw(\"force removing source bin on GLib thread\", \"bin\", src.bin.GetName())\n\t\tdone <- detachSourceBin(src, srcGhostPad, sinkGhostPad, peerElement, parentBin, pipeline)\n\t\treturn false\n\t}); err != nil 
{\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\n\treturn <-done\n}\n\nfunc (b *Bin) RemoveSourceBin(name string) error {\n\tlogger.Debugw(fmt.Sprintf(\"removing src %s from %s\", name, b.bin.GetName()))\n\treturn b.removeBin(name, gst.PadDirectionSource)\n}\n\nfunc (b *Bin) RemoveSinkBin(name string) error {\n\tlogger.Debugw(fmt.Sprintf(\"removing sink %s from %s\", name, b.bin.GetName()))\n\treturn b.removeBin(name, gst.PadDirectionSink)\n}\n\nfunc (b *Bin) removeSourceLocked(name string) *Bin {\n\tfor i, s := range b.srcs {\n\t\tif s.bin.GetName() == name {\n\t\t\tb.srcs = append(b.srcs[:i], b.srcs[i+1:]...)\n\t\t\treturn s\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (b *Bin) removeBin(name string, direction gst.PadDirection) error {\n\tb.LockStateShared()\n\tdefer b.UnlockStateShared()\n\n\tstate := b.GetStateLocked()\n\tif state > StateRunning {\n\t\treturn nil\n\t}\n\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\n\tvar bin *Bin\n\tif direction == gst.PadDirectionSource {\n\t\tbin = b.removeSourceLocked(name)\n\t} else {\n\t\tfor i, s := range b.sinks {\n\t\t\tif s.bin.GetName() == name {\n\t\t\t\tbin = s\n\t\t\t\tb.sinks = append(b.sinks[:i], b.sinks[i+1:]...)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif bin == nil {\n\t\treturn nil\n\t}\n\n\tif state == StateBuilding {\n\t\tif err := b.pipeline.Remove(bin.bin.Element); err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\t\treturn nil\n\t}\n\n\tif direction == gst.PadDirectionSource {\n\t\tb.probeRemoveSource(bin)\n\t} else {\n\t\tb.probeRemoveSink(bin)\n\t}\n\n\treturn nil\n}\n\nfunc (b *Bin) probeRemoveSource(src *Bin) {\n\tsrc.mu.Lock()\n\tsrcGhostPad, sinkGhostPad, ok := deleteGhostPadsLocked(src, b)\n\tsrc.mu.Unlock()\n\tif !ok {\n\t\treturn\n\t}\n\n\tvar removed atomic.Bool\n\tvar removalScheduled atomic.Bool\n\tsrcPad := srcGhostPad.GetTarget()\n\tsinkPad := sinkGhostPad.GetTarget()\n\n\tvar eosSeen *atomic.Bool\n\tsrc.mu.Lock()\n\tif seen, ok := src.eosSeen[b.bin.GetName()]; ok {\n\t\teosSeen = 
seen\n\t}\n\tsrc.mu.Unlock()\n\n\tscheduleRemoval := func(reason string) {\n\t\tif !removalScheduled.CompareAndSwap(false, true) {\n\t\t\treturn\n\t\t}\n\n\t\tif _, err := glib.IdleAdd(func() bool {\n\t\t\tremoved.Store(true)\n\t\t\tlogger.Debugw(\"removing source bin\", \"bin\", src.bin.GetName(), \"reason\", reason)\n\t\t\tif err := detachSourceBin(src, srcGhostPad, sinkGhostPad, b.elements[0], b.bin, b.pipeline); err != nil {\n\t\t\t\tlogger.Errorw(\"failed to detach source bin\", err, \"bin\", src.bin.GetName())\n\t\t\t}\n\t\t\treturn false\n\t\t}); err != nil {\n\t\t\tlogger.Errorw(\"failed to schedule source bin removal\", err, \"bin\", src.bin.GetName())\n\t\t}\n\t}\n\n\tprobe := func(_ *gst.Pad, info *gst.PadProbeInfo) gst.PadProbeReturn {\n\t\tif removed.Load() {\n\t\t\treturn gst.PadProbeRemove\n\t\t}\n\n\t\tif info.Type()&gst.PadProbeTypeEventDownstream != 0 {\n\t\t\tif event := info.GetEvent(); event != nil && event.Type() == gst.EventTypeEOS {\n\t\t\t\tlogger.Debugw(\"received EOS\", \"bin\", src.bin.GetName())\n\t\t\t\tif eosSeen != nil {\n\t\t\t\t\teosSeen.Store(true)\n\t\t\t\t}\n\t\t\t\tscheduleRemoval(\"eos\")\n\t\t\t}\n\t\t}\n\n\t\treturn gst.PadProbeOK\n\t}\n\tsrcPad.AddProbe(gst.PadProbeTypeEventDownstream, probe)\n\tsinkPad.AddProbe(gst.PadProbeTypeEventDownstream, probe)\n\n\tif eosSeen != nil && eosSeen.Load() {\n\t\tlogger.Debugw(\"eos already seen, removing source bin\", \"bin\", src.bin.GetName(), \"reason\", \"eos-seen-after-probe\")\n\t\tscheduleRemoval(\"eos-seen-after-probe\")\n\t\treturn\n\t}\n\n\ttime.AfterFunc(removeSourceBinTimeout, func() {\n\t\tif removalScheduled.Load() {\n\t\t\treturn\n\t\t}\n\t\tlogger.Warnw(\"timeout waiting for EOS before removing source bin\", nil, \"bin\", src.bin.GetName())\n\t\tscheduleRemoval(\"timeout\")\n\t})\n}\n\nfunc (b *Bin) probeRemoveSink(sink *Bin) {\n\tsink.mu.Lock()\n\tsrcGhostPad, sinkGhostPad, ok := deleteGhostPadsLocked(b, sink)\n\tsink.mu.Unlock()\n\tif !ok 
{\n\t\treturn\n\t}\n\n\tsrcGhostPad.AddProbe(gst.PadProbeTypeAllBoth, func(_ *gst.Pad, _ *gst.PadProbeInfo) gst.PadProbeReturn {\n\t\tsrcGhostPad.Unlink(sinkGhostPad.Pad)\n\t\tsinkGhostPad.SendEvent(gst.NewEOSEvent())\n\n\t\tb.mu.Lock()\n\t\terr := b.pipeline.Remove(sink.bin.Element)\n\t\tb.mu.Unlock()\n\n\t\tif err != nil {\n\t\t\tb.OnError(errors.ErrGstPipelineError(err))\n\t\t\treturn gst.PadProbeRemove\n\t\t}\n\n\t\tif err = sink.SetState(gst.StateNull); err != nil {\n\t\t\tlogger.Warnw(fmt.Sprintf(\"failed to change %s state\", sink.bin.GetName()), err)\n\t\t}\n\n\t\tb.elements[len(b.elements)-1].ReleaseRequestPad(srcGhostPad.GetTarget())\n\t\tb.bin.RemovePad(srcGhostPad.Pad)\n\t\treturn gst.PadProbeOK\n\t})\n}\n\n// detachSourceBin performs the GStreamer operations to disconnect and remove a source bin.\n// Must be called on the GLib main loop thread.\nfunc detachSourceBin(src *Bin, srcGhostPad, sinkGhostPad *gst.GhostPad, peerElement *gst.Element, parentBin *gst.Bin, pipeline *gst.Pipeline) error {\n\tsinkPad := sinkGhostPad.GetTarget()\n\n\tpeerElement.ReleaseRequestPad(sinkPad)\n\tsrcGhostPad.Unlink(sinkGhostPad.Pad)\n\tparentBin.RemovePad(sinkGhostPad.Pad)\n\n\tif err := pipeline.Remove(src.bin.Element); err != nil {\n\t\tlogger.Warnw(\"failed to remove bin\", err, \"bin\", src.bin.GetName())\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\n\tif err := src.bin.SetState(gst.StateNull); err != nil {\n\t\tlogger.Warnw(\"failed to change bin state\", err, \"bin\", src.bin.GetName())\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\n\treturn nil\n}\n\nfunc deleteGhostPadsLocked(src, sink *Bin) (*gst.GhostPad, *gst.GhostPad, bool) {\n\tsrcPad, srcOK := src.pads[sink.bin.GetName()]\n\tif !srcOK {\n\t\tlogger.Errorw(\"source pad missing\", nil, \"bin\", src.bin.GetName())\n\t}\n\tdelete(src.pads, sink.bin.GetName())\n\t// keep eosSeen so probeRemoveSource can still detect prior EOS when called after pad deletion\n\n\tsinkPad, sinkOK := 
sink.pads[src.bin.GetName()]\n\tif !sinkOK {\n\t\tlogger.Errorw(\"sink pad missing\", nil, \"bin\", sink.bin.GetName())\n\t}\n\tdelete(sink.pads, src.bin.GetName())\n\n\treturn srcPad, sinkPad, srcOK && sinkOK\n}\n\nfunc (b *Bin) SetState(state gst.State) error {\n\tstateErr := make(chan error, 1)\n\tgo func() {\n\t\tstateErr <- b.bin.SetState(state)\n\t}()\n\tselect {\n\tcase <-time.After(stateChangeTimeout):\n\t\treturn errors.ErrPipelineFrozen\n\tcase err := <-stateErr:\n\t\tif err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\t}\n\treturn nil\n}\n\n// SetLinkFunc - sets a custom linking function for this bin's elements (used when you need to modify chain functions)\nfunc (b *Bin) SetLinkFunc(f func([]*gst.Element) error) {\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\n\tb.linkFunc = f\n}\n\nfunc (b *Bin) SetShouldLink(f func(string) bool) {\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\n\tb.shouldLink = f\n}\n\n// SetGetSrcPad - sets a custom linking function which returns a pad for the named src bin\nfunc (b *Bin) SetGetSrcPad(f func(srcName string) *gst.Pad) {\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\n\tb.getSrcPad = f\n}\n\n// SetGetSinkPad - sets a custom linking function which returns a pad for the named sink bin\nfunc (b *Bin) SetGetSinkPad(f func(sinkName string) *gst.Pad) {\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\n\tb.getSinkPad = f\n}\n\n// SetEOSFunc - sets a custom EOS function (used for appsrc, input-selector). 
If it returns true, EOS will also be sent to src bins\nfunc (b *Bin) SetEOSFunc(f func() bool) {\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\n\tb.eosFunc = f\n}\n\nfunc (b *Bin) sendEOS() {\n\tb.mu.Lock()\n\teosFunc := b.eosFunc\n\tsrcs := b.srcs\n\tb.mu.Unlock()\n\n\tif eosFunc != nil && !eosFunc() {\n\t\treturn\n\t}\n\n\tif len(srcs) > 0 {\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(len(srcs))\n\t\tfor _, src := range srcs {\n\t\t\tgo func(s *Bin) {\n\t\t\t\ts.sendEOS()\n\t\t\t\twg.Done()\n\t\t\t}(src)\n\t\t}\n\t\twg.Wait()\n\t} else if len(b.elements) > 0 {\n\t\tb.bin.SendEvent(gst.NewEOSEvent())\n\t}\n}\n\n// AddOnEOSReceived adds a callback to be called when EOS is received on every pad of the last element in the bin\nfunc (b *Bin) AddOnEOSReceived(f func()) error {\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\n\tif len(b.elements) == 0 {\n\t\treturn nil\n\t}\n\n\tsink := b.elements[len(b.elements)-1]\n\tsinkPads, err := sink.GetSinkPads()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar expecting atomic.Int32\n\texpecting.Add(int32(len(sinkPads)))\n\n\tfor _, sinkPad := range sinkPads {\n\t\tsinkPad.AddProbe(gst.PadProbeTypeEventDownstream, func(_ *gst.Pad, info *gst.PadProbeInfo) gst.PadProbeReturn {\n\t\t\tif event := info.GetEvent(); event != nil && event.Type() == gst.EventTypeEOS {\n\t\t\t\tif expecting.Dec() == 0 {\n\t\t\t\t\tf()\n\t\t\t\t}\n\t\t\t\treturn gst.PadProbeRemove\n\t\t\t}\n\t\t\treturn gst.PadProbeOK\n\t\t})\n\t}\n\n\treturn nil\n}\n\n// ----- Internal -----\n\nfunc (b *Bin) link() error {\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\n\tfor _, src := range b.srcs {\n\t\tif err := src.link(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tfor _, sink := range b.sinks {\n\t\tif err := sink.link(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(b.elements) > 0 {\n\t\tif b.linkFunc != nil {\n\t\t\tif err := b.linkFunc(b.elements); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\t// link elements\n\t\t\tif err := 
gst.ElementLinkMany(b.elements...); err != nil {\n\t\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t\t}\n\t\t}\n\n\t\tfor _, src := range getPeerSrcs(b.srcs) {\n\t\t\tsrc.mu.Lock()\n\t\t\terr := linkPeersLocked(src, b)\n\t\t\tsrc.mu.Unlock()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tfor _, sink := range getPeerSinks(b.sinks) {\n\t\t\tsink.mu.Lock()\n\t\t\terr := linkPeersLocked(b, sink)\n\t\t\tsink.mu.Unlock()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t} else {\n\t\t// link src bins to sink bins\n\t\tsrcs := getPeerSrcs(b.srcs)\n\t\tsinks := getPeerSinks(b.sinks)\n\n\t\taddQueues := len(sinks) > 1\n\t\tfor _, src := range srcs {\n\t\t\tsrc.mu.Lock()\n\t\t\tfor _, sink := range sinks {\n\t\t\t\tsink.mu.Lock()\n\t\t\t\tvar err error\n\t\t\t\tif addQueues {\n\t\t\t\t\terr = b.queueLinkPeersLocked(src, sink)\n\t\t\t\t} else {\n\t\t\t\t\terr = linkPeersLocked(src, sink)\n\t\t\t\t}\n\t\t\t\tsink.mu.Unlock()\n\t\t\t\tif err != nil {\n\t\t\t\t\tsrc.mu.Unlock()\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tsrc.mu.Unlock()\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc linkPeersLocked(src, sink *Bin) error {\n\tsrcPad, sinkPad, err := createGhostPadsLocked(src, sink, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsrcState := src.bin.GetCurrentState()\n\tsinkState := sink.bin.GetCurrentState()\n\n\tif srcState != sinkState {\n\t\tif srcState == gst.StateNull {\n\t\t\tsrcPad.AddProbe(gst.PadProbeTypeBlockDownstream, func(_ *gst.Pad, _ *gst.PadProbeInfo) gst.PadProbeReturn {\n\t\t\t\tif padReturn := srcPad.Link(sinkPad.Pad); padReturn != gst.PadLinkOK {\n\t\t\t\t\tlogger.Errorw(\"failed to link\", errors.ErrPadLinkFailed(src.bin.GetName(), sink.bin.GetName(), padReturn.String()))\n\t\t\t\t}\n\t\t\t\treturn gst.PadProbeRemove\n\t\t\t})\n\t\t\treturn src.SetState(gst.StatePlaying)\n\t\t}\n\n\t\tif sinkState == gst.StateNull {\n\t\t\tsrcPad.AddProbe(gst.PadProbeTypeBlockDownstream, func(_ *gst.Pad, _ *gst.PadProbeInfo) gst.PadProbeReturn 
{\n\t\t\t\tif err = sink.SetState(gst.StatePlaying); err != nil {\n\t\t\t\t\tsrc.OnError(errors.ErrGstPipelineError(err))\n\t\t\t\t\treturn gst.PadProbeHandled\n\t\t\t\t}\n\n\t\t\t\treturn gst.PadProbeRemove\n\t\t\t})\n\t\t}\n\t}\n\n\tif padReturn := srcPad.Link(sinkPad.Pad); padReturn != gst.PadLinkOK {\n\t\treturn errors.ErrPadLinkFailed(src.bin.GetName(), sink.bin.GetName(), padReturn.String())\n\t}\n\n\treturn nil\n}\n\nfunc (b *Bin) queueLinkPeersLocked(src, sink *Bin) error {\n\tsrcName := src.bin.GetName()\n\tsinkName := sink.bin.GetName()\n\n\tif (src.shouldLink != nil && !src.shouldLink(sinkName)) || (sink.shouldLink != nil && !sink.shouldLink(srcName)) {\n\t\treturn nil\n\t}\n\n\tqueueName := fmt.Sprintf(\"%s_%s_queue\", srcName, sinkName)\n\tqueue, err := BuildQueue(queueName, b.latency, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\tb.queues[queueName] = queue\n\tif err = sink.bin.Add(queue); err != nil {\n\t\treturn err\n\t}\n\n\tsrcPad, sinkPad, err := createGhostPadsLocked(src, sink, queue)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif padReturn := srcPad.Link(sinkPad.Pad); padReturn != gst.PadLinkOK {\n\t\treturn errors.ErrPadLinkFailed(srcName, queueName, padReturn.String())\n\t}\n\n\treturn nil\n}\n\nfunc getPeerSrcs(srcs []*Bin) []*Bin {\n\tflattened := make([]*Bin, 0, len(srcs))\n\tfor _, src := range srcs {\n\t\tif len(src.elements) > 0 {\n\t\t\tflattened = append(flattened, src)\n\t\t} else {\n\t\t\tflattened = append(flattened, getPeerSrcs(src.srcs)...)\n\t\t}\n\t}\n\treturn flattened\n}\n\nfunc getPeerSinks(sinks []*Bin) []*Bin {\n\tflattened := make([]*Bin, 0, len(sinks))\n\tfor _, sink := range sinks {\n\t\tif len(sink.elements) > 0 {\n\t\t\tflattened = append(flattened, sink)\n\t\t} else {\n\t\t\tflattened = append(flattened, getPeerSinks(sink.sinks)...)\n\t\t}\n\t}\n\treturn flattened\n}\n"
  },
  {
    "path": "pkg/gstreamer/builder.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage gstreamer\n\nimport (\n\t\"time\"\n\n\t\"github.com/go-gst/go-gst/gst\"\n\n\t\"github.com/livekit/egress/pkg/errors\"\n)\n\nfunc BuildQueue(name string, latency time.Duration, leaky bool) (*gst.Element, error) {\n\tqueue, err := gst.NewElementWithName(\"queue\", name)\n\tif err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\tif latency > 0 {\n\t\tif err = queue.SetProperty(\"max-size-time\", uint64(latency)); err != nil {\n\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t}\n\t\tif err = queue.SetProperty(\"max-size-bytes\", uint(0)); err != nil {\n\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t}\n\t\tif err = queue.SetProperty(\"max-size-buffers\", uint(0)); err != nil {\n\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t}\n\t}\n\tif leaky {\n\t\tqueue.SetArg(\"leaky\", \"downstream\")\n\t\tNewLeakyQueueMonitor(name, queue)\n\t}\n\n\treturn queue, nil\n}\n\nfunc BuildAudioRate(name string, tolerance time.Duration) (*gst.Element, error) {\n\taudioRate, err := gst.NewElementWithName(\"audiorate\", name)\n\tif err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\n\tif err = audioRate.SetProperty(\"skip-to-first\", true); err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\n\tif err = audioRate.SetProperty(\"tolerance\", uint64(tolerance)); err != nil {\n\t\treturn nil, 
errors.ErrGstPipelineError(err)\n\t}\n\n\treturn audioRate, nil\n}\n"
  },
  {
    "path": "pkg/gstreamer/callbacks.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage gstreamer\n\nimport (\n\t\"github.com/frostbyte73/core\"\n\t\"github.com/linkdata/deadlock\"\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/errors\"\n)\n\ntype Callbacks struct {\n\tmu         deadlock.RWMutex\n\tGstReady   chan struct{}\n\tBuildReady chan struct{}\n\n\t// upstream callbacks\n\tonError           func(error)\n\tonStop            []func() error\n\tonDebugDotRequest func(string)\n\n\t// source callbacks\n\tonTrackAdded     []func(*config.TrackSource)\n\tonTrackMuted     []func(string)\n\tonTrackUnmuted   []func(string)\n\tonTrackRemoved   []func(string)\n\tonSourceBinReset []func(*config.TrackSource) error\n\tonEOSSent        func()\n\n\tpipelinePaused core.Fuse\n}\n\nfunc (c *Callbacks) SetOnError(f func(error)) {\n\tc.mu.Lock()\n\tc.onError = f\n\tc.mu.Unlock()\n}\n\nfunc (c *Callbacks) OnError(err error) {\n\tc.mu.RLock()\n\tonError := c.onError\n\tc.mu.RUnlock()\n\n\tif onError != nil {\n\t\tonError(err)\n\t}\n}\n\nfunc (c *Callbacks) SetOnDebugDotRequest(f func(string)) {\n\tc.mu.Lock()\n\tc.onDebugDotRequest = f\n\tc.mu.Unlock()\n}\n\nfunc (c *Callbacks) OnDebugDotRequest(reason string) {\n\tc.mu.RLock()\n\tonDebugDotRequest := c.onDebugDotRequest\n\tc.mu.RUnlock()\n\n\tif onDebugDotRequest != nil {\n\t\tonDebugDotRequest(reason)\n\t}\n}\n\nfunc (c *Callbacks) PipelinePaused() <-chan struct{} 
{\n\treturn c.pipelinePaused.Watch()\n}\n\nfunc (c *Callbacks) OnPipelinePaused() {\n\tc.pipelinePaused.Break()\n}\n\nfunc (c *Callbacks) AddOnStop(f func() error) {\n\tc.mu.Lock()\n\tc.onStop = append(c.onStop, f)\n\tc.mu.Unlock()\n}\n\nfunc (c *Callbacks) OnStop() error {\n\tc.mu.RLock()\n\tonStop := c.onStop\n\tc.mu.RUnlock()\n\n\terrArray := &errors.ErrArray{}\n\tfor _, f := range onStop {\n\t\terrArray.Check(f())\n\t}\n\treturn errArray.ToError()\n}\n\nfunc (c *Callbacks) AddOnTrackAdded(f func(*config.TrackSource)) {\n\tc.mu.Lock()\n\tc.onTrackAdded = append(c.onTrackAdded, f)\n\tc.mu.Unlock()\n}\n\nfunc (c *Callbacks) OnTrackAdded(ts *config.TrackSource) {\n\tc.mu.RLock()\n\tonTrackAdded := c.onTrackAdded\n\tc.mu.RUnlock()\n\n\tfor _, f := range onTrackAdded {\n\t\tf(ts)\n\t}\n}\n\nfunc (c *Callbacks) AddOnTrackMuted(f func(string)) {\n\tc.mu.Lock()\n\tc.onTrackMuted = append(c.onTrackMuted, f)\n\tc.mu.Unlock()\n}\n\nfunc (c *Callbacks) OnTrackMuted(trackID string) {\n\tc.mu.RLock()\n\tonTrackMuted := c.onTrackMuted\n\tc.mu.RUnlock()\n\n\tfor _, f := range onTrackMuted {\n\t\tf(trackID)\n\t}\n}\n\nfunc (c *Callbacks) AddOnTrackUnmuted(f func(string)) {\n\tc.mu.Lock()\n\tc.onTrackUnmuted = append(c.onTrackUnmuted, f)\n\tc.mu.Unlock()\n}\n\nfunc (c *Callbacks) OnTrackUnmuted(trackID string) {\n\tc.mu.RLock()\n\tonTrackUnmuted := c.onTrackUnmuted\n\tc.mu.RUnlock()\n\n\tfor _, f := range onTrackUnmuted {\n\t\tf(trackID)\n\t}\n}\n\nfunc (c *Callbacks) AddOnTrackRemoved(f func(string)) {\n\tc.mu.Lock()\n\tc.onTrackRemoved = append(c.onTrackRemoved, f)\n\tc.mu.Unlock()\n}\n\nfunc (c *Callbacks) OnTrackRemoved(trackID string) {\n\tc.mu.RLock()\n\tonTrackRemoved := c.onTrackRemoved\n\tc.mu.RUnlock()\n\n\tfor _, f := range onTrackRemoved {\n\t\tf(trackID)\n\t}\n}\n\nfunc (c *Callbacks) AddOnSourceBinReset(f func(*config.TrackSource) error) {\n\tc.mu.Lock()\n\tc.onSourceBinReset = append(c.onSourceBinReset, f)\n\tc.mu.Unlock()\n}\n\n// OnSourceBinReset calls registered 
handlers to force-remove a stuck source bin and\n// replace it with a new one. Each handler checks the track kind and returns nil if\n// not applicable. The first handler that returns a non-nil error aborts the operation.\n// On success, ts.AppSrc is updated to the new appsrc by the handler.\nfunc (c *Callbacks) OnSourceBinReset(ts *config.TrackSource) error {\n\tc.mu.RLock()\n\thandlers := c.onSourceBinReset\n\tc.mu.RUnlock()\n\n\tfor _, f := range handlers {\n\t\tif err := f(ts); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (c *Callbacks) SetOnEOSSent(f func()) {\n\tc.mu.Lock()\n\tc.onEOSSent = f\n\tc.mu.Unlock()\n}\n\nfunc (c *Callbacks) OnEOSSent() {\n\tc.mu.RLock()\n\tonEOSSent := c.onEOSSent\n\tc.mu.RUnlock()\n\n\tif onEOSSent != nil {\n\t\tonEOSSent()\n\t}\n}\n"
  },
  {
    "path": "pkg/gstreamer/pads.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage gstreamer\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/go-gst/go-gst/gst\"\n\t\"go.uber.org/atomic\"\n\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/protocol/logger\"\n)\n\ntype padTemplate struct {\n\telement   *gst.Element\n\ttemplate  *gst.PadTemplate\n\tcapsNames map[string]struct{}\n\tdataTypes map[string]struct{}\n}\n\nfunc (p *padTemplate) toPad() *gst.Pad {\n\tif p.template.Presence() == gst.PadPresenceAlways {\n\t\treturn p.element.GetStaticPad(p.template.Name())\n\t}\n\treturn p.element.GetRequestPad(p.template.Name())\n}\n\nfunc (p *padTemplate) findDirectMatch(others []*padTemplate) *padTemplate {\n\tfor _, other := range others {\n\t\tfor capsName := range p.capsNames {\n\t\t\tif _, ok := other.capsNames[capsName]; ok {\n\t\t\t\treturn other\n\t\t\t}\n\t\t}\n\t\tfor dataType := range p.dataTypes {\n\t\t\tif _, ok := other.dataTypes[dataType]; ok {\n\t\t\t\treturn other\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (p *padTemplate) findAnyMatch(others []*padTemplate) *padTemplate {\n\tfor _, other := range others {\n\t\tif _, ok := p.dataTypes[\"ANY\"]; ok {\n\t\t\treturn other\n\t\t}\n\t\tif _, ok := other.dataTypes[\"ANY\"]; ok {\n\t\t\treturn other\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc createGhostPadsLocked(src, sink *Bin, queue *gst.Element) (*gst.GhostPad, *gst.GhostPad, error) {\n\tsrcName := 
src.bin.GetName()\n\tsinkName := sink.bin.GetName()\n\n\tsrcPad, sinkPad, err := matchPadsLocked(src, sink)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\teosSeen := &atomic.Bool{}\n\tsrc.eosSeen[sinkName] = eosSeen\n\tsrcPad.AddProbe(gst.PadProbeTypeEventDownstream, func(_ *gst.Pad, info *gst.PadProbeInfo) gst.PadProbeReturn {\n\t\tif event := info.GetEvent(); event != nil && event.Type() == gst.EventTypeEOS {\n\t\t\teosSeen.Store(true)\n\t\t}\n\t\treturn gst.PadProbeOK\n\t})\n\n\tsrcGhostPad := gst.NewGhostPad(fmt.Sprintf(\"%s_%s_sink\", srcName, sinkName), srcPad)\n\tsrc.pads[sinkName] = srcGhostPad\n\tsrc.bin.AddPad(srcGhostPad.Pad)\n\n\tif queue != nil {\n\t\tif padReturn := queue.GetStaticPad(\"src\").Link(sinkPad); padReturn != gst.PadLinkOK {\n\t\t\treturn nil, nil, errors.ErrPadLinkFailed(queue.GetName(), sinkName, padReturn.String())\n\t\t}\n\n\t\tsinkGhostPad := gst.NewGhostPad(fmt.Sprintf(\"%s_%s_src\", srcName, sinkName), queue.GetStaticPad(\"sink\"))\n\t\tsink.pads[srcName] = sinkGhostPad\n\t\tsink.bin.AddPad(sinkGhostPad.Pad)\n\t\treturn srcGhostPad, sinkGhostPad, nil\n\t}\n\n\tsinkGhostPad := gst.NewGhostPad(fmt.Sprintf(\"%s_%s_src\", srcName, sinkName), sinkPad)\n\tsink.pads[srcName] = sinkGhostPad\n\tsink.bin.AddPad(sinkGhostPad.Pad)\n\treturn srcGhostPad, sinkGhostPad, nil\n}\n\nfunc matchPadsLocked(src, sink *Bin) (*gst.Pad, *gst.Pad, error) {\n\tvar srcPad, sinkPad *gst.Pad\n\tvar srcTemplates, sinkTemplates []*padTemplate\n\tif src.getSinkPad != nil {\n\t\tsrcPad = src.getSinkPad(sink.bin.GetName())\n\t} else {\n\t\tsrcTemplates = src.getPadTemplatesLocked(gst.PadDirectionSource)\n\t}\n\tif sink.getSrcPad != nil {\n\t\tsinkPad = sink.getSrcPad(src.bin.GetName())\n\t} else {\n\t\tsinkTemplates = sink.getPadTemplatesLocked(gst.PadDirectionSink)\n\t}\n\n\tswitch {\n\tcase srcPad != nil && sinkPad != nil:\n\t\treturn srcPad, sinkPad, nil\n\tcase srcPad != nil && len(sinkTemplates) == 1:\n\t\treturn srcPad, sinkTemplates[0].toPad(), 
nil\n\tcase sinkPad != nil && len(srcTemplates) == 1:\n\t\treturn srcTemplates[0].toPad(), sinkPad, nil\n\tcase len(srcTemplates) > 0 && len(sinkTemplates) > 0:\n\t\tfor _, srcTemplate := range srcTemplates {\n\t\t\tif sinkTemplate := srcTemplate.findDirectMatch(sinkTemplates); sinkTemplate != nil {\n\t\t\t\treturn srcTemplate.toPad(), sinkTemplate.toPad(), nil\n\t\t\t}\n\t\t}\n\t\tfor _, srcTemplate := range srcTemplates {\n\t\t\tif sinkTemplate := srcTemplate.findAnyMatch(sinkTemplates); sinkTemplate != nil {\n\t\t\t\treturn srcTemplate.toPad(), sinkTemplate.toPad(), nil\n\t\t\t}\n\t\t}\n\t}\n\n\tlogger.Warnw(\"could not match pads\", nil,\n\t\t\"src\", src.bin.GetName(), \"sink\", sink.bin.GetName(),\n\t\t\"srcTemplates\", srcTemplates, \"sinkTemplates\", sinkTemplates)\n\treturn nil, nil, errors.ErrGhostPadFailed\n}\n\nfunc (b *Bin) getPadTemplatesLocked(direction gst.PadDirection) []*padTemplate {\n\tvar element *gst.Element\n\tif direction == gst.PadDirectionSource {\n\t\telement = b.elements[len(b.elements)-1]\n\t} else {\n\t\telement = b.elements[0]\n\t}\n\n\tallTemplates := element.GetPadTemplates()\n\ttemplates := make([]*padTemplate, 0)\n\n\tfor _, template := range allTemplates {\n\t\tif template.Direction() == direction {\n\t\t\tt := &padTemplate{\n\t\t\t\telement:   element,\n\t\t\t\ttemplate:  template,\n\t\t\t\tcapsNames: make(map[string]struct{}),\n\t\t\t\tdataTypes: make(map[string]struct{}),\n\t\t\t}\n\n\t\t\tcaps := template.Caps()\n\t\t\tif caps.IsAny() {\n\t\t\t\tif strings.HasPrefix(template.Name(), direction.String()) {\n\t\t\t\t\t// src/src_%u/sink/sink_%u pad\n\t\t\t\t\tcapsNames, dataTypes, ok := b.getTypesLocked(direction)\n\t\t\t\t\tif ok {\n\t\t\t\t\t\tt.capsNames = capsNames\n\t\t\t\t\t\tt.dataTypes = dataTypes\n\t\t\t\t\t} else {\n\t\t\t\t\t\tt.dataTypes[\"ANY\"] = struct{}{}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// audio/audio_%u/video/video_%u pad\n\t\t\t\t\tdataType := template.Name()\n\t\t\t\t\tdataType = 
strings.TrimSuffix(dataType, \"_%u\")\n\t\t\t\t\tt.dataTypes[dataType] = struct{}{}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// pad has caps\n\t\t\t\tsplitCaps := strings.Split(caps.String(), \"; \")\n\t\t\t\tfor _, c := range splitCaps {\n\t\t\t\t\tcapsName := strings.SplitN(c, \",\", 2)[0]\n\t\t\t\t\tt.capsNames[capsName] = struct{}{}\n\t\t\t\t\tt.dataTypes[strings.Split(capsName, \"/\")[0]] = struct{}{}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ttemplates = append(templates, t)\n\t\t}\n\t}\n\n\treturn templates\n}\n\nfunc (b *Bin) getTypesLocked(direction gst.PadDirection) (map[string]struct{}, map[string]struct{}, bool) {\n\tvar i int\n\tif direction == gst.PadDirectionSource {\n\t\ti = len(b.elements) - 1\n\t}\n\n\tfor i >= 0 && i < len(b.elements) {\n\t\tallTemplates := b.elements[i].GetPadTemplates()\n\t\tfor _, template := range allTemplates {\n\t\t\tif template.Direction() == gst.PadDirectionSource {\n\t\t\t\tif caps := template.Caps(); !caps.IsAny() {\n\t\t\t\t\tcapsNames := make(map[string]struct{})\n\t\t\t\t\tdataTypes := make(map[string]struct{})\n\t\t\t\t\tsplitCaps := strings.Split(caps.String(), \";\")\n\t\t\t\t\tfor _, c := range splitCaps {\n\t\t\t\t\t\tcapsName := strings.SplitN(c, \",\", 2)[0]\n\t\t\t\t\t\tcapsNames[capsName] = struct{}{}\n\t\t\t\t\t\tdataTypes[strings.Split(capsName, \"/\")[0]] = struct{}{}\n\t\t\t\t\t}\n\t\t\t\t\treturn capsNames, dataTypes, true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif direction == gst.PadDirectionSource {\n\t\t\ti--\n\t\t} else {\n\t\t\ti++\n\t\t}\n\t}\n\n\tif direction == gst.PadDirectionSource {\n\t\tfor _, src := range b.srcs {\n\t\t\tsrc.mu.Lock()\n\t\t\tcapsNames, dataTypes, ok := src.getTypesLocked(direction)\n\t\t\tsrc.mu.Unlock()\n\t\t\tif ok {\n\t\t\t\treturn capsNames, dataTypes, true\n\t\t\t}\n\t\t}\n\t} else {\n\t\tfor _, sink := range b.sinks {\n\t\t\tsink.mu.Lock()\n\t\t\tcapsNames, dataTypes, ok := sink.getTypesLocked(direction)\n\t\t\tsink.mu.Unlock()\n\t\t\tif ok {\n\t\t\t\treturn capsNames, dataTypes, 
true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil, nil, false\n}\n"
  },
  {
    "path": "pkg/gstreamer/pipeline.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage gstreamer\n\nimport (\n\t\"math\"\n\t\"time\"\n\n\t\"github.com/go-gst/go-glib/glib\"\n\t\"github.com/go-gst/go-gst/gst\"\n\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/protocol/logger\"\n)\n\nconst (\n\tstateChangeTimeout = time.Second * 15\n)\n\ntype Pipeline struct {\n\t*Bin\n\n\tloop          *glib.MainLoop\n\tbinsAdded     bool\n\telementsAdded bool\n}\n\n// A pipeline can have either elements or src and sink bins. 
If you add both you will get a wrong hierarchy error\n// Bins can contain both elements and src and sink bins\n\nfunc NewPipeline(name string, latency time.Duration, callbacks *Callbacks) (*Pipeline, error) {\n\tpipeline, err := gst.NewPipeline(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Pipeline{\n\t\tBin: &Bin{\n\t\t\tCallbacks:    callbacks,\n\t\t\tStateManager: &StateManager{},\n\t\t\tpipeline:     pipeline,\n\t\t\tbin:          pipeline.Bin,\n\t\t\tlatency:      latency,\n\t\t\tqueues:       make(map[string]*gst.Element),\n\t\t},\n\t\tloop: glib.NewMainLoop(glib.MainContextDefault(), false),\n\t}, nil\n}\n\nfunc (p *Pipeline) AddSourceBin(src *Bin) error {\n\tif p.elementsAdded {\n\t\treturn errors.ErrWrongHierarchy\n\t}\n\tp.binsAdded = true\n\treturn p.Bin.AddSourceBin(src)\n}\n\nfunc (p *Pipeline) AddSinkBin(sink *Bin) error {\n\tif p.elementsAdded {\n\t\treturn errors.ErrWrongHierarchy\n\t}\n\tp.binsAdded = true\n\treturn p.Bin.AddSinkBin(sink)\n}\n\nfunc (p *Pipeline) AddElement(e *gst.Element) error {\n\tif p.binsAdded {\n\t\treturn errors.ErrWrongHierarchy\n\t}\n\tp.elementsAdded = true\n\treturn p.Bin.AddElement(e)\n}\n\nfunc (p *Pipeline) AddElements(elements ...*gst.Element) error {\n\tif p.binsAdded {\n\t\treturn errors.ErrWrongHierarchy\n\t}\n\tp.elementsAdded = true\n\treturn p.Bin.AddElements(elements...)\n}\n\nfunc (p *Pipeline) Link() error {\n\treturn p.link()\n}\n\nfunc (p *Pipeline) SetWatch(watch func(msg *gst.Message) bool) {\n\tp.pipeline.GetPipelineBus().AddWatch(watch)\n}\n\nfunc (p *Pipeline) SetState(state gst.State) error {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\tstateErr := make(chan error, 1)\n\tgo func() {\n\t\tstateErr <- p.pipeline.SetState(state)\n\t}()\n\n\tselect {\n\tcase <-time.After(stateChangeTimeout):\n\t\treturn errors.ErrPipelineFrozen\n\tcase err := <-stateErr:\n\t\tif err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (p *Pipeline) Run() error 
{\n\tif err := p.SetState(gst.StatePlaying); err != nil {\n\t\treturn err\n\t}\n\tif _, ok := p.UpgradeState(StateRunning); ok {\n\t\tp.loop.Run()\n\t}\n\n\treturn nil\n}\n\nfunc (p *Pipeline) SendEOS() {\n\told, ok := p.UpgradeState(StateEOS)\n\tif ok {\n\t\tif old >= StateRunning {\n\t\t\tp.sendEOS()\n\t\t} else {\n\t\t\tp.Stop()\n\t\t}\n\t}\n}\n\nfunc (p *Pipeline) Stop() {\n\tlogger.Debugw(\"stopping pipeline\")\n\told, ok := p.UpgradeState(StateStopping)\n\tif !ok {\n\t\treturn\n\t}\n\n\tif err := p.OnStop(); err != nil {\n\t\tp.OnError(err)\n\t}\n\tif err := p.SetState(gst.StateNull); err != nil {\n\t\tlogger.Errorw(\"failed to set pipeline to null\", err)\n\t}\n\n\tif old >= StateRunning {\n\t\tp.loop.Quit()\n\t}\n\n\tp.UpgradeState(StateFinished)\n}\n\nfunc (p *Pipeline) DebugBinToDotData(details gst.DebugGraphDetails) string {\n\treturn p.pipeline.DebugBinToDotData(details)\n}\n\n// RunningTime returns the running time of the gst pipeline\nfunc (p *Pipeline) RunningTime() (time.Duration, bool) {\n\tclock := p.pipeline.GetPipelineClock()\n\tif clock == nil {\n\t\treturn 0, false\n\t}\n\n\tclockTime := clock.GetTime()\n\tif clockTime == gst.ClockTimeNone {\n\t\treturn 0, false\n\t}\n\n\tbaseTime := p.pipeline.GetBaseTime()\n\tif baseTime == gst.ClockTimeNone {\n\t\treturn 0, false\n\t}\n\n\tclockValue := uint64(clockTime)\n\tbaseValue := uint64(baseTime)\n\tif clockValue < baseValue {\n\t\treturn 0, false\n\t}\n\n\tdelta := clockValue - baseValue\n\tif delta > uint64(math.MaxInt64) {\n\t\treturn time.Duration(math.MaxInt64), false\n\t}\n\n\treturn time.Duration(int64(delta)), true\n}\n\n// PlayheadPosition returns the playhead position of the gst pipeline\n// It is equivalent to the last timestamp seen by a sink element\nfunc (p *Pipeline) PlayheadPosition() (time.Duration, bool) {\n\tok, position := p.pipeline.QueryPosition(gst.FormatTime)\n\tif !ok || position < 0 {\n\t\treturn 0, false\n\t}\n\n\treturn time.Duration(position), true\n}\n"
  },
  {
    "path": "pkg/gstreamer/queue_monitor.go",
    "content": "// Copyright 2026 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage gstreamer\n\nimport (\n\t\"github.com/go-gst/go-gst/gst\"\n\t\"go.uber.org/atomic\"\n\n\t\"github.com/livekit/protocol/logger\"\n)\n\n// LeakyQueueMonitor tracks buffer flow through a leaky queue to detect dropped buffers.\n// It uses pad probes to count buffers in and out, then calculates drops as:\n// dropped = inCount - outCount\ntype LeakyQueueMonitor struct {\n\tname     string\n\tqueue    *gst.Element\n\tinCount  atomic.Uint64\n\toutCount atomic.Uint64\n\teosSeen  atomic.Bool\n}\n\n// NewLeakyQueueMonitor creates a monitor for the given queue element and attaches\n// pad probes to track buffer flow.\nfunc NewLeakyQueueMonitor(name string, queue *gst.Element) {\n\tm := &LeakyQueueMonitor{\n\t\tname:  name,\n\t\tqueue: queue,\n\t}\n\n\tsinkPad := queue.GetStaticPad(\"sink\")\n\tif sinkPad != nil {\n\t\tsinkPad.AddProbe(gst.PadProbeTypeBuffer, func(_ *gst.Pad, _ *gst.PadProbeInfo) gst.PadProbeReturn {\n\t\t\tm.inCount.Inc()\n\t\t\treturn gst.PadProbeOK\n\t\t})\n\t} else {\n\t\tlogger.Warnw(\"failed to get sink pad for queue monitor\", nil, \"queue\", name)\n\t}\n\n\tsrcPad := queue.GetStaticPad(\"src\")\n\tif srcPad != nil {\n\t\tsrcPad.AddProbe(gst.PadProbeTypeBuffer, func(_ *gst.Pad, _ *gst.PadProbeInfo) gst.PadProbeReturn {\n\t\t\tm.outCount.Inc()\n\t\t\treturn gst.PadProbeOK\n\t\t})\n\t\tsrcPad.AddProbe(gst.PadProbeTypeEventDownstream, 
func(_ *gst.Pad, info *gst.PadProbeInfo) gst.PadProbeReturn {\n\t\t\tif event := info.GetEvent(); event != nil && event.Type() == gst.EventTypeEOS {\n\t\t\t\tif !m.eosSeen.Swap(true) {\n\t\t\t\t\tm.postEOSStats()\n\t\t\t\t}\n\t\t\t\treturn gst.PadProbeRemove\n\t\t\t}\n\t\t\treturn gst.PadProbeOK\n\t\t})\n\t} else {\n\t\tlogger.Warnw(\"failed to get src pad for queue monitor\", nil, \"queue\", name)\n\t}\n}\n\nconst LeakyQueueStatsMessage = \"LeakyQueueStats\"\n\nfunc (m *LeakyQueueMonitor) postEOSStats() {\n\tif m.queue == nil {\n\t\treturn\n\t}\n\tinCount := m.inCount.Load()\n\toutCount := m.outCount.Load()\n\tdropped := uint64(0)\n\tif outCount <= inCount {\n\t\tdropped = inCount - outCount\n\t}\n\n\tst := gst.NewStructure(LeakyQueueStatsMessage)\n\terr := st.SetValue(\"queue\", m.name)\n\tif err != nil {\n\t\tlogger.Debugw(\"failed to set queue name\", err, \"queue\", m.name)\n\t\treturn\n\t}\n\terr = st.SetValue(\"in\", inCount)\n\tif err != nil {\n\t\tlogger.Debugw(\"failed to set in count\", err, \"queue\", m.name)\n\t\treturn\n\t}\n\terr = st.SetValue(\"out\", outCount)\n\tif err != nil {\n\t\tlogger.Debugw(\"failed to set out count\", err, \"queue\", m.name)\n\t\treturn\n\t}\n\terr = st.SetValue(\"dropped\", dropped)\n\tif err != nil {\n\t\tlogger.Debugw(\"failed to set dropped count\", err, \"queue\", m.name)\n\t\treturn\n\t}\n\tmsg := gst.NewElementMessage(m.queue, st)\n\tif msg == nil {\n\t\tlogger.Debugw(\"failed to build leaky queue stats message\", nil, \"queue\", m.name)\n\t\treturn\n\t}\n\tif ok := m.queue.PostMessage(msg); !ok {\n\t\tlogger.Debugw(\"failed to post leaky queue stats message\", nil, \"queue\", m.name)\n\t}\n}\n\n// Name returns the name of the monitored queue\nfunc (m *LeakyQueueMonitor) Name() string {\n\treturn m.name\n}\n"
  },
  {
    "path": "pkg/gstreamer/state.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage gstreamer\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/linkdata/deadlock\"\n\t\"github.com/livekit/protocol/logger\"\n)\n\ntype State int\n\nconst (\n\tStateBuilding State = iota\n\tStateStarted\n\tStateRunning\n\tStateEOS\n\tStateStopping\n\tStateFinished\n)\n\ntype StateManager struct {\n\tlock  deadlock.RWMutex\n\tstate State\n}\n\nfunc (s *StateManager) GetState() State {\n\ts.lock.RLock()\n\tdefer s.lock.RUnlock()\n\n\treturn s.state\n}\n\nfunc (s *StateManager) GetStateLocked() State {\n\treturn s.state\n}\n\nfunc (s *StateManager) LockState() {\n\ts.lock.Lock()\n}\n\nfunc (s *StateManager) UnlockState() {\n\ts.lock.Unlock()\n}\n\nfunc (s *StateManager) LockStateShared() {\n\ts.lock.RLock()\n}\n\nfunc (s *StateManager) UnlockStateShared() {\n\ts.lock.RUnlock()\n}\n\nfunc (s *StateManager) UpgradeState(state State) (State, bool) {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\n\told := s.state\n\tif old >= state {\n\t\treturn old, false\n\t}\n\tlogger.Debugw(fmt.Sprintf(\"pipeline state %v -> %v\", old, state))\n\ts.state = state\n\treturn old, true\n}\n\nfunc (s State) String() string {\n\tswitch s {\n\tcase StateBuilding:\n\t\treturn \"building\"\n\tcase StateStarted:\n\t\treturn \"starting\"\n\tcase StateRunning:\n\t\treturn \"running\"\n\tcase StateEOS:\n\t\treturn \"eos\"\n\tcase StateStopping:\n\t\treturn \"stopping\"\n\tcase 
StateFinished:\n\t\treturn \"finished\"\n\tdefault:\n\t\treturn \"unknown\"\n\t}\n}\n"
  },
  {
    "path": "pkg/gstreamer/time_provider.go",
    "content": "// Copyright 2025 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage gstreamer\n\nimport (\n\t\"time\"\n)\n\n// TimeProvider supplies the running time and playhead position of a pipeline.\ntype TimeProvider interface {\n\tRunningTime() (time.Duration, bool)\n\tPlayheadPosition() (time.Duration, bool)\n}\n\nvar _ TimeProvider = (*nopTimeProvider)(nil)\n\ntype nopTimeProvider struct{}\n\n// NopTimeProvider returns a TimeProvider that always reports unavailable times.\nfunc NopTimeProvider() TimeProvider {\n\treturn &nopTimeProvider{}\n}\n\nfunc (n *nopTimeProvider) RunningTime() (time.Duration, bool) {\n\treturn 0, false\n}\n\nfunc (n *nopTimeProvider) PlayheadPosition() (time.Duration, bool) {\n\treturn 0, false\n}\n"
  },
  {
    "path": "pkg/handler/handler.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage handler\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/frostbyte73/core\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/client_golang/prometheus/collectors\"\n\t\"google.golang.org/grpc\"\n\n\t\"go.opentelemetry.io/otel\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/ipc\"\n\t\"github.com/livekit/egress/pkg/pipeline\"\n\t\"github.com/livekit/protocol/livekit\"\n\t\"github.com/livekit/protocol/logger\"\n\t\"github.com/livekit/protocol/rpc\"\n\t\"github.com/livekit/psrpc\"\n)\n\ntype Handler struct {\n\tipc.UnimplementedEgressHandlerServer\n\n\tconf             *config.PipelineConfig\n\tcontroller       *pipeline.Controller\n\trpcServer        rpc.EgressHandlerServer\n\tipcHandlerServer *grpc.Server\n\tipcServiceClient ipc.EgressServiceClient\n\tinitialized      core.Fuse\n\tkill             core.Fuse\n}\n\nvar (\n\ttracer = otel.Tracer(\"github.com/livekit/egress/pkg/handler\")\n)\n\nfunc NewHandler(conf *config.PipelineConfig, bus psrpc.MessageBus) (*Handler, error) {\n\t// Register all GO process metrics\n\tprometheus.Unregister(collectors.NewGoCollector())\n\tprometheus.MustRegister(collectors.NewGoCollector(collectors.WithGoCollectorRuntimeMetrics(collectors.MetricsAll)))\n\n\tipcClient, err := 
ipc.NewServiceClient(path.Join(config.TmpDir, conf.NodeID))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconf.StorageObserver = &ipcStorageObserver{client: ipcClient}\n\n\th := &Handler{\n\t\tconf:             conf,\n\t\tipcHandlerServer: grpc.NewServer(),\n\t\tipcServiceClient: ipcClient,\n\t}\n\n\tipc.RegisterEgressHandlerServer(h.ipcHandlerServer, h)\n\tif err = ipc.StartHandlerListener(h.ipcHandlerServer, path.Join(config.TmpDir, conf.HandlerID)); err != nil {\n\t\treturn nil, err\n\t}\n\n\trpcServer, err := rpc.NewEgressHandlerServer(h, bus)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err = rpcServer.RegisterUpdateStreamTopic(conf.Info.EgressId); err != nil {\n\t\treturn nil, err\n\t}\n\tif err = rpcServer.RegisterStopEgressTopic(conf.Info.EgressId); err != nil {\n\t\treturn nil, err\n\t}\n\tif err = rpcServer.RegisterUpdateEgressTopic(conf.Info.EgressId); err != nil {\n\t\treturn nil, err\n\t}\n\th.rpcServer = rpcServer\n\n\t_, err = h.ipcServiceClient.HandlerReady(context.Background(), &ipc.HandlerReadyRequest{EgressId: conf.Info.EgressId})\n\tif err != nil {\n\t\tlogger.Errorw(\"failed to notify service\", err)\n\t\treturn nil, err\n\t}\n\n\treturn h, nil\n}\n\nfunc (h *Handler) Run() {\n\tctx, span := tracer.Start(context.Background(), \"Handler.Run\")\n\tdefer span.End()\n\n\tdefer func() {\n\t\th.rpcServer.Shutdown()\n\t\th.ipcHandlerServer.Stop()\n\t}()\n\n\tvar err error\n\tegressID := h.conf.Info.EgressId\n\n\tif h.shouldInjectEgressFailure() {\n\t\tlogger.Infow(\"injecting egress failure\", \"egressID\", egressID)\n\t\terr = errors.New(\"test failure injection\")\n\t\th.conf.Info.SetFailed(err)\n\t\t_, err = h.ipcServiceClient.HandlerUpdate(context.Background(), h.conf.Info)\n\t\tif err != nil {\n\t\t\tlogger.Errorw(\"egress update ipc call failed\", err, \"egressID\", egressID)\n\t\t}\n\t\treturn\n\t}\n\n\th.controller, err = pipeline.New(context.Background(), h.conf, h.ipcServiceClient)\n\th.initialized.Break()\n\tif err != nil 
{\n\t\th.conf.Info.SetFailed(err)\n\t\t_, err = h.ipcServiceClient.HandlerUpdate(context.Background(), h.conf.Info)\n\t\tif err != nil {\n\t\t\tlogger.Errorw(\"egress update ipc call failed\", err, \"egressID\", egressID)\n\t\t}\n\t\treturn\n\t}\n\n\t// Replay coordination: signal ready and get timing\n\tif h.conf.IsReplay() {\n\t\trctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)\n\t\tresp, err := h.ipcServiceClient.ReplayReady(rctx, &rpc.EgressReadyRequest{\n\t\t\tEgressId: h.conf.Info.EgressId,\n\t\t})\n\t\tcancel()\n\t\tif err != nil {\n\t\t\th.conf.Info.SetFailed(err)\n\t\t\t_, _ = h.ipcServiceClient.HandlerUpdate(context.Background(), h.conf.Info)\n\t\t\treturn\n\t\t}\n\t\th.controller.SetReplayTiming(resp.StartAt, resp.DurationMs)\n\t}\n\n\t// start egress\n\tres := h.controller.Run(ctx)\n\tm, err := h.GenerateMetrics(ctx)\n\tif err != nil {\n\t\tlogger.Errorw(\"failed to generate handler metrics\", err, \"egressID\", egressID)\n\t}\n\n\t_, err = h.ipcServiceClient.HandlerFinished(ctx, &ipc.HandlerFinishedRequest{\n\t\tEgressId: egressID,\n\t\tMetrics:  m,\n\t\tInfo:     res,\n\t})\n\tif err != nil {\n\t\tlogger.Errorw(\"egress finished ipc call failed\", err, \"egressID\", egressID)\n\t}\n}\n\nfunc (h *Handler) Kill() {\n\t<-h.initialized.Watch()\n\tif h.controller == nil {\n\t\treturn\n\t}\n\th.controller.SendEOS(context.Background(), livekit.EndReasonKilled)\n}\n\nfunc (h *Handler) shouldInjectEgressFailure() bool {\n\tif h.conf.TestOverrides.FailureInjectionRoom == \"\" {\n\t\treturn false\n\t}\n\tif h.conf.Info.RetryCount > 0 {\n\t\treturn false\n\t}\n\treturn strings.Contains(h.conf.Info.RoomName, h.conf.TestOverrides.FailureInjectionRoom)\n}\n\ntype ipcStorageObserver struct {\n\tclient ipc.EgressServiceClient\n}\n\nfunc (o *ipcStorageObserver) OnStorageEvent(egressID, operation, path string, size, lifetimeDays int64) {\n\t_, err := o.client.StorageEvent(context.Background(), &ipc.StorageEventRequest{\n\t\tEgressId:     
egressID,\n\t\tOperation:    operation,\n\t\tPath:         path,\n\t\tSize:         size,\n\t\tLifetimeDays: lifetimeDays,\n\t})\n\tif err != nil {\n\t\tlogger.Errorw(\"storage event ipc call failed\", err, \"egressID\", egressID)\n\t}\n}\n"
  },
  {
    "path": "pkg/handler/handler_ipc.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage handler\n\nimport (\n\t\"context\"\n\t\"strings\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\tdto \"github.com/prometheus/client_model/go\"\n\t\"github.com/prometheus/common/expfmt\"\n\t\"google.golang.org/protobuf/types/known/emptypb\"\n\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/ipc\"\n\t\"github.com/livekit/protocol/livekit\"\n\t\"github.com/livekit/protocol/logger\"\n\t\"github.com/livekit/protocol/pprof\"\n\t\"github.com/livekit/psrpc\"\n)\n\nfunc (h *Handler) GetPipelineDot(ctx context.Context, _ *ipc.GstPipelineDebugDotRequest) (*ipc.GstPipelineDebugDotResponse, error) {\n\t_, span := tracer.Start(ctx, \"Handler.GetPipelineDot\")\n\tdefer span.End()\n\n\t<-h.initialized.Watch()\n\tif h.controller == nil {\n\t\t// egress handler is shutting down on error\n\t\treturn nil, errors.ErrEgressNotFound\n\t}\n\n\tr, err := h.controller.GetGstPipelineDebugDot()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &ipc.GstPipelineDebugDotResponse{\n\t\tDotFile: r,\n\t}, nil\n}\n\nfunc (h *Handler) GetPProf(ctx context.Context, req *ipc.PProfRequest) (*ipc.PProfResponse, error) {\n\tctx, span := tracer.Start(ctx, \"Handler.GetPProf\")\n\tdefer span.End()\n\n\t<-h.initialized.Watch()\n\tif h.controller == nil {\n\t\t// egress handler is shutting down on error\n\t\treturn nil, 
errors.ErrEgressNotFound\n\t}\n\n\tb, err := pprof.GetProfileData(ctx, req.ProfileName, int(req.Timeout), int(req.Debug))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &ipc.PProfResponse{\n\t\tPprofFile: b,\n\t}, nil\n}\n\n// GetMetrics implements the handler-side gathering of metrics to return over IPC\nfunc (h *Handler) GetMetrics(ctx context.Context, _ *ipc.MetricsRequest) (*ipc.MetricsResponse, error) {\n\tctx, span := tracer.Start(ctx, \"Handler.GetMetrics\")\n\tdefer span.End()\n\n\tmetricsAsString, err := h.GenerateMetrics(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &ipc.MetricsResponse{\n\t\tMetrics: metricsAsString,\n\t}, nil\n}\n\nfunc (h *Handler) GenerateMetrics(_ context.Context) (string, error) {\n\tmetrics, err := prometheus.DefaultGatherer.Gather()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tmetricsAsString, err := renderMetrics(metrics)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn metricsAsString, nil\n}\n\nfunc renderMetrics(metrics []*dto.MetricFamily) (string, error) {\n\t// Create a StringWriter to render the metrics into text format\n\twriter := &strings.Builder{}\n\ttotalCnt := 0\n\tfor _, metric := range metrics {\n\t\t// Write each metric family to text\n\t\tcnt, err := expfmt.MetricFamilyToText(writer, metric)\n\t\tif err != nil {\n\t\t\tlogger.Errorw(\"error writing metric family\", err)\n\t\t\treturn \"\", err\n\t\t}\n\t\ttotalCnt += cnt\n\t}\n\n\t// Get the rendered metrics as a string from the StringWriter\n\treturn writer.String(), nil\n}\n\nfunc (h *Handler) KillEgress(ctx context.Context, req *ipc.KillEgressRequest) (*emptypb.Empty, error) {\n\tctx, span := tracer.Start(ctx, \"Handler.KillEgress\")\n\tdefer span.End()\n\n\t<-h.initialized.Watch()\n\n\tif h.controller == nil {\n\t\t// failed to start controller\n\t\treturn &emptypb.Empty{}, nil\n\t}\n\n\th.controller.SendEOS(ctx, livekit.EndReasonKilled)\n\th.controller.Info.SetFailed(psrpc.NewErrorf(psrpc.PermissionDenied, \"%s\", 
req.Error))\n\n\treturn &emptypb.Empty{}, nil\n}\n"
  },
  {
    "path": "pkg/handler/handler_rpc.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage handler\n\nimport (\n\t\"context\"\n\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/protocol/livekit\"\n)\n\nfunc (h *Handler) UpdateStream(ctx context.Context, req *livekit.UpdateStreamRequest) (*livekit.EgressInfo, error) {\n\tctx, span := tracer.Start(ctx, \"Handler.UpdateStream\")\n\tdefer span.End()\n\n\t<-h.initialized.Watch()\n\tif h.controller == nil {\n\t\treturn nil, errors.ErrEgressNotFound\n\t}\n\n\terr := h.controller.UpdateStream(ctx, req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn h.controller.Info, nil\n}\n\nfunc (h *Handler) UpdateEgress(ctx context.Context, req *livekit.UpdateEgressRequest) (*livekit.EgressInfo, error) {\n\tctx, span := tracer.Start(ctx, \"Handler.UpdateEgress\")\n\tdefer span.End()\n\n\t<-h.initialized.Watch()\n\tif h.controller == nil {\n\t\treturn nil, errors.ErrEgressNotFound\n\t}\n\n\terr := h.controller.UpdateEgress(ctx, req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn h.controller.Info, nil\n}\n\nfunc (h *Handler) StopEgress(ctx context.Context, _ *livekit.StopEgressRequest) (*livekit.EgressInfo, error) {\n\tctx, span := tracer.Start(ctx, \"Handler.StopEgress\")\n\tdefer span.End()\n\n\t<-h.initialized.Watch()\n\tif h.controller == nil {\n\t\treturn nil, errors.ErrEgressNotFound\n\t}\n\n\th.controller.SendEOS(ctx, livekit.EndReasonAPI)\n\treturn 
h.controller.Info, nil\n}\n"
  },
  {
    "path": "pkg/info/io.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage info\n\nimport (\n\t\"context\"\n\t\"hash/fnv\"\n\t\"math\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/frostbyte73/core\"\n\t\"github.com/linkdata/deadlock\"\n\n\t\"github.com/livekit/protocol/egress\"\n\t\"github.com/livekit/protocol/livekit\"\n\t\"github.com/livekit/protocol/logger\"\n\t\"github.com/livekit/protocol/rpc\"\n\t\"github.com/livekit/psrpc\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/errors\"\n)\n\nconst (\n\tnumWorkers                     = 5\n\tmaxBackoff                     = time.Minute * 1\n\tunhealthyShutdownWatchdogDelay = 20 * time.Second // TODO change to 10 min once we understand PSRPC failures\n)\n\ntype SessionReporter interface {\n\tCreateEgress(ctx context.Context, info *livekit.EgressInfo) chan error\n\tUpdateEgress(ctx context.Context, info *livekit.EgressInfo) error\n\tUpdateMetrics(ctx context.Context, req *rpc.UpdateMetricsRequest) error\n\tIsHealthy() bool\n\tSetWatchdogHandler(w func())\n\tDrain()\n}\n\ntype sessionReporter struct {\n\trpc.IOInfoClient\n\n\tcreateTimeout time.Duration\n\tupdateTimeout time.Duration\n\n\tworkers []*worker\n\n\thealthyLock            deadlock.Mutex\n\thealthy                bool\n\thealthyWatchdogHandler func()\n\thealthyTimer           *time.Timer\n\n\tdraining core.Fuse\n\tdone     core.Fuse\n}\n\ntype worker struct {\n\tmu       
deadlock.Mutex\n\tcreating map[string]*update\n\tupdates  map[string]*update\n\tqueue    chan string\n}\n\ntype update struct {\n\tctx  context.Context\n\tinfo *livekit.EgressInfo\n}\n\nfunc NewSessionReporter(conf *config.BaseConfig, bus psrpc.MessageBus) (SessionReporter, error) {\n\tclient, err := rpc.NewIOInfoClient(bus, psrpc.WithClientSelectTimeout(conf.IOSelectionTimeout), rpc.WithClientObservability(logger.GetLogger()))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := &sessionReporter{\n\t\tIOInfoClient:  client,\n\t\tcreateTimeout: conf.IOCreateTimeout,\n\t\tupdateTimeout: conf.IOUpdateTimeout,\n\t\tworkers:       make([]*worker, conf.IOWorkers),\n\t}\n\tc.healthy = true\n\tc.healthyTimer = time.AfterFunc(time.Duration(math.MaxInt64), func() {\n\t\tc.healthyLock.Lock()\n\t\tdefer c.healthyLock.Unlock()\n\n\t\tlogger.Errorw(\"io client watchdog triggered\", errors.New(\"io client unhealthy\"))\n\t\tif c.healthyWatchdogHandler != nil {\n\t\t\tc.healthyWatchdogHandler()\n\t\t}\n\t\t// Do not wait for the event queue to drain\n\t\tc.done.Break()\n\t})\n\n\tfor i := 0; i < conf.IOWorkers; i++ {\n\t\tc.workers[i] = &worker{\n\t\t\tcreating: make(map[string]*update),\n\t\t\tupdates:  make(map[string]*update),\n\t\t\tqueue:    make(chan string, 500),\n\t\t}\n\t\tgo c.runWorker(c.workers[i])\n\t}\n\n\treturn c, nil\n}\n\nfunc (c *sessionReporter) CreateEgress(ctx context.Context, info *livekit.EgressInfo) chan error {\n\tu := &update{}\n\tw := c.getWorker(info.EgressId)\n\n\tw.mu.Lock()\n\tw.creating[info.EgressId] = u\n\tw.mu.Unlock()\n\n\terrChan := make(chan error, 1)\n\tgo func() {\n\t\t_, err := c.IOInfoClient.CreateEgress(ctx, info, psrpc.WithRequestTimeout(c.createTimeout))\n\n\t\tw.mu.Lock()\n\t\tdelete(w.creating, info.EgressId)\n\t\tif err != nil {\n\t\t\tlogger.Errorw(\"failed to create egress\", err, \"egressID\", info.EgressId)\n\t\t\tdelete(w.updates, info.EgressId)\n\t\t} else if u.info != nil {\n\t\t\terr = 
w.submit(u)\n\t\t}\n\t\tw.mu.Unlock()\n\n\t\terrChan <- err\n\t}()\n\n\treturn errChan\n}\n\nfunc (c *sessionReporter) UpdateEgress(ctx context.Context, info *livekit.EgressInfo) error {\n\tctx = context.WithoutCancel(ctx)\n\n\tw := c.getWorker(info.EgressId)\n\n\tw.mu.Lock()\n\tdefer w.mu.Unlock()\n\n\tu := w.creating[info.EgressId]\n\tif u == nil {\n\t\tu = w.updates[info.EgressId]\n\t}\n\tif u != nil {\n\t\tu.ctx = ctx\n\t\tu.info = info\n\t\treturn nil\n\t}\n\n\treturn w.submit(&update{\n\t\tctx:  ctx,\n\t\tinfo: info,\n\t})\n}\n\nfunc (c *sessionReporter) UpdateMetrics(_ context.Context, _ *rpc.UpdateMetricsRequest) error {\n\treturn nil\n}\n\nfunc (c *sessionReporter) SetWatchdogHandler(w func()) {\n\tc.healthyLock.Lock()\n\tdefer c.healthyLock.Unlock()\n\n\tc.healthyWatchdogHandler = w\n}\n\nfunc (c *sessionReporter) IsHealthy() bool {\n\tc.healthyLock.Lock()\n\tdefer c.healthyLock.Unlock()\n\n\treturn c.healthy\n}\n\nfunc (c *sessionReporter) Drain() {\n\tc.draining.Break()\n\t<-c.done.Watch()\n}\n\nfunc (c *sessionReporter) runWorker(w *worker) {\n\tdraining := c.draining.Watch()\n\tfor {\n\t\tselect {\n\t\tcase egressID := <-w.queue:\n\t\t\tc.handleUpdate(w, egressID)\n\t\tcase <-draining:\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase egressID := <-w.queue:\n\t\t\t\t\tc.handleUpdate(w, egressID)\n\t\t\t\tdefault:\n\t\t\t\t\tc.done.Break()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (c *sessionReporter) getWorker(egressID string) *worker {\n\th := fnv.New32a()\n\t_, _ = h.Write([]byte(egressID))\n\treturn c.workers[int(h.Sum32())%len(c.workers)]\n}\n\nfunc (w *worker) submit(u *update) error {\n\tw.updates[u.info.EgressId] = u\n\n\tselect {\n\tcase w.queue <- u.info.EgressId:\n\t\treturn nil\n\tdefault:\n\t\tdelete(w.updates, u.info.EgressId)\n\t\treturn errors.New(\"queue is full\")\n\t}\n}\n\nfunc (c *sessionReporter) handleUpdate(w *worker, egressID string) {\n\tw.mu.Lock()\n\tu := w.updates[egressID]\n\tdelete(w.updates, 
egressID)\n\tw.mu.Unlock()\n\tif u == nil {\n\t\treturn\n\t}\n\n\td := time.Millisecond * 250\n\tfor {\n\t\tif _, err := c.IOInfoClient.UpdateEgress(u.ctx, u.info, psrpc.WithRequestTimeout(c.updateTimeout)); err != nil {\n\t\t\tif isRetryableError(err) {\n\t\t\t\tif c.setHealthy(false) {\n\t\t\t\t\tlogger.Warnw(\"io connection unhealthy\", err, \"egressID\", u.info.EgressId)\n\t\t\t\t}\n\t\t\t\tlogger.Debugw(\"psrpc IO request failed\", \"error\", err, \"egressID\", u.info.EgressId)\n\n\t\t\t\td = min(d*2, maxBackoff)\n\t\t\t\ttime.Sleep(d)\n\n\t\t\t\tselect {\n\t\t\t\tcase <-u.ctx.Done():\n\t\t\t\t\tlogger.Infow(\"failed to update egress on expired context\", \"egressID\", u.info.EgressId)\n\t\t\t\t\treturn\n\t\t\t\tdefault:\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tlogger.Errorw(\"failed to update egress\", err, \"egressID\", u.info.EgressId)\n\t\t\treturn\n\t\t}\n\n\t\tif !c.setHealthy(true) {\n\t\t\tlogger.Infow(\"io connection restored\", \"egressID\", u.info.EgressId)\n\t\t}\n\t\tvar typesInput any = u.info.Request\n\t\tif e, ok := u.info.Request.(*livekit.EgressInfo_Replay); ok {\n\t\t\ttypesInput = e.Replay\n\t\t}\n\t\trequestType, outputType := egress.GetTypes(typesInput)\n\t\tlogger.Infow(strings.ToLower(u.info.Status.String()),\n\t\t\t\"egressID\", u.info.EgressId,\n\t\t\t\"requestType\", requestType,\n\t\t\t\"outputType\", outputType,\n\t\t\t\"error\", u.info.Error,\n\t\t\t\"code\", u.info.ErrorCode,\n\t\t\t\"details\", u.info.Details,\n\t\t)\n\t\treturn\n\t}\n}\n\nfunc (c *sessionReporter) setHealthy(isHealthy bool) bool {\n\tc.healthyLock.Lock()\n\tdefer c.healthyLock.Unlock()\n\n\toldHealthy := c.healthy\n\n\tswitch c.healthy {\n\tcase true:\n\t\tif !isHealthy {\n\t\t\tc.healthyTimer.Reset(unhealthyShutdownWatchdogDelay)\n\t\t}\n\tcase false:\n\t\tif isHealthy {\n\t\t\tc.healthyTimer.Reset(time.Duration(math.MaxInt64))\n\t\t}\n\t}\n\n\tc.healthy = isHealthy\n\n\treturn oldHealthy\n}\n\nfunc isRetryableError(err error) bool {\n\treturn 
errors.Is(err, psrpc.ErrRequestTimedOut) || errors.Is(err, psrpc.ErrNoResponse)\n}\n"
  },
  {
    "path": "pkg/ipc/conn.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage ipc\n\nimport (\n\t\"net\"\n\t\"path\"\n\n\t\"google.golang.org/grpc\"\n\t\"google.golang.org/grpc/credentials/insecure\"\n\n\t\"github.com/livekit/protocol/logger\"\n)\n\nconst (\n\tnetwork        = \"unix\"\n\thandlerAddress = \"handler_ipc.sock\"\n\tserviceAddress = \"service_ipc.sock\"\n)\n\ntype EgressHandlerClientWrapper struct {\n\tEgressHandlerClient\n\tconn *grpc.ClientConn\n}\n\nfunc StartServiceListener(ipcServer *grpc.Server, serviceTmpDir string) error {\n\tlistener, err := net.Listen(network, path.Join(serviceTmpDir, serviceAddress))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgo func() {\n\t\tif err = ipcServer.Serve(listener); err != nil {\n\t\t\tlogger.Errorw(\"failed to start grpc handler\", err)\n\t\t}\n\t}()\n\n\treturn nil\n}\n\nfunc NewHandlerClient(handlerTmpDir string) (*EgressHandlerClientWrapper, error) {\n\tsocketAddr := \"unix://\" + path.Join(handlerTmpDir, handlerAddress)\n\tconn, err := grpc.NewClient(socketAddr,\n\t\tgrpc.WithTransportCredentials(insecure.NewCredentials()),\n\t)\n\tif err != nil {\n\t\tlogger.Errorw(\"could not dial grpc handler\", err)\n\t\treturn nil, err\n\t}\n\n\treturn &EgressHandlerClientWrapper{EgressHandlerClient: NewEgressHandlerClient(conn), conn: conn}, nil\n}\n\nfunc StartHandlerListener(ipcServer *grpc.Server, handlerTmpDir string) error {\n\tlistener, err := net.Listen(network, 
path.Join(handlerTmpDir, handlerAddress))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgo func() {\n\t\tif err = ipcServer.Serve(listener); err != nil {\n\t\t\tlogger.Errorw(\"failed to start grpc handler\", err)\n\t\t}\n\t}()\n\n\treturn nil\n}\n\nfunc NewServiceClient(serviceTmpDir string) (EgressServiceClient, error) {\n\tsocketAddr := \"unix://\" + path.Join(serviceTmpDir, serviceAddress)\n\tconn, err := grpc.NewClient(socketAddr,\n\t\tgrpc.WithTransportCredentials(insecure.NewCredentials()),\n\t)\n\tif err != nil {\n\t\tlogger.Errorw(\"could not dial grpc handler\", err)\n\t\treturn nil, err\n\t}\n\n\treturn NewEgressServiceClient(conn), nil\n}\n\nfunc (c EgressHandlerClientWrapper) Close() error {\n\treturn c.conn.Close()\n}\n"
  },
  {
    "path": "pkg/ipc/ipc.pb.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// Code generated by protoc-gen-go. DO NOT EDIT.\n// versions:\n// \tprotoc-gen-go v1.36.6\n// \tprotoc        v6.33.0\n// source: ipc.proto\n\npackage ipc\n\nimport (\n\tlivekit \"github.com/livekit/protocol/livekit\"\n\trpc \"github.com/livekit/protocol/rpc\"\n\tprotoreflect \"google.golang.org/protobuf/reflect/protoreflect\"\n\tprotoimpl \"google.golang.org/protobuf/runtime/protoimpl\"\n\temptypb \"google.golang.org/protobuf/types/known/emptypb\"\n\treflect \"reflect\"\n\tsync \"sync\"\n\tunsafe \"unsafe\"\n)\n\nconst (\n\t// Verify that this generated code is sufficiently up-to-date.\n\t_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)\n\t// Verify that runtime/protoimpl is sufficiently up-to-date.\n\t_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)\n)\n\ntype HandlerReadyRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tEgressId      string                 `protobuf:\"bytes,1,opt,name=egress_id,json=egressId,proto3\" json:\"egress_id,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *HandlerReadyRequest) Reset() {\n\t*x = HandlerReadyRequest{}\n\tmi := &file_ipc_proto_msgTypes[0]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *HandlerReadyRequest) String() string {\n\treturn 
protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*HandlerReadyRequest) ProtoMessage() {}\n\nfunc (x *HandlerReadyRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_ipc_proto_msgTypes[0]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use HandlerReadyRequest.ProtoReflect.Descriptor instead.\nfunc (*HandlerReadyRequest) Descriptor() ([]byte, []int) {\n\treturn file_ipc_proto_rawDescGZIP(), []int{0}\n}\n\nfunc (x *HandlerReadyRequest) GetEgressId() string {\n\tif x != nil {\n\t\treturn x.EgressId\n\t}\n\treturn \"\"\n}\n\ntype HandlerFinishedRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tEgressId      string                 `protobuf:\"bytes,1,opt,name=egress_id,json=egressId,proto3\" json:\"egress_id,omitempty\"`\n\tMetrics       string                 `protobuf:\"bytes,2,opt,name=metrics,proto3\" json:\"metrics,omitempty\"`\n\tInfo          *livekit.EgressInfo    `protobuf:\"bytes,3,opt,name=info,proto3\" json:\"info,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *HandlerFinishedRequest) Reset() {\n\t*x = HandlerFinishedRequest{}\n\tmi := &file_ipc_proto_msgTypes[1]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *HandlerFinishedRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*HandlerFinishedRequest) ProtoMessage() {}\n\nfunc (x *HandlerFinishedRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_ipc_proto_msgTypes[1]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use HandlerFinishedRequest.ProtoReflect.Descriptor instead.\nfunc 
(*HandlerFinishedRequest) Descriptor() ([]byte, []int) {\n\treturn file_ipc_proto_rawDescGZIP(), []int{1}\n}\n\nfunc (x *HandlerFinishedRequest) GetEgressId() string {\n\tif x != nil {\n\t\treturn x.EgressId\n\t}\n\treturn \"\"\n}\n\nfunc (x *HandlerFinishedRequest) GetMetrics() string {\n\tif x != nil {\n\t\treturn x.Metrics\n\t}\n\treturn \"\"\n}\n\nfunc (x *HandlerFinishedRequest) GetInfo() *livekit.EgressInfo {\n\tif x != nil {\n\t\treturn x.Info\n\t}\n\treturn nil\n}\n\ntype StorageEventRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tEgressId      string                 `protobuf:\"bytes,1,opt,name=egress_id,json=egressId,proto3\" json:\"egress_id,omitempty\"`\n\tOperation     string                 `protobuf:\"bytes,2,opt,name=operation,proto3\" json:\"operation,omitempty\"`\n\tPath          string                 `protobuf:\"bytes,3,opt,name=path,proto3\" json:\"path,omitempty\"`\n\tSize          int64                  `protobuf:\"varint,4,opt,name=size,proto3\" json:\"size,omitempty\"`\n\tLifetimeDays  int64                  `protobuf:\"varint,5,opt,name=lifetime_days,json=lifetimeDays,proto3\" json:\"lifetime_days,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *StorageEventRequest) Reset() {\n\t*x = StorageEventRequest{}\n\tmi := &file_ipc_proto_msgTypes[2]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *StorageEventRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*StorageEventRequest) ProtoMessage() {}\n\nfunc (x *StorageEventRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_ipc_proto_msgTypes[2]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use StorageEventRequest.ProtoReflect.Descriptor 
instead.\nfunc (*StorageEventRequest) Descriptor() ([]byte, []int) {\n\treturn file_ipc_proto_rawDescGZIP(), []int{2}\n}\n\nfunc (x *StorageEventRequest) GetEgressId() string {\n\tif x != nil {\n\t\treturn x.EgressId\n\t}\n\treturn \"\"\n}\n\nfunc (x *StorageEventRequest) GetOperation() string {\n\tif x != nil {\n\t\treturn x.Operation\n\t}\n\treturn \"\"\n}\n\nfunc (x *StorageEventRequest) GetPath() string {\n\tif x != nil {\n\t\treturn x.Path\n\t}\n\treturn \"\"\n}\n\nfunc (x *StorageEventRequest) GetSize() int64 {\n\tif x != nil {\n\t\treturn x.Size\n\t}\n\treturn 0\n}\n\nfunc (x *StorageEventRequest) GetLifetimeDays() int64 {\n\tif x != nil {\n\t\treturn x.LifetimeDays\n\t}\n\treturn 0\n}\n\ntype GstPipelineDebugDotRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *GstPipelineDebugDotRequest) Reset() {\n\t*x = GstPipelineDebugDotRequest{}\n\tmi := &file_ipc_proto_msgTypes[3]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *GstPipelineDebugDotRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*GstPipelineDebugDotRequest) ProtoMessage() {}\n\nfunc (x *GstPipelineDebugDotRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_ipc_proto_msgTypes[3]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use GstPipelineDebugDotRequest.ProtoReflect.Descriptor instead.\nfunc (*GstPipelineDebugDotRequest) Descriptor() ([]byte, []int) {\n\treturn file_ipc_proto_rawDescGZIP(), []int{3}\n}\n\ntype GstPipelineDebugDotResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tDotFile       string                 `protobuf:\"bytes,1,opt,name=dot_file,json=dotFile,proto3\" 
json:\"dot_file,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *GstPipelineDebugDotResponse) Reset() {\n\t*x = GstPipelineDebugDotResponse{}\n\tmi := &file_ipc_proto_msgTypes[4]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *GstPipelineDebugDotResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*GstPipelineDebugDotResponse) ProtoMessage() {}\n\nfunc (x *GstPipelineDebugDotResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_ipc_proto_msgTypes[4]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use GstPipelineDebugDotResponse.ProtoReflect.Descriptor instead.\nfunc (*GstPipelineDebugDotResponse) Descriptor() ([]byte, []int) {\n\treturn file_ipc_proto_rawDescGZIP(), []int{4}\n}\n\nfunc (x *GstPipelineDebugDotResponse) GetDotFile() string {\n\tif x != nil {\n\t\treturn x.DotFile\n\t}\n\treturn \"\"\n}\n\ntype PProfRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tProfileName   string                 `protobuf:\"bytes,1,opt,name=profile_name,json=profileName,proto3\" json:\"profile_name,omitempty\"`\n\tTimeout       int32                  `protobuf:\"varint,2,opt,name=timeout,proto3\" json:\"timeout,omitempty\"`\n\tDebug         int32                  `protobuf:\"varint,3,opt,name=debug,proto3\" json:\"debug,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *PProfRequest) Reset() {\n\t*x = PProfRequest{}\n\tmi := &file_ipc_proto_msgTypes[5]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *PProfRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*PProfRequest) ProtoMessage() {}\n\nfunc (x 
*PProfRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_ipc_proto_msgTypes[5]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use PProfRequest.ProtoReflect.Descriptor instead.\nfunc (*PProfRequest) Descriptor() ([]byte, []int) {\n\treturn file_ipc_proto_rawDescGZIP(), []int{5}\n}\n\nfunc (x *PProfRequest) GetProfileName() string {\n\tif x != nil {\n\t\treturn x.ProfileName\n\t}\n\treturn \"\"\n}\n\nfunc (x *PProfRequest) GetTimeout() int32 {\n\tif x != nil {\n\t\treturn x.Timeout\n\t}\n\treturn 0\n}\n\nfunc (x *PProfRequest) GetDebug() int32 {\n\tif x != nil {\n\t\treturn x.Debug\n\t}\n\treturn 0\n}\n\ntype PProfResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tPprofFile     []byte                 `protobuf:\"bytes,1,opt,name=pprof_file,json=pprofFile,proto3\" json:\"pprof_file,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *PProfResponse) Reset() {\n\t*x = PProfResponse{}\n\tmi := &file_ipc_proto_msgTypes[6]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *PProfResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*PProfResponse) ProtoMessage() {}\n\nfunc (x *PProfResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_ipc_proto_msgTypes[6]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use PProfResponse.ProtoReflect.Descriptor instead.\nfunc (*PProfResponse) Descriptor() ([]byte, []int) {\n\treturn file_ipc_proto_rawDescGZIP(), []int{6}\n}\n\nfunc (x *PProfResponse) GetPprofFile() []byte {\n\tif x != nil {\n\t\treturn 
x.PprofFile\n\t}\n\treturn nil\n}\n\ntype MetricsRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *MetricsRequest) Reset() {\n\t*x = MetricsRequest{}\n\tmi := &file_ipc_proto_msgTypes[7]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *MetricsRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*MetricsRequest) ProtoMessage() {}\n\nfunc (x *MetricsRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_ipc_proto_msgTypes[7]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use MetricsRequest.ProtoReflect.Descriptor instead.\nfunc (*MetricsRequest) Descriptor() ([]byte, []int) {\n\treturn file_ipc_proto_rawDescGZIP(), []int{7}\n}\n\ntype MetricsResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tMetrics       string                 `protobuf:\"bytes,1,opt,name=metrics,proto3\" json:\"metrics,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *MetricsResponse) Reset() {\n\t*x = MetricsResponse{}\n\tmi := &file_ipc_proto_msgTypes[8]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *MetricsResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*MetricsResponse) ProtoMessage() {}\n\nfunc (x *MetricsResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_ipc_proto_msgTypes[8]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use MetricsResponse.ProtoReflect.Descriptor 
instead.\nfunc (*MetricsResponse) Descriptor() ([]byte, []int) {\n\treturn file_ipc_proto_rawDescGZIP(), []int{8}\n}\n\nfunc (x *MetricsResponse) GetMetrics() string {\n\tif x != nil {\n\t\treturn x.Metrics\n\t}\n\treturn \"\"\n}\n\ntype KillEgressRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tError         string                 `protobuf:\"bytes,1,opt,name=error,proto3\" json:\"error,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *KillEgressRequest) Reset() {\n\t*x = KillEgressRequest{}\n\tmi := &file_ipc_proto_msgTypes[9]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *KillEgressRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*KillEgressRequest) ProtoMessage() {}\n\nfunc (x *KillEgressRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_ipc_proto_msgTypes[9]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use KillEgressRequest.ProtoReflect.Descriptor instead.\nfunc (*KillEgressRequest) Descriptor() ([]byte, []int) {\n\treturn file_ipc_proto_rawDescGZIP(), []int{9}\n}\n\nfunc (x *KillEgressRequest) GetError() string {\n\tif x != nil {\n\t\treturn x.Error\n\t}\n\treturn \"\"\n}\n\nvar File_ipc_proto protoreflect.FileDescriptor\n\nconst file_ipc_proto_rawDesc = \"\" +\n\t\"\\n\" +\n\t\"\\tipc.proto\\x12\\x03ipc\\x1a\\x1bgoogle/protobuf/empty.proto\\x1a\\x14livekit_egress.proto\\x1a\\x10rpc/egress.proto\\\"2\\n\" +\n\t\"\\x13HandlerReadyRequest\\x12\\x1b\\n\" +\n\t\"\\tegress_id\\x18\\x01 \\x01(\\tR\\begressId\\\"x\\n\" +\n\t\"\\x16HandlerFinishedRequest\\x12\\x1b\\n\" +\n\t\"\\tegress_id\\x18\\x01 \\x01(\\tR\\begressId\\x12\\x18\\n\" +\n\t\"\\ametrics\\x18\\x02 \\x01(\\tR\\ametrics\\x12'\\n\" 
+\n\t\"\\x04info\\x18\\x03 \\x01(\\v2\\x13.livekit.EgressInfoR\\x04info\\\"\\x9d\\x01\\n\" +\n\t\"\\x13StorageEventRequest\\x12\\x1b\\n\" +\n\t\"\\tegress_id\\x18\\x01 \\x01(\\tR\\begressId\\x12\\x1c\\n\" +\n\t\"\\toperation\\x18\\x02 \\x01(\\tR\\toperation\\x12\\x12\\n\" +\n\t\"\\x04path\\x18\\x03 \\x01(\\tR\\x04path\\x12\\x12\\n\" +\n\t\"\\x04size\\x18\\x04 \\x01(\\x03R\\x04size\\x12#\\n\" +\n\t\"\\rlifetime_days\\x18\\x05 \\x01(\\x03R\\flifetimeDays\\\"\\x1c\\n\" +\n\t\"\\x1aGstPipelineDebugDotRequest\\\"8\\n\" +\n\t\"\\x1bGstPipelineDebugDotResponse\\x12\\x19\\n\" +\n\t\"\\bdot_file\\x18\\x01 \\x01(\\tR\\adotFile\\\"a\\n\" +\n\t\"\\fPProfRequest\\x12!\\n\" +\n\t\"\\fprofile_name\\x18\\x01 \\x01(\\tR\\vprofileName\\x12\\x18\\n\" +\n\t\"\\atimeout\\x18\\x02 \\x01(\\x05R\\atimeout\\x12\\x14\\n\" +\n\t\"\\x05debug\\x18\\x03 \\x01(\\x05R\\x05debug\\\".\\n\" +\n\t\"\\rPProfResponse\\x12\\x1d\\n\" +\n\t\"\\n\" +\n\t\"pprof_file\\x18\\x01 \\x01(\\fR\\tpprofFile\\\"\\x10\\n\" +\n\t\"\\x0eMetricsRequest\\\"+\\n\" +\n\t\"\\x0fMetricsResponse\\x12\\x18\\n\" +\n\t\"\\ametrics\\x18\\x01 \\x01(\\tR\\ametrics\\\")\\n\" +\n\t\"\\x11KillEgressRequest\\x12\\x14\\n\" +\n\t\"\\x05error\\x18\\x01 \\x01(\\tR\\x05error2\\xe5\\x02\\n\" +\n\t\"\\rEgressService\\x12B\\n\" +\n\t\"\\fHandlerReady\\x12\\x18.ipc.HandlerReadyRequest\\x1a\\x16.google.protobuf.Empty\\\"\\x00\\x12>\\n\" +\n\t\"\\rHandlerUpdate\\x12\\x13.livekit.EgressInfo\\x1a\\x16.google.protobuf.Empty\\\"\\x00\\x12H\\n\" +\n\t\"\\x0fHandlerFinished\\x12\\x1b.ipc.HandlerFinishedRequest\\x1a\\x16.google.protobuf.Empty\\\"\\x00\\x12B\\n\" +\n\t\"\\vReplayReady\\x12\\x17.rpc.EgressReadyRequest\\x1a\\x18.rpc.EgressReadyResponse\\\"\\x00\\x12B\\n\" +\n\t\"\\fStorageEvent\\x12\\x18.ipc.StorageEventRequest\\x1a\\x16.google.protobuf.Empty\\\"\\x002\\x96\\x02\\n\" +\n\t\"\\rEgressHandler\\x12U\\n\" +\n\t\"\\x0eGetPipelineDot\\x12\\x1f.ipc.GstPipelineDebugDotRequest\\x1a .ipc.GstPipelineDebugDotResponse\\\"\\x00\\x123\\n\" 
+\n\t\"\\bGetPProf\\x12\\x11.ipc.PProfRequest\\x1a\\x12.ipc.PProfResponse\\\"\\x00\\x129\\n\" +\n\t\"\\n\" +\n\t\"GetMetrics\\x12\\x13.ipc.MetricsRequest\\x1a\\x14.ipc.MetricsResponse\\\"\\x00\\x12>\\n\" +\n\t\"\\n\" +\n\t\"KillEgress\\x12\\x16.ipc.KillEgressRequest\\x1a\\x16.google.protobuf.Empty\\\"\\x00B#Z!github.com/livekit/egress/pkg/ipcb\\x06proto3\"\n\nvar (\n\tfile_ipc_proto_rawDescOnce sync.Once\n\tfile_ipc_proto_rawDescData []byte\n)\n\nfunc file_ipc_proto_rawDescGZIP() []byte {\n\tfile_ipc_proto_rawDescOnce.Do(func() {\n\t\tfile_ipc_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_ipc_proto_rawDesc), len(file_ipc_proto_rawDesc)))\n\t})\n\treturn file_ipc_proto_rawDescData\n}\n\nvar file_ipc_proto_msgTypes = make([]protoimpl.MessageInfo, 10)\nvar file_ipc_proto_goTypes = []any{\n\t(*HandlerReadyRequest)(nil),         // 0: ipc.HandlerReadyRequest\n\t(*HandlerFinishedRequest)(nil),      // 1: ipc.HandlerFinishedRequest\n\t(*StorageEventRequest)(nil),         // 2: ipc.StorageEventRequest\n\t(*GstPipelineDebugDotRequest)(nil),  // 3: ipc.GstPipelineDebugDotRequest\n\t(*GstPipelineDebugDotResponse)(nil), // 4: ipc.GstPipelineDebugDotResponse\n\t(*PProfRequest)(nil),                // 5: ipc.PProfRequest\n\t(*PProfResponse)(nil),               // 6: ipc.PProfResponse\n\t(*MetricsRequest)(nil),              // 7: ipc.MetricsRequest\n\t(*MetricsResponse)(nil),             // 8: ipc.MetricsResponse\n\t(*KillEgressRequest)(nil),           // 9: ipc.KillEgressRequest\n\t(*livekit.EgressInfo)(nil),          // 10: livekit.EgressInfo\n\t(*rpc.EgressReadyRequest)(nil),      // 11: rpc.EgressReadyRequest\n\t(*emptypb.Empty)(nil),               // 12: google.protobuf.Empty\n\t(*rpc.EgressReadyResponse)(nil),     // 13: rpc.EgressReadyResponse\n}\nvar file_ipc_proto_depIdxs = []int32{\n\t10, // 0: ipc.HandlerFinishedRequest.info:type_name -> livekit.EgressInfo\n\t0,  // 1: ipc.EgressService.HandlerReady:input_type -> 
ipc.HandlerReadyRequest\n\t10, // 2: ipc.EgressService.HandlerUpdate:input_type -> livekit.EgressInfo\n\t1,  // 3: ipc.EgressService.HandlerFinished:input_type -> ipc.HandlerFinishedRequest\n\t11, // 4: ipc.EgressService.ReplayReady:input_type -> rpc.EgressReadyRequest\n\t2,  // 5: ipc.EgressService.StorageEvent:input_type -> ipc.StorageEventRequest\n\t3,  // 6: ipc.EgressHandler.GetPipelineDot:input_type -> ipc.GstPipelineDebugDotRequest\n\t5,  // 7: ipc.EgressHandler.GetPProf:input_type -> ipc.PProfRequest\n\t7,  // 8: ipc.EgressHandler.GetMetrics:input_type -> ipc.MetricsRequest\n\t9,  // 9: ipc.EgressHandler.KillEgress:input_type -> ipc.KillEgressRequest\n\t12, // 10: ipc.EgressService.HandlerReady:output_type -> google.protobuf.Empty\n\t12, // 11: ipc.EgressService.HandlerUpdate:output_type -> google.protobuf.Empty\n\t12, // 12: ipc.EgressService.HandlerFinished:output_type -> google.protobuf.Empty\n\t13, // 13: ipc.EgressService.ReplayReady:output_type -> rpc.EgressReadyResponse\n\t12, // 14: ipc.EgressService.StorageEvent:output_type -> google.protobuf.Empty\n\t4,  // 15: ipc.EgressHandler.GetPipelineDot:output_type -> ipc.GstPipelineDebugDotResponse\n\t6,  // 16: ipc.EgressHandler.GetPProf:output_type -> ipc.PProfResponse\n\t8,  // 17: ipc.EgressHandler.GetMetrics:output_type -> ipc.MetricsResponse\n\t12, // 18: ipc.EgressHandler.KillEgress:output_type -> google.protobuf.Empty\n\t10, // [10:19] is the sub-list for method output_type\n\t1,  // [1:10] is the sub-list for method input_type\n\t1,  // [1:1] is the sub-list for extension type_name\n\t1,  // [1:1] is the sub-list for extension extendee\n\t0,  // [0:1] is the sub-list for field type_name\n}\n\nfunc init() { file_ipc_proto_init() }\nfunc file_ipc_proto_init() {\n\tif File_ipc_proto != nil {\n\t\treturn\n\t}\n\ttype x struct{}\n\tout := protoimpl.TypeBuilder{\n\t\tFile: protoimpl.DescBuilder{\n\t\t\tGoPackagePath: reflect.TypeOf(x{}).PkgPath(),\n\t\t\tRawDescriptor: 
unsafe.Slice(unsafe.StringData(file_ipc_proto_rawDesc), len(file_ipc_proto_rawDesc)),\n\t\t\tNumEnums:      0,\n\t\t\tNumMessages:   10,\n\t\t\tNumExtensions: 0,\n\t\t\tNumServices:   2,\n\t\t},\n\t\tGoTypes:           file_ipc_proto_goTypes,\n\t\tDependencyIndexes: file_ipc_proto_depIdxs,\n\t\tMessageInfos:      file_ipc_proto_msgTypes,\n\t}.Build()\n\tFile_ipc_proto = out.File\n\tfile_ipc_proto_goTypes = nil\n\tfile_ipc_proto_depIdxs = nil\n}\n"
  },
  {
    "path": "pkg/ipc/ipc.proto",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\nsyntax = \"proto3\";\n\npackage ipc;\noption go_package = \"github.com/livekit/egress/pkg/ipc\";\n\nimport \"google/protobuf/empty.proto\";\nimport \"livekit_egress.proto\";\nimport \"rpc/egress.proto\";\n\nservice EgressService {\n  rpc HandlerReady(HandlerReadyRequest) returns (google.protobuf.Empty) {};\n  rpc HandlerUpdate(livekit.EgressInfo) returns (google.protobuf.Empty) {};\n  rpc HandlerFinished(HandlerFinishedRequest) returns (google.protobuf.Empty) {};\n  rpc ReplayReady(rpc.EgressReadyRequest) returns (rpc.EgressReadyResponse) {};\n  rpc StorageEvent(StorageEventRequest) returns (google.protobuf.Empty) {};\n}\n\nmessage HandlerReadyRequest {\n  string egress_id = 1;\n}\n\nmessage HandlerFinishedRequest {\n  string egress_id = 1;\n  string metrics = 2;\n  livekit.EgressInfo info = 3;\n}\n\nmessage StorageEventRequest {\n  string egress_id = 1;\n  string operation = 2;\n  string path = 3;\n  int64 size = 4;\n  int64 lifetime_days = 5;\n}\n\nservice EgressHandler {\n  rpc GetPipelineDot(GstPipelineDebugDotRequest) returns (GstPipelineDebugDotResponse) {};\n  rpc GetPProf(PProfRequest) returns (PProfResponse) {};\n  rpc GetMetrics(MetricsRequest) returns (MetricsResponse) {};\n  rpc KillEgress(KillEgressRequest) returns (google.protobuf.Empty) {};\n}\n\nmessage GstPipelineDebugDotRequest {}\n\nmessage GstPipelineDebugDotResponse {\n  string 
dot_file = 1;\n}\n\nmessage PProfRequest {\n  string profile_name = 1;\n  int32 timeout = 2;\n  int32 debug = 3;\n}\n\nmessage PProfResponse {\n  bytes pprof_file = 1;\n}\n\nmessage MetricsRequest {}\n\nmessage MetricsResponse {\n  string metrics = 1;\n}\n\nmessage KillEgressRequest {\n  string error = 1;\n}\n"
  },
  {
    "path": "pkg/ipc/ipc_grpc.pb.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// Code generated by protoc-gen-go-grpc. DO NOT EDIT.\n// versions:\n// - protoc-gen-go-grpc v1.5.1\n// - protoc             v6.33.0\n// source: ipc.proto\n\npackage ipc\n\nimport (\n\tcontext \"context\"\n\tlivekit \"github.com/livekit/protocol/livekit\"\n\trpc \"github.com/livekit/protocol/rpc\"\n\tgrpc \"google.golang.org/grpc\"\n\tcodes \"google.golang.org/grpc/codes\"\n\tstatus \"google.golang.org/grpc/status\"\n\temptypb \"google.golang.org/protobuf/types/known/emptypb\"\n)\n\n// This is a compile-time assertion to ensure that this generated file\n// is compatible with the grpc package it is being compiled against.\n// Requires gRPC-Go v1.64.0 or later.\nconst _ = grpc.SupportPackageIsVersion9\n\nconst (\n\tEgressService_HandlerReady_FullMethodName    = \"/ipc.EgressService/HandlerReady\"\n\tEgressService_HandlerUpdate_FullMethodName   = \"/ipc.EgressService/HandlerUpdate\"\n\tEgressService_HandlerFinished_FullMethodName = \"/ipc.EgressService/HandlerFinished\"\n\tEgressService_ReplayReady_FullMethodName     = \"/ipc.EgressService/ReplayReady\"\n\tEgressService_StorageEvent_FullMethodName    = \"/ipc.EgressService/StorageEvent\"\n)\n\n// EgressServiceClient is the client API for EgressService service.\n//\n// For semantics around ctx use and closing/ending streaming RPCs, please refer to 
https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.\ntype EgressServiceClient interface {\n\tHandlerReady(ctx context.Context, in *HandlerReadyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)\n\tHandlerUpdate(ctx context.Context, in *livekit.EgressInfo, opts ...grpc.CallOption) (*emptypb.Empty, error)\n\tHandlerFinished(ctx context.Context, in *HandlerFinishedRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)\n\tReplayReady(ctx context.Context, in *rpc.EgressReadyRequest, opts ...grpc.CallOption) (*rpc.EgressReadyResponse, error)\n\tStorageEvent(ctx context.Context, in *StorageEventRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)\n}\n\ntype egressServiceClient struct {\n\tcc grpc.ClientConnInterface\n}\n\nfunc NewEgressServiceClient(cc grpc.ClientConnInterface) EgressServiceClient {\n\treturn &egressServiceClient{cc}\n}\n\nfunc (c *egressServiceClient) HandlerReady(ctx context.Context, in *HandlerReadyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(emptypb.Empty)\n\terr := c.cc.Invoke(ctx, EgressService_HandlerReady_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *egressServiceClient) HandlerUpdate(ctx context.Context, in *livekit.EgressInfo, opts ...grpc.CallOption) (*emptypb.Empty, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(emptypb.Empty)\n\terr := c.cc.Invoke(ctx, EgressService_HandlerUpdate_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *egressServiceClient) HandlerFinished(ctx context.Context, in *HandlerFinishedRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(emptypb.Empty)\n\terr := c.cc.Invoke(ctx, EgressService_HandlerFinished_FullMethodName, in, out, 
cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *egressServiceClient) ReplayReady(ctx context.Context, in *rpc.EgressReadyRequest, opts ...grpc.CallOption) (*rpc.EgressReadyResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(rpc.EgressReadyResponse)\n\terr := c.cc.Invoke(ctx, EgressService_ReplayReady_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *egressServiceClient) StorageEvent(ctx context.Context, in *StorageEventRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(emptypb.Empty)\n\terr := c.cc.Invoke(ctx, EgressService_StorageEvent_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\n// EgressServiceServer is the server API for EgressService service.\n// All implementations must embed UnimplementedEgressServiceServer\n// for forward compatibility.\ntype EgressServiceServer interface {\n\tHandlerReady(context.Context, *HandlerReadyRequest) (*emptypb.Empty, error)\n\tHandlerUpdate(context.Context, *livekit.EgressInfo) (*emptypb.Empty, error)\n\tHandlerFinished(context.Context, *HandlerFinishedRequest) (*emptypb.Empty, error)\n\tReplayReady(context.Context, *rpc.EgressReadyRequest) (*rpc.EgressReadyResponse, error)\n\tStorageEvent(context.Context, *StorageEventRequest) (*emptypb.Empty, error)\n\tmustEmbedUnimplementedEgressServiceServer()\n}\n\n// UnimplementedEgressServiceServer must be embedded to have\n// forward compatible implementations.\n//\n// NOTE: this should be embedded by value instead of pointer to avoid a nil\n// pointer dereference when methods are called.\ntype UnimplementedEgressServiceServer struct{}\n\nfunc (UnimplementedEgressServiceServer) HandlerReady(context.Context, *HandlerReadyRequest) (*emptypb.Empty, error) {\n\treturn nil, 
status.Errorf(codes.Unimplemented, \"method HandlerReady not implemented\")\n}\nfunc (UnimplementedEgressServiceServer) HandlerUpdate(context.Context, *livekit.EgressInfo) (*emptypb.Empty, error) {\n\treturn nil, status.Errorf(codes.Unimplemented, \"method HandlerUpdate not implemented\")\n}\nfunc (UnimplementedEgressServiceServer) HandlerFinished(context.Context, *HandlerFinishedRequest) (*emptypb.Empty, error) {\n\treturn nil, status.Errorf(codes.Unimplemented, \"method HandlerFinished not implemented\")\n}\nfunc (UnimplementedEgressServiceServer) ReplayReady(context.Context, *rpc.EgressReadyRequest) (*rpc.EgressReadyResponse, error) {\n\treturn nil, status.Errorf(codes.Unimplemented, \"method ReplayReady not implemented\")\n}\nfunc (UnimplementedEgressServiceServer) StorageEvent(context.Context, *StorageEventRequest) (*emptypb.Empty, error) {\n\treturn nil, status.Errorf(codes.Unimplemented, \"method StorageEvent not implemented\")\n}\nfunc (UnimplementedEgressServiceServer) mustEmbedUnimplementedEgressServiceServer() {}\nfunc (UnimplementedEgressServiceServer) testEmbeddedByValue()                       {}\n\n// UnsafeEgressServiceServer may be embedded to opt out of forward compatibility for this service.\n// Use of this interface is not recommended, as added methods to EgressServiceServer will\n// result in compilation errors.\ntype UnsafeEgressServiceServer interface {\n\tmustEmbedUnimplementedEgressServiceServer()\n}\n\nfunc RegisterEgressServiceServer(s grpc.ServiceRegistrar, srv EgressServiceServer) {\n\t// If the following call panics, it indicates UnimplementedEgressServiceServer was\n\t// embedded by pointer and is nil.  
This will cause panics if an\n\t// unimplemented method is ever invoked, so we test this at initialization\n\t// time to prevent it from happening at runtime later due to I/O.\n\tif t, ok := srv.(interface{ testEmbeddedByValue() }); ok {\n\t\tt.testEmbeddedByValue()\n\t}\n\ts.RegisterService(&EgressService_ServiceDesc, srv)\n}\n\nfunc _EgressService_HandlerReady_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(HandlerReadyRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(EgressServiceServer).HandlerReady(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: EgressService_HandlerReady_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(EgressServiceServer).HandlerReady(ctx, req.(*HandlerReadyRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _EgressService_HandlerUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(livekit.EgressInfo)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(EgressServiceServer).HandlerUpdate(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: EgressService_HandlerUpdate_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(EgressServiceServer).HandlerUpdate(ctx, req.(*livekit.EgressInfo))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _EgressService_HandlerFinished_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(HandlerFinishedRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, 
err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(EgressServiceServer).HandlerFinished(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: EgressService_HandlerFinished_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(EgressServiceServer).HandlerFinished(ctx, req.(*HandlerFinishedRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _EgressService_ReplayReady_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(rpc.EgressReadyRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(EgressServiceServer).ReplayReady(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: EgressService_ReplayReady_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(EgressServiceServer).ReplayReady(ctx, req.(*rpc.EgressReadyRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _EgressService_StorageEvent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(StorageEventRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(EgressServiceServer).StorageEvent(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: EgressService_StorageEvent_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(EgressServiceServer).StorageEvent(ctx, req.(*StorageEventRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\n// EgressService_ServiceDesc is the grpc.ServiceDesc for EgressService service.\n// It's only intended for direct use with 
grpc.RegisterService,\n// and not to be introspected or modified (even as a copy)\nvar EgressService_ServiceDesc = grpc.ServiceDesc{\n\tServiceName: \"ipc.EgressService\",\n\tHandlerType: (*EgressServiceServer)(nil),\n\tMethods: []grpc.MethodDesc{\n\t\t{\n\t\t\tMethodName: \"HandlerReady\",\n\t\t\tHandler:    _EgressService_HandlerReady_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"HandlerUpdate\",\n\t\t\tHandler:    _EgressService_HandlerUpdate_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"HandlerFinished\",\n\t\t\tHandler:    _EgressService_HandlerFinished_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"ReplayReady\",\n\t\t\tHandler:    _EgressService_ReplayReady_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"StorageEvent\",\n\t\t\tHandler:    _EgressService_StorageEvent_Handler,\n\t\t},\n\t},\n\tStreams:  []grpc.StreamDesc{},\n\tMetadata: \"ipc.proto\",\n}\n\nconst (\n\tEgressHandler_GetPipelineDot_FullMethodName = \"/ipc.EgressHandler/GetPipelineDot\"\n\tEgressHandler_GetPProf_FullMethodName       = \"/ipc.EgressHandler/GetPProf\"\n\tEgressHandler_GetMetrics_FullMethodName     = \"/ipc.EgressHandler/GetMetrics\"\n\tEgressHandler_KillEgress_FullMethodName     = \"/ipc.EgressHandler/KillEgress\"\n)\n\n// EgressHandlerClient is the client API for EgressHandler service.\n//\n// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.\ntype EgressHandlerClient interface {\n\tGetPipelineDot(ctx context.Context, in *GstPipelineDebugDotRequest, opts ...grpc.CallOption) (*GstPipelineDebugDotResponse, error)\n\tGetPProf(ctx context.Context, in *PProfRequest, opts ...grpc.CallOption) (*PProfResponse, error)\n\tGetMetrics(ctx context.Context, in *MetricsRequest, opts ...grpc.CallOption) (*MetricsResponse, error)\n\tKillEgress(ctx context.Context, in *KillEgressRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)\n}\n\ntype egressHandlerClient struct {\n\tcc 
grpc.ClientConnInterface\n}\n\nfunc NewEgressHandlerClient(cc grpc.ClientConnInterface) EgressHandlerClient {\n\treturn &egressHandlerClient{cc}\n}\n\nfunc (c *egressHandlerClient) GetPipelineDot(ctx context.Context, in *GstPipelineDebugDotRequest, opts ...grpc.CallOption) (*GstPipelineDebugDotResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(GstPipelineDebugDotResponse)\n\terr := c.cc.Invoke(ctx, EgressHandler_GetPipelineDot_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *egressHandlerClient) GetPProf(ctx context.Context, in *PProfRequest, opts ...grpc.CallOption) (*PProfResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(PProfResponse)\n\terr := c.cc.Invoke(ctx, EgressHandler_GetPProf_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *egressHandlerClient) GetMetrics(ctx context.Context, in *MetricsRequest, opts ...grpc.CallOption) (*MetricsResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(MetricsResponse)\n\terr := c.cc.Invoke(ctx, EgressHandler_GetMetrics_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *egressHandlerClient) KillEgress(ctx context.Context, in *KillEgressRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(emptypb.Empty)\n\terr := c.cc.Invoke(ctx, EgressHandler_KillEgress_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\n// EgressHandlerServer is the server API for EgressHandler service.\n// All implementations must embed UnimplementedEgressHandlerServer\n// for forward compatibility.\ntype EgressHandlerServer interface {\n\tGetPipelineDot(context.Context, 
*GstPipelineDebugDotRequest) (*GstPipelineDebugDotResponse, error)\n\tGetPProf(context.Context, *PProfRequest) (*PProfResponse, error)\n\tGetMetrics(context.Context, *MetricsRequest) (*MetricsResponse, error)\n\tKillEgress(context.Context, *KillEgressRequest) (*emptypb.Empty, error)\n\tmustEmbedUnimplementedEgressHandlerServer()\n}\n\n// UnimplementedEgressHandlerServer must be embedded to have\n// forward compatible implementations.\n//\n// NOTE: this should be embedded by value instead of pointer to avoid a nil\n// pointer dereference when methods are called.\ntype UnimplementedEgressHandlerServer struct{}\n\nfunc (UnimplementedEgressHandlerServer) GetPipelineDot(context.Context, *GstPipelineDebugDotRequest) (*GstPipelineDebugDotResponse, error) {\n\treturn nil, status.Errorf(codes.Unimplemented, \"method GetPipelineDot not implemented\")\n}\nfunc (UnimplementedEgressHandlerServer) GetPProf(context.Context, *PProfRequest) (*PProfResponse, error) {\n\treturn nil, status.Errorf(codes.Unimplemented, \"method GetPProf not implemented\")\n}\nfunc (UnimplementedEgressHandlerServer) GetMetrics(context.Context, *MetricsRequest) (*MetricsResponse, error) {\n\treturn nil, status.Errorf(codes.Unimplemented, \"method GetMetrics not implemented\")\n}\nfunc (UnimplementedEgressHandlerServer) KillEgress(context.Context, *KillEgressRequest) (*emptypb.Empty, error) {\n\treturn nil, status.Errorf(codes.Unimplemented, \"method KillEgress not implemented\")\n}\nfunc (UnimplementedEgressHandlerServer) mustEmbedUnimplementedEgressHandlerServer() {}\nfunc (UnimplementedEgressHandlerServer) testEmbeddedByValue()                       {}\n\n// UnsafeEgressHandlerServer may be embedded to opt out of forward compatibility for this service.\n// Use of this interface is not recommended, as added methods to EgressHandlerServer will\n// result in compilation errors.\ntype UnsafeEgressHandlerServer interface {\n\tmustEmbedUnimplementedEgressHandlerServer()\n}\n\nfunc 
RegisterEgressHandlerServer(s grpc.ServiceRegistrar, srv EgressHandlerServer) {\n\t// If the following call panics, it indicates UnimplementedEgressHandlerServer was\n\t// embedded by pointer and is nil.  This will cause panics if an\n\t// unimplemented method is ever invoked, so we test this at initialization\n\t// time to prevent it from happening at runtime later due to I/O.\n\tif t, ok := srv.(interface{ testEmbeddedByValue() }); ok {\n\t\tt.testEmbeddedByValue()\n\t}\n\ts.RegisterService(&EgressHandler_ServiceDesc, srv)\n}\n\nfunc _EgressHandler_GetPipelineDot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(GstPipelineDebugDotRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(EgressHandlerServer).GetPipelineDot(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: EgressHandler_GetPipelineDot_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(EgressHandlerServer).GetPipelineDot(ctx, req.(*GstPipelineDebugDotRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _EgressHandler_GetPProf_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(PProfRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(EgressHandlerServer).GetPProf(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: EgressHandler_GetPProf_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(EgressHandlerServer).GetPProf(ctx, req.(*PProfRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _EgressHandler_GetMetrics_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(MetricsRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(EgressHandlerServer).GetMetrics(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: EgressHandler_GetMetrics_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(EgressHandlerServer).GetMetrics(ctx, req.(*MetricsRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _EgressHandler_KillEgress_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(KillEgressRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(EgressHandlerServer).KillEgress(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: EgressHandler_KillEgress_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(EgressHandlerServer).KillEgress(ctx, req.(*KillEgressRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\n// EgressHandler_ServiceDesc is the grpc.ServiceDesc for EgressHandler service.\n// It's only intended for direct use with grpc.RegisterService,\n// and not to be introspected or modified (even as a copy)\nvar EgressHandler_ServiceDesc = grpc.ServiceDesc{\n\tServiceName: \"ipc.EgressHandler\",\n\tHandlerType: (*EgressHandlerServer)(nil),\n\tMethods: []grpc.MethodDesc{\n\t\t{\n\t\t\tMethodName: \"GetPipelineDot\",\n\t\t\tHandler:    _EgressHandler_GetPipelineDot_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"GetPProf\",\n\t\t\tHandler:    _EgressHandler_GetPProf_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"GetMetrics\",\n\t\t\tHandler:    
_EgressHandler_GetMetrics_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"KillEgress\",\n\t\t\tHandler:    _EgressHandler_KillEgress_Handler,\n\t\t},\n\t},\n\tStreams:  []grpc.StreamDesc{},\n\tMetadata: \"ipc.proto\",\n}\n"
  },
  {
    "path": "pkg/logging/csv.go",
    "content": "// Copyright 2025 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage logging\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"reflect\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype TrackStats struct {\n\tTimestamp       string\n\tPacketsReceived uint64\n\tPaddingReceived uint64\n\tLastReceived    string\n\tPacketsDropped  uint64\n\tPacketsPushed   uint64\n\tSamplesPushed   uint64\n\tLastPushed      string\n\tDrift           time.Duration\n\tMaxDrift        time.Duration\n}\n\ntype StreamStats struct {\n\tTimestamp     string\n\tKeyframes     uint64\n\tOutBytesTotal uint64\n\tOutBytesAcked uint64\n\tInBytesTotal  uint64\n\tInBytesAcked  uint64\n}\n\n// CSVLogger is used for logging data in CSV format. 
It does not validate columns or data\ntype CSVLogger[T any] struct {\n\tf *os.File\n}\n\nfunc NewCSVLogger[T any](filename string) (*CSVLogger[T], error) {\n\tif !strings.HasSuffix(filename, \".csv\") {\n\t\tfilename = filename + \".csv\"\n\t}\n\tfilename = path.Join(os.TempDir(), filename)\n\tf, err := os.Create(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcolumns := make([]string, 0)\n\tt := reflect.TypeFor[T]()\n\tfor i := range t.NumField() {\n\t\tcolumns = append(columns, t.Field(i).Name)\n\t}\n\n\t_, _ = fmt.Fprintf(f, \"%s\\n\", strings.Join(columns, \",\"))\n\n\treturn &CSVLogger[T]{\n\t\tf: f,\n\t}, nil\n}\n\nfunc (l *CSVLogger[T]) Write(value *T) {\n\tv := reflect.ValueOf(value).Elem()\n\tt := v.Type()\n\n\trow := make([]string, t.NumField())\n\tfor i := range t.NumField() {\n\t\trow[i] = fmt.Sprintf(\"%v\", v.Field(i).Interface())\n\t}\n\n\t_, _ = l.f.WriteString(strings.Join(row, \",\") + \"\\n\")\n}\n\nfunc (l *CSVLogger[T]) Close() {\n\t_ = l.f.Close()\n}\n"
  },
  {
    "path": "pkg/logging/handler.go",
    "content": "package logging\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/frostbyte73/core\"\n\t\"go.uber.org/atomic\"\n\n\t\"github.com/livekit/protocol/logger\"\n)\n\nconst (\n\tchannelSize     = 4096\n\tdropLogThrottle = 10 * time.Second\n)\n\ntype HandlerLogger struct {\n\tch          chan []byte\n\tdone        core.Fuse\n\tdropped     atomic.Int64\n\tlastDropLog atomic.Int64 // unix nanos\n\tl           logger.Logger\n}\n\nfunc NewHandlerLogger(handlerID, egressID string) *HandlerLogger {\n\th := &HandlerLogger{\n\t\tch: make(chan []byte, channelSize),\n\t\tl: logger.GetLogger().WithValues(\n\t\t\t\"handlerID\", handlerID,\n\t\t\t\"egressID\", egressID,\n\t\t),\n\t}\n\tgo h.drain()\n\treturn h\n}\n\nfunc (h *HandlerLogger) Write(p []byte) (int, error) {\n\tcp := make([]byte, len(p))\n\tcopy(cp, p)\n\n\tselect {\n\tcase h.ch <- cp:\n\tdefault:\n\t\tcount := h.dropped.Inc()\n\t\tnow := time.Now().UnixNano()\n\t\tlast := h.lastDropLog.Load()\n\t\tif now-last >= int64(dropLogThrottle) {\n\t\t\tif h.lastDropLog.CompareAndSwap(last, now) {\n\t\t\t\th.l.Warnw(fmt.Sprintf(\"handler logger dropped %d messages\", count), nil)\n\t\t\t\th.dropped.Store(0)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn len(p), nil\n}\n\nfunc (h *HandlerLogger) Close() error {\n\tclose(h.ch)\n\t<-h.done.Watch()\n\treturn nil\n}\n\nfunc (h *HandlerLogger) drain() {\n\tvar buf []byte\n\tvar panicBuf []string\n\n\tdefer func() {\n\t\t// flush remaining buffer\n\t\tif len(buf) > 0 {\n\t\t\th.processLine(string(buf), &panicBuf)\n\t\t}\n\t\t// flush any accumulated panic\n\t\tif len(panicBuf) > 0 {\n\t\t\th.l.Errorw(strings.Join(panicBuf, \"\\n\"), nil)\n\t\t}\n\t\th.done.Break()\n\t}()\n\n\tfor chunk := range h.ch {\n\t\tbuf = append(buf, chunk...)\n\n\t\tfor {\n\t\t\tidx := bytes.IndexByte(buf, '\\n')\n\t\t\tif idx < 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tline := string(buf[:idx])\n\t\t\tbuf = buf[idx+1:]\n\t\t\th.processLine(line, &panicBuf)\n\t\t}\n\t}\n}\n\nfunc (h 
*HandlerLogger) processLine(line string, panicBuf *[]string) {\n\tif len(line) == 0 {\n\t\treturn\n\t}\n\n\tif line[len(line)-1] == '}' {\n\t\tif len(*panicBuf) > 0 {\n\t\t\th.flushPanic(panicBuf)\n\t\t}\n\t\tfmt.Println(line)\n\t\treturn\n\t}\n\n\t// gstreamer stderr (timestamp-prefixed)\n\tif strings.HasPrefix(line, \"0:00:0\") {\n\t\treturn\n\t}\n\n\t// glib/gobject warnings from gstreamer\n\tif strings.HasPrefix(line, \"(egress:\") {\n\t\th.l.Warnw(line, nil)\n\t\treturn\n\t}\n\n\t// panic entry\n\tif strings.HasPrefix(line, \"panic:\") ||\n\t\tstrings.HasPrefix(line, \"fatal error:\") ||\n\t\tstrings.HasPrefix(line, \"goroutine \") {\n\t\t*panicBuf = append(*panicBuf, line)\n\t\treturn\n\t}\n\n\t// panic accumulation\n\tif len(*panicBuf) > 0 {\n\t\tif h.isPanicContinuation(line) {\n\t\t\t*panicBuf = append(*panicBuf, line)\n\t\t\treturn\n\t\t}\n\t\th.flushPanic(panicBuf)\n\t}\n\n\th.l.Errorw(line, nil)\n}\n\nfunc (h *HandlerLogger) isPanicContinuation(line string) bool {\n\tif line[0] == '\\t' {\n\t\treturn true\n\t}\n\tif strings.HasPrefix(line, \"goroutine \") {\n\t\treturn true\n\t}\n\tif !strings.HasPrefix(line, \"(\") && strings.Contains(line, \"(\") {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (h *HandlerLogger) flushPanic(panicBuf *[]string) {\n\tif len(*panicBuf) > 0 {\n\t\th.l.Errorw(strings.Join(*panicBuf, \"\\n\"), nil)\n\t\t*panicBuf = nil\n\t}\n}\n"
  },
  {
    "path": "pkg/logging/s3.go",
    "content": "// Copyright 2025 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage logging\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/aws/smithy-go/logging\"\n\t\"github.com/linkdata/deadlock\"\n\n\t\"github.com/livekit/protocol/logger\"\n)\n\n// S3Logger only logs aws messages on upload failure\ntype S3Logger struct {\n\tmu   deadlock.Mutex\n\tmsgs []string\n\tidx  int\n}\n\nfunc NewS3Logger() *S3Logger {\n\treturn &S3Logger{\n\t\tmsgs: make([]string, 10),\n\t}\n}\n\nfunc (l *S3Logger) Logf(classification logging.Classification, format string, v ...interface{}) {\n\tformat = \"aws %s: \" + format\n\tv = append([]interface{}{strings.ToLower(string(classification))}, v...)\n\n\tl.mu.Lock()\n\tl.msgs[l.idx%len(l.msgs)] = fmt.Sprintf(format, v...)\n\tl.idx++\n\tl.mu.Unlock()\n}\n\nfunc (l *S3Logger) WriteLogs() {\n\tl.mu.Lock()\n\tsize := len(l.msgs)\n\tfor range size {\n\t\tif msg := l.msgs[l.idx%size]; msg != \"\" {\n\t\t\tlogger.Debugw(msg)\n\t\t}\n\t\tl.idx++\n\t}\n\tl.mu.Unlock()\n}\n"
  },
  {
    "path": "pkg/pipeline/builder/audio.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage builder\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/go-gst/go-gst/gst\"\n\t\"github.com/go-gst/go-gst/gst/app\"\n\t\"github.com/linkdata/deadlock\"\n\t\"go.uber.org/atomic\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/gstreamer\"\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/livekit\"\n\t\"github.com/livekit/protocol/logger\"\n\tlksdk \"github.com/livekit/server-sdk-go/v2\"\n)\n\nconst (\n\tleakyQueue    = true\n\tblockingQueue = false\n\n\taudioRateTolerance = 3 * time.Millisecond\n\taudioBinName       = \"audio\"\n)\n\ntype AudioBin struct {\n\tbin  *gstreamer.Bin\n\tconf *config.PipelineConfig\n\n\tmu          deadlock.Mutex\n\tnextID      int\n\tnextChannel livekit.AudioChannel\n\tnames       map[string]string\n\n\taudioPacer *audioPacer\n}\n\ntype driftProcessNotifier interface {\n\tDriftProcessed()\n}\n\ntype audioPacer struct {\n\tpitch               *gst.Element\n\tactive              atomic.Bool\n\tremaining           time.Duration\n\ttc                  driftProcessNotifier\n\ttempoAdjustmentRate float64\n}\n\nfunc (a *audioPacer) start(drift time.Duration) {\n\tif a.pitch == nil || drift == 0 {\n\t\treturn\n\t}\n\tif a.active.Load() {\n\t\tlogger.Errorw(\n\t\t\t\"starting audio pacer, but it's already 
active\",\n\t\t\terrors.New(\"tempo controller bug\"),\n\t\t)\n\t\treturn\n\t}\n\n\trate := 1 + a.tempoAdjustmentRate\n\tif drift > 0 {\n\t\trate = 1 - a.tempoAdjustmentRate\n\t}\n\tcompensationFactor := 1 / a.tempoAdjustmentRate\n\tdriftNanoseconds := int64(drift)\n\tcompensationNanoseconds := int64(compensationFactor * float64(driftNanoseconds))\n\tcompensationDuration := time.Duration(compensationNanoseconds)\n\n\ta.remaining = compensationDuration.Abs()\n\tlogger.Debugw(\"starting audio pacer\", \"remaining\", a.remaining, \"rate\", rate)\n\ta.pitch.SetArg(\"tempo\", fmt.Sprintf(\"%.2f\", rate))\n\ta.active.Store(true)\n\n}\n\nfunc (a *audioPacer) observeProcessedDuration(d time.Duration) {\n\tif !a.active.Load() {\n\t\treturn\n\t}\n\ta.remaining -= d\n\tif a.remaining <= 0 {\n\t\tlogger.Debugw(\"audio gap processed, stopping the pacer\")\n\t\ta.stop()\n\t\ta.tc.DriftProcessed()\n\t}\n}\n\nfunc (a *audioPacer) stop() {\n\tif a.pitch == nil || a.tc == nil {\n\t\treturn\n\t}\n\ta.pitch.SetArg(\"tempo\", fmt.Sprintf(\"%.1f\", 1.0))\n\ta.active.Store(false)\n\ta.remaining = 0\n}\n\nfunc BuildAudioBin(pipeline *gstreamer.Pipeline, p *config.PipelineConfig) error {\n\tb := &AudioBin{\n\t\tbin:   pipeline.NewBin(audioBinName),\n\t\tconf:  p,\n\t\tnames: make(map[string]string),\n\t}\n\n\tswitch p.SourceType {\n\tcase types.SourceTypeWeb:\n\t\tif err := b.buildWebInput(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\tcase types.SourceTypeSDK:\n\t\tif err := b.buildSDKInput(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tpipeline.AddOnTrackAdded(b.onTrackAdded)\n\t\tpipeline.AddOnTrackRemoved(b.onTrackRemoved)\n\t\tpipeline.AddOnSourceBinReset(b.onSourceBinReset)\n\t}\n\n\tif len(p.GetEncodedOutputs()) > 1 {\n\t\ttee, err := gst.NewElementWithName(\"tee\", fmt.Sprintf(\"%s_tee\", audioBinName))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = b.bin.AddElement(tee); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tqueue, err := 
gstreamer.BuildQueue(fmt.Sprintf(\"%s_queue\", audioBinName), p.Latency.PipelineLatency, p.Live)\n\t\tif err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\t\tif err = b.bin.AddElement(queue); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn pipeline.AddSourceBin(b.bin)\n}\n\nfunc (b *AudioBin) onTrackAdded(ts *config.TrackSource) {\n\tif b.bin.GetState() > gstreamer.StateRunning {\n\t\treturn\n\t}\n\n\tif ts.TrackKind == lksdk.TrackKindAudio {\n\t\tlogger.Debugw(\"adding audio app src bin\", \"trackID\", ts.TrackID)\n\t\tif err := b.addAudioAppSrcBin(ts); err != nil {\n\t\t\tlogger.Errorw(\"failed to add audio app src bin\", err, \"trackID\", ts.TrackID)\n\t\t\tb.bin.OnError(err)\n\t\t}\n\t}\n}\n\nfunc (b *AudioBin) onTrackRemoved(trackID string) {\n\tif b.bin.GetState() > gstreamer.StateRunning {\n\t\treturn\n\t}\n\n\tb.mu.Lock()\n\tname, ok := b.names[trackID]\n\tif !ok {\n\t\tb.mu.Unlock()\n\t\treturn\n\t}\n\tdelete(b.names, trackID)\n\tb.mu.Unlock()\n\n\tif err := b.bin.RemoveSourceBin(name); err != nil {\n\t\tb.bin.OnError(err)\n\t}\n}\n\nfunc (b *AudioBin) buildWebInput() error {\n\tpulseSrc, err := gst.NewElement(\"pulsesrc\")\n\tif err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\tif err = pulseSrc.SetProperty(\"device\", fmt.Sprintf(\"%s.monitor\", b.conf.Info.EgressId)); err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\tif err = b.bin.AddElement(pulseSrc); err != nil {\n\t\treturn err\n\t}\n\n\tif err = addAudioConverter(b.bin, b.conf, livekit.AudioChannel_AUDIO_CHANNEL_BOTH, leakyQueue); err != nil {\n\t\treturn err\n\t}\n\tif b.conf.AudioTranscoding {\n\t\tif err = b.addEncoder(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (b *AudioBin) buildSDKInput() error {\n\tfor _, tr := range b.conf.AudioTracks {\n\t\tif err := b.addAudioAppSrcBin(tr); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif b.conf.Live {\n\t\tif err := b.addAudioTestSrcBin(); err != nil {\n\t\t\treturn 
err\n\t\t}\n\t}\n\tif err := b.addMixer(); err != nil {\n\t\treturn err\n\t}\n\tif b.conf.AudioTranscoding {\n\t\tif err := b.addEncoder(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (b *AudioBin) addAudioAppSrcBin(ts *config.TrackSource) error {\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\n\treturn b.addAudioAppSrcBinLocked(ts)\n}\n\nfunc (b *AudioBin) addAudioAppSrcBinLocked(ts *config.TrackSource) error {\n\tname := fmt.Sprintf(\"%s_%d\", ts.TrackID, b.nextID)\n\tb.nextID++\n\tb.names[ts.TrackID] = name\n\n\tappSrcBin := b.bin.NewBin(name)\n\tappSrcBin.SetEOSFunc(func() bool {\n\t\treturn false\n\t})\n\tts.AppSrc.SetArg(\"format\", \"time\")\n\tif err := ts.AppSrc.SetProperty(\"is-live\", b.conf.Live); err != nil {\n\t\treturn err\n\t}\n\tif !b.conf.Live {\n\t\tif err := ts.AppSrc.SetProperty(\"block\", true); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif err := appSrcBin.AddElement(ts.AppSrc.Element); err != nil {\n\t\treturn err\n\t}\n\n\tswitch ts.MimeType {\n\tcase types.MimeTypeOpus:\n\t\tif err := ts.AppSrc.SetProperty(\"caps\", gst.NewCapsFromString(fmt.Sprintf(\n\t\t\t\"application/x-rtp,media=audio,payload=%d,encoding-name=OPUS,clock-rate=%d\",\n\t\t\tts.PayloadType, ts.ClockRate,\n\t\t))); err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\n\t\trtpOpusDepay, err := gst.NewElement(\"rtpopusdepay\")\n\t\tif err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\n\t\topusDec, err := gst.NewElement(\"opusdec\")\n\t\tif err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\n\t\tif err = appSrcBin.AddElements(rtpOpusDepay, opusDec); err != nil {\n\t\t\treturn err\n\t\t}\n\n\tcase types.MimeTypePCMU:\n\t\tif err := ts.AppSrc.SetProperty(\"caps\", gst.NewCapsFromString(fmt.Sprintf(\n\t\t\t\"application/x-rtp,media=audio,payload=%d,encoding-name=PCMU,clock-rate=%d\",\n\t\t\tts.PayloadType, ts.ClockRate,\n\t\t))); err != nil {\n\t\t\treturn 
errors.ErrGstPipelineError(err)\n\t\t}\n\n\t\trtpPCMUDepay, err := gst.NewElement(\"rtppcmudepay\")\n\t\tif err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\n\t\tmulawDec, err := gst.NewElement(\"mulawdec\")\n\t\tif err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\n\t\tif err = appSrcBin.AddElements(rtpPCMUDepay, mulawDec); err != nil {\n\t\t\treturn err\n\t\t}\n\n\tcase types.MimeTypePCMA:\n\t\tif err := ts.AppSrc.SetProperty(\"caps\", gst.NewCapsFromString(fmt.Sprintf(\n\t\t\t\"application/x-rtp,media=audio,payload=%d,encoding-name=PCMA,clock-rate=%d\",\n\t\t\tts.PayloadType, ts.ClockRate,\n\t\t))); err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\n\t\trtpPCMADepay, err := gst.NewElement(\"rtppcmadepay\")\n\t\tif err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\n\t\talawDec, err := gst.NewElement(\"alawdec\")\n\t\tif err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\n\t\tif err = appSrcBin.AddElements(rtpPCMADepay, alawDec); err != nil {\n\t\t\treturn err\n\t\t}\n\n\tdefault:\n\t\treturn errors.ErrNotSupported(string(ts.MimeType))\n\t}\n\n\taddAudioConvertFunc := addAudioConverter\n\tif b.conf.AudioTempoController.Enabled {\n\t\taddAudioConvertFunc = b.addAudioConvertWithPitch\n\t}\n\n\tif err := addAudioConvertFunc(appSrcBin, b.conf, b.getChannelLocked(ts), blockingQueue); err != nil {\n\t\treturn err\n\t}\n\n\tif err := b.bin.AddSourceBin(appSrcBin); err != nil {\n\t\treturn err\n\t}\n\n\tif ts.TempoController != nil {\n\t\tts.TempoController.OnDriftDetectedCallback(func(drift time.Duration) {\n\t\t\tif b.audioPacer.pitch != nil {\n\t\t\t\tlogger.Debugw(\"starting audio pacer to cover the drift\", \"drift\", drift)\n\t\t\t\tb.audioPacer.start(drift)\n\t\t\t}\n\t\t})\n\t\tb.audioPacer.tc = ts.TempoController\n\t}\n\n\treturn nil\n}\n\nfunc (b *AudioBin) onSourceBinReset(ts *config.TrackSource) error {\n\tif ts.TrackKind != lksdk.TrackKindAudio {\n\t\treturn 
nil\n\t}\n\treturn b.resetAudioAppSrcBin(ts)\n}\n\nfunc (b *AudioBin) resetAudioAppSrcBin(ts *config.TrackSource) error {\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\n\toldName, ok := b.names[ts.TrackID]\n\tif !ok {\n\t\treturn errors.New(\"track already removed, cannot reset audio source bin\")\n\t}\n\n\tif b.bin.GetState() > gstreamer.StateRunning {\n\t\treturn errors.New(\"pipeline stopping, cannot reset audio source bin\")\n\t}\n\n\t// Force-remove old bin (blocks on GLib main loop, safe to hold b.mu since\n\t// ForceRemoveSourceBin only acquires gstreamer.Bin's internal mutex)\n\tif err := b.bin.ForceRemoveSourceBin(oldName); err != nil {\n\t\treturn fmt.Errorf(\"failed to force remove audio source bin: %w\", err)\n\t}\n\n\tnewElement, err := gst.NewElementWithName(\"appsrc\", fmt.Sprintf(\"app_%s\", ts.TrackID))\n\tif err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\tts.AppSrc = app.SrcFromElement(newElement)\n\n\tif err := b.addAudioAppSrcBinLocked(ts); err != nil {\n\t\treturn fmt.Errorf(\"failed to add new audio source bin: %w\", err)\n\t}\n\n\tlogger.Infow(\"audio source bin reset complete\", \"trackID\", ts.TrackID, \"newBin\", b.names[ts.TrackID])\n\treturn nil\n}\n\nfunc (b *AudioBin) getChannelLocked(ts *config.TrackSource) livekit.AudioChannel {\n\tif ts.AudioChannel != nil {\n\t\treturn *ts.AudioChannel\n\t}\n\n\tswitch b.conf.AudioMixing {\n\tcase livekit.AudioMixing_DEFAULT_MIXING:\n\t\treturn livekit.AudioChannel_AUDIO_CHANNEL_BOTH\n\n\tcase livekit.AudioMixing_DUAL_CHANNEL_AGENT:\n\t\tif ts.ParticipantKind == lksdk.ParticipantAgent {\n\t\t\treturn livekit.AudioChannel_AUDIO_CHANNEL_LEFT\n\t\t}\n\t\treturn livekit.AudioChannel_AUDIO_CHANNEL_RIGHT\n\n\tcase livekit.AudioMixing_DUAL_CHANNEL_ALTERNATE:\n\t\tif b.nextChannel == livekit.AudioChannel_AUDIO_CHANNEL_LEFT {\n\t\t\tb.nextChannel = livekit.AudioChannel_AUDIO_CHANNEL_RIGHT\n\t\t} else {\n\t\t\tb.nextChannel = livekit.AudioChannel_AUDIO_CHANNEL_LEFT\n\t\t}\n\t\treturn 
b.nextChannel\n\t}\n\n\treturn livekit.AudioChannel_AUDIO_CHANNEL_BOTH\n}\n\nfunc (b *AudioBin) addAudioTestSrcBin() error {\n\ttestSrcBin := b.bin.NewBin(fmt.Sprintf(\"%s_test_src\", audioBinName))\n\tif err := b.bin.AddSourceBin(testSrcBin); err != nil {\n\t\treturn err\n\t}\n\n\taudioTestSrc, err := gst.NewElement(\"audiotestsrc\")\n\tif err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\tif err = audioTestSrc.SetProperty(\"volume\", 0.0); err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\tif err = audioTestSrc.SetProperty(\"do-timestamp\", true); err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\tif err = audioTestSrc.SetProperty(\"is-live\", true); err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\n\t// 20 ms @ 48 kHz\n\tif err = audioTestSrc.SetProperty(\"samplesperbuffer\", 960); err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\n\taudioCaps, err := newAudioCapsFilter(b.conf, livekit.AudioChannel_AUDIO_CHANNEL_BOTH)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn testSrcBin.AddElements(audioTestSrc, audioCaps)\n}\n\nfunc (b *AudioBin) addMixer() error {\n\taudioMixer, err := gst.NewElement(\"audiomixer\")\n\tif err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\tif err = audioMixer.SetProperty(\"latency\", uint64(b.conf.Latency.AudioMixerLatency)); err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\tif err = audioMixer.SetProperty(\"alignment-threshold\", uint64(b.conf.Latency.PipelineLatency)); err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\n\tmixedCaps, err := newAudioCapsFilter(b.conf, livekit.AudioChannel_AUDIO_CHANNEL_BOTH)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsubscribeForQoS(audioMixer)\n\n\treturn b.bin.AddElements(audioMixer, mixedCaps)\n}\n\nfunc (b *AudioBin) addEncoder() error {\n\tswitch b.conf.AudioOutCodec {\n\tcase types.MimeTypeOpus:\n\t\topusEnc, err := gst.NewElement(\"opusenc\")\n\t\tif err != nil {\n\t\t\treturn 
errors.ErrGstPipelineError(err)\n\t\t}\n\t\tif err = opusEnc.SetProperty(\"bitrate\", int(b.conf.AudioBitrate*1000)); err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\t\treturn b.bin.AddElement(opusEnc)\n\n\tcase types.MimeTypeAAC:\n\t\tfaac, err := gst.NewElement(\"faac\")\n\t\tif err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\t\tif err = faac.SetProperty(\"bitrate\", int(b.conf.AudioBitrate*1000)); err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\t\treturn b.bin.AddElement(faac)\n\n\tcase types.MimeTypeMP3:\n\t\tmp3enc, err := gst.NewElement(\"lamemp3enc\")\n\t\tif err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\t\t// target=bitrate is required for cbr and bitrate to take effect;\n\t\t// without it lamemp3enc defaults to quality-based VBR.\n\t\tmp3enc.SetArg(\"target\", \"bitrate\")\n\t\tif err = mp3enc.SetProperty(\"cbr\", true); err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\t\tif err = mp3enc.SetProperty(\"bitrate\", int(b.conf.AudioBitrate)); err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\t\treturn b.bin.AddElement(mp3enc)\n\n\tcase types.MimeTypeRawAudio:\n\t\treturn nil\n\n\tdefault:\n\t\treturn errors.ErrNotSupported(string(b.conf.AudioOutCodec))\n\t}\n}\n\nfunc addAudioConverter(b *gstreamer.Bin, p *config.PipelineConfig, channel livekit.AudioChannel, isLeaky bool) error {\n\trate, err := gstreamer.BuildAudioRate(\"audio_rate\", audioRateTolerance)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\taudioQueue, err := gstreamer.BuildQueue(fmt.Sprintf(\"%s_input_queue\", audioBinName), p.Latency.PipelineLatency, isLeaky)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\taudioConvert, err := gst.NewElement(\"audioconvert\")\n\tif err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\n\taudioResample, err := gst.NewElement(\"audioresample\")\n\tif err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\n\tcapsFilter, err := 
newAudioCapsFilter(p, channel)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn b.AddElements(rate, audioQueue, audioConvert, audioResample, capsFilter)\n}\n\nfunc (b *AudioBin) installPitchProbes() {\n\tif b.audioPacer.pitch == nil {\n\t\treturn\n\t}\n\tif sinkPad := b.audioPacer.pitch.GetStaticPad(\"sink\"); sinkPad != nil {\n\t\tsinkPad.AddProbe(gst.PadProbeTypeBuffer, func(_ *gst.Pad, info *gst.PadProbeInfo) gst.PadProbeReturn {\n\t\t\tif !b.audioPacer.active.Load() {\n\t\t\t\treturn gst.PadProbeOK\n\t\t\t}\n\t\t\tif buf := info.GetBuffer(); buf != nil && buf.Duration() != gst.ClockTimeNone {\n\t\t\t\tb.audioPacer.observeProcessedDuration(*buf.Duration().AsDuration())\n\t\t\t}\n\t\t\treturn gst.PadProbeOK\n\t\t})\n\t}\n\tif srcPad := b.audioPacer.pitch.GetStaticPad(\"src\"); srcPad != nil {\n\t\t// pitch element min latency can go negative, so we need to normalize it\n\t\t// to workaround the obvious issue with the element latency query handling\n\t\tsrcPad.AddProbe(gst.PadProbeTypeQueryUpstream|gst.PadProbeTypePull,\n\t\t\tfunc(_ *gst.Pad, info *gst.PadProbeInfo) gst.PadProbeReturn {\n\t\t\t\tq := info.GetQuery()\n\t\t\t\tif q == nil || q.Type() != gst.QueryLatency {\n\t\t\t\t\treturn gst.PadProbeOK\n\t\t\t\t}\n\n\t\t\t\tlive, minimum, maximum := q.ParseLatency()\n\t\t\t\t// Normalize: ensure min <= max\n\t\t\t\tif minimum > maximum {\n\t\t\t\t\tlogger.Debugw(\"normalizing min latency to 0\", \"min\", minimum)\n\t\t\t\t\tminimum = 0\n\t\t\t\t}\n\t\t\t\tq.SetLatency(live, minimum, maximum)\n\t\t\t\treturn gst.PadProbeOK\n\t\t\t},\n\t\t)\n\t}\n}\n\nfunc (b *AudioBin) addAudioConvertWithPitch(bin *gstreamer.Bin, p *config.PipelineConfig, channel livekit.AudioChannel, isLeaky bool) error {\n\t// add audio rate element to handle discontinuities or codec DTX\n\trate, err := gstreamer.BuildAudioRate(\"audio_rate\", audioRateTolerance)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tq, err := gstreamer.BuildQueue(fmt.Sprintf(\"%s_input_queue\", audioBinName), 
p.Latency.PipelineLatency, isLeaky)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tac1, err := gst.NewElement(\"audioconvert\")\n\tif err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\tar1, err := gst.NewElement(\"audioresample\")\n\tif err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\n\t// go to float for pitch element\n\tf32caps, err := newAudioFloatCapsFilter(p, channel)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpitch, err := gst.NewElement(\"pitch\")\n\tif err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\tpitch.SetArg(\"tempo\", fmt.Sprintf(\"%.1f\", 1.0))\n\n\tac2, err := gst.NewElement(\"audioconvert\")\n\tif err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\t// back to pipeline/native format\n\ts16caps, err := newAudioCapsFilter(p, channel)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// keep a handle for pacer control\n\tb.audioPacer = &audioPacer{\n\t\tpitch:               pitch,\n\t\ttempoAdjustmentRate: p.AudioTempoController.AdjustmentRate,\n\t}\n\n\tb.installPitchProbes()\n\n\treturn bin.AddElements(rate, q, ac1, ar1, f32caps, pitch, ac2, s16caps)\n}\n\n// F32 caps used only around `pitch`\nfunc newAudioFloatCapsFilter(p *config.PipelineConfig, channel livekit.AudioChannel) (*gst.Element, error) {\n\tvar channelCaps string\n\tif channel == livekit.AudioChannel_AUDIO_CHANNEL_BOTH {\n\t\tchannelCaps = \"channels=2\"\n\t} else {\n\t\tchannelCaps = fmt.Sprintf(\"channels=1,channel-mask=(bitmask)0x%d\", channel)\n\t}\n\trate := 48000\n\tif p.AudioOutCodec == types.MimeTypeAAC {\n\t\trate = int(p.AudioFrequency)\n\t}\n\tcaps := gst.NewCapsFromString(fmt.Sprintf(\"audio/x-raw,format=F32LE,layout=interleaved,rate=%d,%s\", rate, channelCaps))\n\n\tcf, err := gst.NewElement(\"capsfilter\")\n\tif err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\tif err = cf.SetProperty(\"caps\", caps); err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\treturn cf, nil\n}\n\nfunc 
newAudioCapsFilter(p *config.PipelineConfig, channel livekit.AudioChannel) (*gst.Element, error) {\n\tvar channelCaps string\n\tif channel == livekit.AudioChannel_AUDIO_CHANNEL_BOTH {\n\t\tchannelCaps = \"channels=2\"\n\t} else {\n\t\tchannelCaps = fmt.Sprintf(\"channels=1,channel-mask=(bitmask)0x%d\", channel)\n\t}\n\n\tvar caps *gst.Caps\n\tswitch p.AudioOutCodec {\n\tcase types.MimeTypeOpus, types.MimeTypeRawAudio:\n\t\tcaps = gst.NewCapsFromString(fmt.Sprintf(\n\t\t\t\"audio/x-raw,format=S16LE,layout=interleaved,rate=48000,%s\",\n\t\t\tchannelCaps,\n\t\t))\n\tcase types.MimeTypeAAC, types.MimeTypeMP3:\n\t\tcaps = gst.NewCapsFromString(fmt.Sprintf(\n\t\t\t\"audio/x-raw,format=S16LE,layout=interleaved,rate=%d,%s\",\n\t\t\tp.AudioFrequency, channelCaps,\n\t\t))\n\tdefault:\n\t\treturn nil, errors.ErrNotSupported(string(p.AudioOutCodec))\n\t}\n\n\tcapsFilter, err := gst.NewElement(\"capsfilter\")\n\tif err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\tif err = capsFilter.SetProperty(\"caps\", caps); err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\n\treturn capsFilter, nil\n}\n\nfunc subscribeForQoS(mixer *gst.Element) {\n\tmixer.Connect(\"pad-added\", func(_ *gst.Element, pad *gst.Pad) {\n\t\tif err := pad.SetProperty(\"qos-messages\", true); err != nil {\n\t\t\tlogger.Errorw(\"failed to set QoS messages on pad\", err)\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "pkg/pipeline/builder/file.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage builder\n\nimport (\n\t\"github.com/go-gst/go-gst/gst\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/gstreamer\"\n\t\"github.com/livekit/egress/pkg/types\"\n)\n\nfunc BuildFileBin(pipeline *gstreamer.Pipeline, p *config.PipelineConfig) (*gstreamer.Bin, error) {\n\tb := pipeline.NewBin(\"file\")\n\to := p.GetFileConfig()\n\n\tvar mux muxer\n\tvar err error\n\tswitch o.OutputType {\n\tcase types.OutputTypeOGG:\n\t\tmux, err = newMuxer(\"oggmux\")\n\tcase types.OutputTypeIVF:\n\t\tmux, err = newMuxer(\"avmux_ivf\")\n\tcase types.OutputTypeMP4:\n\t\tmux, err = newMuxer(\"mp4mux\")\n\tcase types.OutputTypeWebM:\n\t\tmux, err = newMuxer(\"webmmux\")\n\tcase types.OutputTypeMP3:\n\t\tmux, err = newMP3Muxer()\n\n\tdefault:\n\t\treturn nil, errors.ErrInvalidInput(\"output type\")\n\t}\n\tif err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\n\tsink, err := gst.NewElement(\"filesink\")\n\tif err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\tif err = sink.SetProperty(\"location\", o.LocalFilepath); err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\tif err = sink.SetProperty(\"sync\", false); err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\tif !p.Live {\n\t\tif err = sink.SetProperty(\"async\", 
false); err != nil {\n\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t}\n\t}\n\n\tif err = b.AddElements(mux.GetElement(), sink); err != nil {\n\t\treturn nil, err\n\t}\n\n\tb.SetGetSrcPad(func(name string) *gst.Pad {\n\t\treturn mux.GetRequestPad(name + \"_%u\")\n\t})\n\n\treturn b, nil\n}\n"
  },
  {
    "path": "pkg/pipeline/builder/image.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage builder\n\nimport (\n\t\"fmt\"\n\t\"path\"\n\t\"time\"\n\n\t\"github.com/go-gst/go-gst/gst\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/gstreamer\"\n\t\"github.com/livekit/egress/pkg/types\"\n)\n\nconst (\n\timageQueueLatency = 200 * time.Millisecond\n)\n\nfunc BuildImageBin(c *config.ImageConfig, pipeline *gstreamer.Pipeline, p *config.PipelineConfig) (*gstreamer.Bin, error) {\n\tb := pipeline.NewBin(fmt.Sprintf(\"image_%s\", c.Id))\n\n\tvar err error\n\tvar fakeAudio *gst.Element\n\tif p.AudioEnabled {\n\t\tfakeAudio, err = gst.NewElement(\"fakesink\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tqueue, err := gstreamer.BuildQueue(fmt.Sprintf(\"image_queue_%s\", c.Id), imageQueueLatency, true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := b.AddElements(queue); err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\n\tb.SetGetSrcPad(func(name string) *gst.Pad {\n\t\tif name == audioBinName {\n\t\t\treturn fakeAudio.GetStaticPad(\"sink\")\n\t\t}\n\t\treturn queue.GetStaticPad(\"sink\")\n\t})\n\tb.SetShouldLink(func(srcBin string) bool {\n\t\treturn srcBin != audioBinName\n\t})\n\n\tvideoRate, err := gst.NewElement(\"videorate\")\n\tif err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\tif err = 
videoRate.SetProperty(\"skip-to-first\", true); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := b.AddElements(videoRate); err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\n\tvideoScale, err := gst.NewElement(\"videoscale\")\n\tif err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\tif err := b.AddElements(videoScale); err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\n\tcaps, err := gst.NewElement(\"capsfilter\")\n\tif err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\n\tcapsString := fmt.Sprintf(\n\t\t\"video/x-raw,framerate=1/%d,format=I420,colorimetry=bt709,chroma-site=mpeg2,pixel-aspect-ratio=1/1\",\n\t\tc.CaptureInterval)\n\n\tif c.Width > 0 && c.Height > 0 {\n\t\tcapsString = fmt.Sprintf(\"%s,width=%d,height=%d,\", capsString, c.Width, c.Height)\n\t}\n\n\terr = caps.SetProperty(\"caps\", gst.NewCapsFromString(capsString))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := b.AddElements(caps); err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\n\tswitch c.ImageOutCodec {\n\tcase types.MimeTypeJPEG:\n\t\tenc, err := gst.NewElement(\"jpegenc\")\n\t\tif err != nil {\n\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t}\n\t\tif err := b.AddElements(enc); err != nil {\n\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t}\n\tdefault:\n\t\treturn nil, errors.ErrNoCompatibleCodec\n\t}\n\n\tsink, err := gst.NewElementWithName(\"multifilesink\", fmt.Sprintf(\"multifilesink_%s\", c.Id))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = sink.SetProperty(\"post-messages\", true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// File will be renamed if the TS prefix is configured\n\tlocation := fmt.Sprintf(\"%s_%%05d%s\", path.Join(c.LocalDir, c.ImagePrefix), types.FileExtensionForOutputType[c.OutputType])\n\n\terr = sink.SetProperty(\"location\", location)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err = b.AddElements(sink); err != nil 
{\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\n\treturn b, nil\n}\n"
  },
  {
    "path": "pkg/pipeline/builder/muxer.go",
    "content": "// Copyright 2025 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage builder\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/go-gst/go-gst/gst\"\n)\n\n// muxer captures the minimal behavior builders need from a muxing element, allowing\n// us to swap between real gst muxers and light-weight shims (e.g. xingmux for MP3).\ntype muxer interface {\n\tGetRequestPad(name string) *gst.Pad\n\tGetElement() *gst.Element\n}\n\n// muxerImpl wraps a concrete gst.Element so it satisfies the muxer interface.\ntype muxerImpl struct {\n\t*gst.Element\n}\n\n// newMuxer constructs a wrapper around the named gst muxer element.\nfunc newMuxer(elementName string) (*muxerImpl, error) {\n\telement, err := gst.NewElement(elementName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif factory := element.GetFactory(); factory != nil {\n\t\tif klass := factory.GetMetadata(\"klass\"); !strings.Contains(klass, \"Muxer\") {\n\t\t\telement.Unref()\n\t\t\treturn nil, fmt.Errorf(\"element %s is not a muxer\", elementName)\n\t\t}\n\t}\n\treturn &muxerImpl{\n\t\tElement: element,\n\t}, nil\n}\n\nfunc (m *muxerImpl) GetRequestPad(name string) *gst.Pad {\n\treturn m.Element.GetRequestPad(name)\n}\n\nfunc (m *muxerImpl) GetElement() *gst.Element {\n\treturn m.Element\n}\n\n// mp3Muxer wraps xingmux as a muxer so audio-only MP3 outputs\n// can reuse the same linking logic as containerised formats.\ntype mp3Muxer struct {\n\tmuxerImpl\n}\n\n// 
newMP3Muxer provides a muxer-compatible wrapper around gst xingmux.\n// xingmux inserts a Xing header containing total frame and byte counts,\n// allowing players to determine the file duration without scanning every frame.\nfunc newMP3Muxer() (*mp3Muxer, error) {\n\txing, err := gst.NewElement(\"xingmux\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &mp3Muxer{\n\t\tmuxerImpl: muxerImpl{\n\t\t\tElement: xing,\n\t\t},\n\t}, nil\n}\n\n// GetRequestPad always returns the static sink pad to satisfy the muxer contract.\nfunc (m *mp3Muxer) GetRequestPad(_ string) *gst.Pad {\n\treturn m.GetStaticPad(\"sink\")\n}\n"
  },
  {
    "path": "pkg/pipeline/builder/muxer_test.go",
    "content": "package builder\n\nimport (\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com/go-gst/go-gst/gst\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nvar gstInitOnce sync.Once\n\nfunc initGStreamer(t *testing.T) {\n\tt.Helper()\n\tgstInitOnce.Do(func() { gst.Init(nil) })\n}\n\nfunc TestNewMuxer_KnownMuxers(t *testing.T) {\n\tinitGStreamer(t)\n\n\tfor _, name := range []string{\"oggmux\", \"avmux_ivf\", \"mp4mux\", \"webmmux\", \"mpegtsmux\"} {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tm, err := newMuxer(name)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NotNil(t, m)\n\t\t\trequire.NotNil(t, m.GetElement())\n\t\t})\n\t}\n}\n\nfunc TestNewMuxer_InvalidMuxer(t *testing.T) {\n\tinitGStreamer(t)\n\n\t_, err := newMuxer(\"identity\")\n\trequire.Error(t, err)\n\trequire.True(t, strings.Contains(err.Error(), \"not a muxer\"), \"unexpected error: %v\", err)\n}\n\nfunc TestNewMP3Muxer(t *testing.T) {\n\tinitGStreamer(t)\n\n\tm, err := newMP3Muxer()\n\trequire.NoError(t, err)\n\trequire.NotNil(t, m.GetRequestPad(\"unused\"))\n}\n"
  },
  {
    "path": "pkg/pipeline/builder/pts_fixer.go",
    "content": "package builder\n\nimport (\n\t\"github.com/go-gst/go-gst/gst\"\n\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/protocol/logger\"\n)\n\n// PTSFixer wraps a gst element and restores missing PTS values on its src pad\n// so downstream elements observe a monotonic timeline even when upstream elements\n// emit GST_CLOCK_TIME_NONE buffers (e.g. due to baseparse bugs).\ntype ptsFixer struct {\n\t*gst.Element\n\tpad     *gst.Pad\n\tprobe   uint64\n\tlast    uint64\n\tptsSeen bool\n\tlog     logger.Logger\n}\n\nfunc newPTSFixer(elementName, context string) (*ptsFixer, error) {\n\telement, err := gst.NewElement(elementName)\n\tif err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\n\tpad := element.GetStaticPad(\"src\")\n\tif pad == nil {\n\t\telement.Unref()\n\t\treturn nil, errors.ErrGstPipelineError(newMissingPadError(elementName, \"src\"))\n\t}\n\n\tfixer := &ptsFixer{\n\t\tElement: element,\n\t\tpad:     pad,\n\t\tlog:     logger.GetLogger().WithValues(\"component\", \"pts_fixer\", \"context\", context, \"element\", elementName),\n\t}\n\tfixer.probe = pad.AddProbe(gst.PadProbeTypeBuffer, fixer.onBuffer)\n\n\treturn fixer, nil\n}\n\nfunc (f *ptsFixer) onBuffer(_ *gst.Pad, info *gst.PadProbeInfo) gst.PadProbeReturn {\n\tbuf := info.GetBuffer()\n\tif buf == nil {\n\t\treturn gst.PadProbeOK\n\t}\n\n\tpts := buf.PresentationTimestamp()\n\tif pts == gst.ClockTimeNone {\n\t\tif !f.ptsSeen {\n\t\t\treturn gst.PadProbeOK\n\t\t}\n\n\t\trestored := gst.ClockTime(f.last)\n\t\tbuf.SetPresentationTimestamp(restored)\n\t\tf.log.Debugw(\"restored missing pts from previous buffer\", \"pts\", restored)\n\n\t\treturn gst.PadProbeOK\n\t}\n\n\tf.last = uint64(pts)\n\tf.ptsSeen = true\n\treturn gst.PadProbeOK\n}\n"
  },
  {
    "path": "pkg/pipeline/builder/segment.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage builder\n\nimport (\n\t\"fmt\"\n\t\"path\"\n\t\"time\"\n\n\t\"github.com/go-gst/go-gst/gst\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/gstreamer\"\n\t\"github.com/livekit/protocol/livekit\"\n\t\"github.com/livekit/protocol/logger\"\n)\n\ntype FirstSampleMetadata struct {\n\tStartDate int64 // Real time date of the first media sample\n}\n\nfunc BuildSegmentBin(pipeline *gstreamer.Pipeline, p *config.PipelineConfig) (*gstreamer.Bin, error) {\n\tb := pipeline.NewBin(\"segment\")\n\to := p.GetSegmentConfig()\n\n\tvar h264ParseFixer *ptsFixer\n\n\tvar err error\n\tif p.VideoEnabled {\n\t\th264ParseFixer, err = newPTSFixer(\"h264parse\", \"segment:h264\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif err = b.AddElements(h264ParseFixer.Element); err != nil {\n\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t}\n\t}\n\n\tsink, err := gst.NewElement(\"splitmuxsink\")\n\tif err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\tif err = sink.SetProperty(\"max-size-time\", uint64(time.Duration(o.SegmentDuration)*time.Second)); err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\tif err = sink.SetProperty(\"send-keyframe-requests\", true); err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\n\tif err = 
sink.SetProperty(\"muxer-factory\", \"mpegtsmux\"); err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\n\tvar startDate time.Time\n\t_, err = sink.Connect(\"format-location-full\", func(_ *gst.Element, fragmentId uint, firstSample *gst.Sample) string {\n\t\tvar pts time.Duration\n\t\tif firstSample != nil && firstSample.GetBuffer() != nil {\n\t\t\tpts = *firstSample.GetBuffer().PresentationTimestamp().AsDuration()\n\t\t} else {\n\t\t\tlogger.Infow(\"nil sample passed into 'format-location-full' event handler, assuming 0 pts\")\n\t\t}\n\n\t\tif startDate.IsZero() {\n\t\t\tnow := time.Now()\n\n\t\t\tstartDate = now.Add(-pts)\n\n\t\t\tmdata := FirstSampleMetadata{\n\t\t\t\tStartDate: now.UnixNano(),\n\t\t\t}\n\t\t\tstr := gst.MarshalStructure(mdata)\n\t\t\tmsg := gst.NewElementMessage(sink, str)\n\t\t\tsink.GetBus().Post(msg)\n\t\t}\n\n\t\tvar segmentName string\n\t\tswitch o.SegmentSuffix {\n\t\tcase livekit.SegmentedFileSuffix_TIMESTAMP:\n\t\t\tts := startDate.Add(pts)\n\t\t\tsegmentName = fmt.Sprintf(\"%s_%s%03d.ts\", o.SegmentPrefix, ts.Format(\"20060102150405\"), ts.UnixMilli()%1000)\n\t\tdefault:\n\t\t\tsegmentName = fmt.Sprintf(\"%s_%05d.ts\", o.SegmentPrefix, fragmentId)\n\t\t}\n\t\treturn path.Join(o.LocalDir, segmentName)\n\t})\n\tif err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\n\tif err = b.AddElements(sink); err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\n\tb.SetGetSrcPad(func(name string) *gst.Pad {\n\t\tif name == audioBinName {\n\t\t\treturn sink.GetRequestPad(\"audio_%u\")\n\t\t} else if h264ParseFixer != nil {\n\t\t\treturn h264ParseFixer.GetStaticPad(\"sink\")\n\t\t}\n\t\t// Should never happen\n\t\treturn nil\n\n\t})\n\n\treturn b, nil\n}\n"
  },
  {
    "path": "pkg/pipeline/builder/stream.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage builder\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/go-gst/go-gst/gst\"\n\t\"go.uber.org/atomic\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/gstreamer\"\n\t\"github.com/livekit/egress/pkg/logging\"\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/logger\"\n\t\"github.com/livekit/protocol/utils\"\n)\n\ntype StreamBin struct {\n\tBin        *gstreamer.Bin\n\tOutputType types.OutputType\n\n\tlatency time.Duration\n}\n\ntype Stream struct {\n\tConf *config.Stream\n\tBin  *gstreamer.Bin\n\n\toutputType     types.OutputType\n\tsink           *gst.Element\n\tkeyframes      atomic.Uint64\n\treconnections  atomic.Int32\n\tdisconnectedAt atomic.Time\n\tfailed         atomic.Bool\n}\n\nfunc BuildStreamBin(pipeline *gstreamer.Pipeline, p *config.PipelineConfig, o *config.StreamConfig) (*StreamBin, error) {\n\tb := pipeline.NewBin(\"stream\")\n\n\tvar mux *gst.Element\n\tvar err error\n\tswitch o.OutputType {\n\tcase types.OutputTypeRTMP:\n\t\tmux, err = gst.NewElement(\"flvmux\")\n\t\tif err != nil {\n\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t}\n\t\tif err = mux.SetProperty(\"streamable\", true); err != nil {\n\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t}\n\t\tif err = mux.SetProperty(\"skip-backwards-streams\", 
true); err != nil {\n\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t}\n\t\t// add latency to give time for flvmux to receive and order packets from both streams\n\t\tif err = mux.SetProperty(\"latency\", uint64(p.Latency.PipelineLatency)); err != nil {\n\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t}\n\n\t\tb.SetGetSrcPad(func(name string) *gst.Pad {\n\t\t\treturn mux.GetRequestPad(name)\n\t\t})\n\n\tcase types.OutputTypeSRT:\n\t\tmux, err = gst.NewElement(\"mpegtsmux\")\n\t\tif err != nil {\n\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t}\n\t\tif err = mux.SetProperty(\"latency\", uint64(p.Latency.PipelineLatency)); err != nil {\n\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t}\n\n\tdefault:\n\t\terr = errors.ErrInvalidInput(\"output type\")\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttee, err := gst.NewElement(\"tee\")\n\tif err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\tif err = tee.SetProperty(\"allow-not-linked\", true); err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\n\tif err = b.AddElements(mux, tee); err != nil {\n\t\treturn nil, err\n\t}\n\n\tsb := &StreamBin{\n\t\tBin:        b,\n\t\tOutputType: o.OutputType,\n\t\tlatency:    p.Latency.PipelineLatency,\n\t}\n\n\treturn sb, nil\n}\n\nfunc (sb *StreamBin) BuildStream(stream *config.Stream, framerate int32) (*Stream, error) {\n\tstream.Name = utils.NewGuid(\"\")\n\tb := sb.Bin.NewBin(stream.Name)\n\n\tqueue, err := gstreamer.BuildQueue(fmt.Sprintf(\"queue_%s\", stream.Name), sb.latency, true)\n\tif err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\n\tss := &Stream{\n\t\tConf:       stream,\n\t\tBin:        b,\n\t\toutputType: sb.OutputType,\n\t}\n\n\tvar sink *gst.Element\n\tswitch sb.OutputType {\n\tcase types.OutputTypeRTMP:\n\t\tsink, err = gst.NewElementWithName(\"rtmp2sink\", fmt.Sprintf(\"rtmp2sink_%s\", stream.Name))\n\t\tif err != nil {\n\t\t\treturn nil, 
errors.ErrGstPipelineError(err)\n\t\t}\n\t\tif err = sink.Set(\"location\", stream.ParsedUrl); err != nil {\n\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t}\n\t\tif err = sink.SetProperty(\"async-connect\", false); err != nil {\n\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t}\n\n\tcase types.OutputTypeSRT:\n\t\tsink, err = gst.NewElementWithName(\"srtsink\", fmt.Sprintf(\"srtsink_%s\", stream.Name))\n\t\tif err != nil {\n\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t}\n\t\tif err = sink.SetProperty(\"uri\", stream.ParsedUrl); err != nil {\n\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t}\n\t\tif err = sink.SetProperty(\"wait-for-connection\", false); err != nil {\n\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t}\n\n\tdefault:\n\t\treturn nil, errors.ErrInvalidInput(\"output type\")\n\t}\n\n\t// GstBaseSink properties\n\tif err = sink.SetProperty(\"async\", false); err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\tif err = sink.SetProperty(\"sync\", false); err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\tif err = b.AddElements(queue, sink); err != nil {\n\t\treturn nil, err\n\t}\n\tss.sink = sink\n\n\t// add a proxy pad between the queue and sink to prevent errors from propagating upstream\n\tb.SetLinkFunc(func(_ []*gst.Element) error {\n\t\tproxy := gst.NewGhostPad(fmt.Sprintf(\"proxy_%s\", stream.Name), sink.GetStaticPad(\"sink\"))\n\t\tproxy.Ref()\n\t\tproxy.ActivateMode(gst.PadModePush, true)\n\n\t\tswitch sb.OutputType {\n\t\tcase types.OutputTypeRTMP:\n\t\t\tvideoFrameDuration := uint64(1000000000 / framerate)\n\t\t\tproxy.SetChainFunction(func(self *gst.Pad, _ *gst.Object, buffer *gst.Buffer) gst.FlowReturn {\n\t\t\t\tbuffer.Ref()\n\n\t\t\t\tif uint64(buffer.Duration())-videoFrameDuration < 2 && !buffer.HasFlags(gst.BufferFlagDeltaUnit) {\n\t\t\t\t\t// non-delta video frame\n\t\t\t\t\tss.keyframes.Inc()\n\t\t\t\t}\n\n\t\t\t\tlinks, _ := self.GetInternalLinks()\n\t\t\t\tswitch 
{\n\t\t\t\tcase len(links) != 1:\n\t\t\t\t\treturn gst.FlowNotLinked\n\t\t\t\tcase links[0].Push(buffer) == gst.FlowEOS:\n\t\t\t\t\treturn gst.FlowEOS\n\t\t\t\tdefault:\n\t\t\t\t\treturn gst.FlowOK\n\t\t\t\t}\n\t\t\t})\n\n\t\tcase types.OutputTypeSRT:\n\t\t\tproxy.SetChainListFunction(func(self *gst.Pad, _ *gst.Object, list *gst.BufferList) gst.FlowReturn {\n\t\t\t\tlist.Ref()\n\t\t\t\tif ss.failed.Load() {\n\t\t\t\t\treturn gst.FlowOK\n\t\t\t\t}\n\n\t\t\t\tlinks, _ := self.GetInternalLinks()\n\t\t\t\tif len(links) != 1 {\n\t\t\t\t\treturn gst.FlowNotLinked\n\t\t\t\t}\n\n\t\t\t\tswitch links[0].PushList(list) {\n\t\t\t\tcase gst.FlowEOS:\n\t\t\t\t\treturn gst.FlowEOS\n\t\t\t\tcase gst.FlowError:\n\t\t\t\t\tss.failed.Store(true)\n\t\t\t\t\treturn gst.FlowOK\n\t\t\t\tdefault:\n\t\t\t\t\treturn gst.FlowOK\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\n\t\t// link queue to sink\n\t\tif padReturn := queue.GetStaticPad(\"src\").Link(proxy.Pad); padReturn != gst.PadLinkOK {\n\t\t\treturn errors.ErrPadLinkFailed(queue.GetName(), \"proxy\", padReturn.String())\n\t\t}\n\t\treturn nil\n\t})\n\n\treturn ss, nil\n}\n\nfunc (s *Stream) Reset(streamErr error) (bool, error) {\n\tvar outBytes uint64\n\tif stats, ok := s.Stats(); ok {\n\t\toutBytes = stats.OutBytesAcked\n\t}\n\n\tif s.reconnections.Load() == 0 && outBytes == 0 {\n\t\t// unable to connect, probably a bad stream key or url\n\t\treturn false, nil\n\t}\n\n\tif outBytes > 0 {\n\t\t// first disconnection\n\t\ts.disconnectedAt.Store(time.Now())\n\t\ts.reconnections.Store(0)\n\t} else if time.Since(s.disconnectedAt.Load()) > time.Second*30 {\n\t\treturn false, nil\n\t}\n\n\ts.reconnections.Inc()\n\tlogger.Warnw(\"resetting stream\", streamErr, \"url\", s.Conf.RedactedUrl)\n\n\tif err := s.Bin.SetState(gst.StateNull); err != nil {\n\t\treturn false, err\n\t}\n\tif err := s.Bin.SetState(gst.StatePlaying); err != nil {\n\t\treturn false, err\n\t}\n\n\treturn true, nil\n}\n\nconst (\n\toutBytesTotal = \"out-bytes-total\"\n\toutBytesAcked = 
\"out-bytes-acked\"\n\tinBytesTotal  = \"in-bytes-total\"\n\tinBytesAcked  = \"in-bytes-acked\"\n\tsrtBytesSent  = \"bytes-sent-total\"\n)\n\nfunc (s *Stream) Stats() (*logging.StreamStats, bool) {\n\tstructure, err := s.sink.GetProperty(\"stats\")\n\tif err != nil || structure == nil {\n\t\treturn nil, false\n\t}\n\n\tstats := structure.(*gst.Structure).Values()\n\tif stats == nil {\n\t\treturn nil, false\n\t}\n\n\tstreamStats := &logging.StreamStats{\n\t\tTimestamp: time.Now().Format(time.DateTime),\n\t}\n\n\tswitch s.outputType {\n\tcase types.OutputTypeRTMP:\n\t\tstreamStats.Keyframes = s.keyframes.Load()\n\t\tstreamStats.OutBytesTotal = tryUInt64(stats, outBytesTotal)\n\t\tstreamStats.OutBytesAcked = tryUInt64(stats, outBytesAcked)\n\t\tstreamStats.InBytesTotal = tryUInt64(stats, inBytesTotal)\n\t\tstreamStats.InBytesAcked = tryUInt64(stats, inBytesAcked)\n\tcase types.OutputTypeSRT:\n\t\tstreamStats.OutBytesTotal = tryUInt64(stats, srtBytesSent)\n\tdefault:\n\t\treturn nil, false\n\t}\n\n\treturn streamStats, true\n}\n\n// sink stats sometimes returns strings instead of uint64\nfunc tryUInt64(stats map[string]interface{}, key string) uint64 {\n\tswitch val := stats[key].(type) {\n\tcase uint64:\n\t\treturn val\n\tdefault:\n\t\tlogger.Infow(fmt.Sprintf(\"unexpected type for %s\", key),\n\t\t\t\"type\", fmt.Sprintf(\"%T\", val),\n\t\t\t\"value\", val,\n\t\t)\n\t\treturn 0\n\t}\n}\n"
  },
  {
    "path": "pkg/pipeline/builder/video.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage builder\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/go-gst/go-gst/gst\"\n\t\"github.com/go-gst/go-gst/gst/app\"\n\t\"github.com/linkdata/deadlock\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/gstreamer\"\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/logger\"\n\tlksdk \"github.com/livekit/server-sdk-go/v2\"\n)\n\nconst (\n\tvideoTestSrcName = \"video_test_src\"\n)\n\ntype VideoBin struct {\n\tbin  *gstreamer.Bin\n\tconf *config.PipelineConfig\n\n\tmu          deadlock.Mutex\n\tnextID      int\n\tselectedPad string\n\tlastPTS     uint64\n\tpads        map[string]*gst.Pad\n\tnames       map[string]string\n\tselector    *gst.Element\n\trawVideoTee *gst.Element\n}\n\n// buildVideoQueue creates a queue for the video pipeline. For live sources the\n// queue is leaky (drops old buffers when full) to handle real-time overrun. 
For\n// non-live replay the queue is blocking so backpressure throttles the source.\nfunc (b *VideoBin) buildVideoQueue(name string) (*gst.Element, error) {\n\tqueue, err := gstreamer.BuildQueue(name, b.conf.Latency.PipelineLatency, b.conf.Live)\n\tif err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\treturn queue, nil\n}\n\nfunc BuildVideoBin(pipeline *gstreamer.Pipeline, p *config.PipelineConfig) error {\n\tb := &VideoBin{\n\t\tbin:  pipeline.NewBin(\"video\"),\n\t\tconf: p,\n\t}\n\n\tswitch p.SourceType {\n\tcase types.SourceTypeWeb:\n\t\tif err := b.buildWebInput(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\tcase types.SourceTypeSDK:\n\t\tif err := b.buildSDKInput(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tpipeline.AddOnTrackAdded(b.onTrackAdded)\n\t\tpipeline.AddOnTrackRemoved(b.onTrackRemoved)\n\t\tpipeline.AddOnTrackMuted(b.onTrackMuted)\n\t\tpipeline.AddOnTrackUnmuted(b.onTrackUnmuted)\n\t\tpipeline.AddOnSourceBinReset(b.onSourceBinReset)\n\t}\n\n\tvar getPad func() *gst.Pad\n\tif len(p.GetEncodedOutputs()) > 1 {\n\t\ttee, err := gst.NewElementWithName(\"tee\", \"video_tee\")\n\t\tif err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\n\t\tif err = b.bin.AddElement(tee); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tgetPad = func() *gst.Pad {\n\t\t\treturn tee.GetRequestPad(\"src_%u\")\n\t\t}\n\t} else if len(p.GetEncodedOutputs()) > 0 {\n\t\tqueue, err := b.buildVideoQueue(\"video_queue\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = b.bin.AddElement(queue); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tgetPad = func() *gst.Pad {\n\t\t\treturn queue.GetStaticPad(\"src\")\n\t\t}\n\t}\n\n\tb.bin.SetGetSinkPad(func(name string) *gst.Pad {\n\t\tif strings.HasPrefix(name, \"image\") {\n\t\t\treturn b.rawVideoTee.GetRequestPad(\"src_%u\")\n\t\t} else if getPad != nil {\n\t\t\treturn getPad()\n\t\t}\n\n\t\treturn nil\n\t})\n\n\treturn pipeline.AddSourceBin(b.bin)\n}\n\nfunc (b *VideoBin) onTrackAdded(ts 
*config.TrackSource) {\n\tif b.bin.GetState() > gstreamer.StateRunning {\n\t\treturn\n\t}\n\n\tif ts.TrackKind == lksdk.TrackKindVideo {\n\t\tlogger.Debugw(\"adding video app src bin\", \"trackID\", ts.TrackID)\n\t\tif err := b.addAppSrcBin(ts); err != nil {\n\t\t\tlogger.Errorw(\"failed to add video app src bin\", err, \"trackID\", ts.TrackID)\n\t\t\tb.bin.OnError(err)\n\t\t}\n\t}\n}\n\nfunc (b *VideoBin) onTrackRemoved(trackID string) {\n\tif b.bin.GetState() > gstreamer.StateRunning {\n\t\treturn\n\t}\n\n\tb.mu.Lock()\n\tname, ok := b.names[trackID]\n\tif !ok {\n\t\tb.mu.Unlock()\n\t\treturn\n\t}\n\tdelete(b.names, trackID)\n\tdelete(b.pads, name)\n\n\tif b.selectedPad == name {\n\t\tif err := b.setSelectorPadLocked(videoTestSrcName); err != nil {\n\t\t\tb.mu.Unlock()\n\t\t\tb.bin.OnError(err)\n\t\t\treturn\n\t\t}\n\t}\n\tb.mu.Unlock()\n\n\tif err := b.bin.RemoveSourceBin(name); err != nil {\n\t\tb.bin.OnError(err)\n\t}\n}\n\nfunc (b *VideoBin) onTrackMuted(trackID string) {\n\tif b.bin.GetState() > gstreamer.StateRunning {\n\t\treturn\n\t}\n\n\tb.mu.Lock()\n\tif name, ok := b.names[trackID]; ok && b.selectedPad == name {\n\t\tif err := b.setSelectorPadLocked(videoTestSrcName); err != nil {\n\t\t\tb.mu.Unlock()\n\t\t\tb.bin.OnError(err)\n\t\t\treturn\n\t\t}\n\t}\n\tb.mu.Unlock()\n}\n\nfunc (b *VideoBin) onTrackUnmuted(trackID string) {\n\tif b.bin.GetState() > gstreamer.StateRunning {\n\t\treturn\n\t}\n\n\tb.mu.Lock()\n\tif name, ok := b.names[trackID]; ok {\n\t\tif err := b.setSelectorPadLocked(name); err != nil {\n\t\t\tb.mu.Unlock()\n\t\t\tb.bin.OnError(err)\n\t\t\treturn\n\t\t}\n\t}\n\tb.mu.Unlock()\n}\n\nfunc (b *VideoBin) onSourceBinReset(ts *config.TrackSource) error {\n\tif ts.TrackKind != lksdk.TrackKindVideo {\n\t\treturn nil\n\t}\n\treturn b.resetVideoAppSrcBin(ts)\n}\n\nfunc (b *VideoBin) resetVideoAppSrcBin(ts *config.TrackSource) error {\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\n\toldName, ok := b.names[ts.TrackID]\n\tif !ok {\n\t\treturn 
errors.New(\"track already removed, cannot reset video source bin\")\n\t}\n\n\tif b.bin.GetState() > gstreamer.StateRunning {\n\t\treturn errors.New(\"pipeline stopping, cannot reset video source bin\")\n\t}\n\n\t// If the stuck bin is the currently selected pad, switch to test src first\n\tif b.conf.VideoDecoding && b.selectedPad == oldName {\n\t\tif err := b.setSelectorPadLocked(videoTestSrcName); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Clean up old pad reference before force-remove\n\tdelete(b.pads, oldName)\n\n\t// Force-remove old bin (blocks on GLib main loop, safe to hold b.mu since\n\t// ForceRemoveSourceBin only acquires gstreamer.Bin's internal mutex)\n\tif err := b.bin.ForceRemoveSourceBin(oldName); err != nil {\n\t\treturn fmt.Errorf(\"failed to force remove video source bin: %w\", err)\n\t}\n\n\t// Create new appsrc element (reuse the same element name so watch.go works)\n\tnewElement, err := gst.NewElementWithName(\"appsrc\", fmt.Sprintf(\"app_%s\", ts.TrackID))\n\tif err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\tts.AppSrc = app.SrcFromElement(newElement)\n\n\tname := fmt.Sprintf(\"%s_%d\", ts.TrackID, b.nextID)\n\tb.nextID++\n\n\tappSrcBin, err := b.buildAppSrcBin(ts, name)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to build new video source bin: %w\", err)\n\t}\n\n\tif b.conf.VideoDecoding {\n\t\tb.createSrcPadLocked(ts.TrackID, name)\n\t}\n\n\tif err = b.bin.AddSourceBin(appSrcBin); err != nil {\n\t\treturn fmt.Errorf(\"failed to add new video source bin: %w\", err)\n\t}\n\n\tif b.conf.VideoDecoding {\n\t\tif err := b.setSelectorPadLocked(name); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tlogger.Infow(\"video source bin reset complete\", \"trackID\", ts.TrackID, \"newBin\", name)\n\treturn nil\n}\n\nfunc (b *VideoBin) buildWebInput() error {\n\txImageSrc, err := gst.NewElement(\"ximagesrc\")\n\tif err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\tif err = 
xImageSrc.SetProperty(\"display-name\", b.conf.Display); err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\tif err = xImageSrc.SetProperty(\"use-damage\", false); err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\tif err = xImageSrc.SetProperty(\"show-pointer\", false); err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\n\tvideoQueue, err := b.buildVideoQueue(\"video_input_queue\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvideoConvert, err := gst.NewElement(\"videoconvert\")\n\tif err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\n\tvideoRate, err := gst.NewElement(\"videorate\")\n\tif err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\tif err = videoRate.SetProperty(\"skip-to-first\", true); err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\n\tcaps, err := gst.NewElement(\"capsfilter\")\n\tif err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\tif err = caps.SetProperty(\"caps\", gst.NewCapsFromString(fmt.Sprintf(\n\t\t\"video/x-raw,framerate=%d/1\",\n\t\tb.conf.Framerate,\n\t),\n\t)); err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\n\tif err = b.bin.AddElements(xImageSrc, videoQueue, videoConvert, videoRate, caps); err != nil {\n\t\treturn err\n\t}\n\n\treturn b.addDecodedVideoSink()\n}\n\nfunc (b *VideoBin) buildSDKInput() error {\n\tb.pads = make(map[string]*gst.Pad)\n\tb.names = make(map[string]string)\n\n\t// add selector first so pads can be created\n\tif b.conf.VideoDecoding {\n\t\tif err := b.addSelector(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif b.conf.VideoTrack != nil {\n\t\tif err := b.addAppSrcBin(b.conf.VideoTrack); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif b.conf.VideoDecoding {\n\t\tb.bin.SetGetSrcPad(b.getSrcPad)\n\n\t\tif err := b.addVideoTestSrcBin(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif b.conf.VideoTrack == nil {\n\t\t\tif err := b.setSelectorPad(videoTestSrcName); err != nil {\n\t\t\t\treturn 
err\n\t\t\t}\n\t\t}\n\t\tif err := b.addDecodedVideoSink(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (b *VideoBin) addAppSrcBin(ts *config.TrackSource) error {\n\tname := fmt.Sprintf(\"%s_%d\", ts.TrackID, b.nextID)\n\tb.nextID++\n\n\tappSrcBin, err := b.buildAppSrcBin(ts, name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif b.conf.VideoDecoding {\n\t\tb.createSrcPad(ts.TrackID, name)\n\t}\n\n\tif err = b.bin.AddSourceBin(appSrcBin); err != nil {\n\t\treturn err\n\t}\n\n\tif b.conf.VideoDecoding {\n\t\treturn b.setSelectorPad(name)\n\t}\n\n\treturn nil\n}\n\nfunc (b *VideoBin) buildAppSrcBin(ts *config.TrackSource, name string) (*gstreamer.Bin, error) {\n\tappSrcBin := b.bin.NewBin(name)\n\tappSrcBin.SetEOSFunc(func() bool {\n\t\treturn false\n\t})\n\tts.AppSrc.SetArg(\"format\", \"time\")\n\tif err := ts.AppSrc.SetProperty(\"is-live\", b.conf.Live); err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\tif !b.conf.Live {\n\t\tif err := ts.AppSrc.SetProperty(\"block\", true); err != nil {\n\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t}\n\t}\n\tif err := appSrcBin.AddElement(ts.AppSrc.Element); err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch ts.MimeType {\n\tcase types.MimeTypeH264:\n\t\tif err := ts.AppSrc.SetProperty(\"caps\", gst.NewCapsFromString(fmt.Sprintf(\n\t\t\t\"application/x-rtp,media=video,payload=%d,encoding-name=H264,clock-rate=%d\",\n\t\t\tts.PayloadType, ts.ClockRate,\n\t\t))); err != nil {\n\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t}\n\n\t\trtpH264Depay, err := gst.NewElement(\"rtph264depay\")\n\t\tif err != nil {\n\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t}\n\n\t\tcaps, err := gst.NewElement(\"capsfilter\")\n\t\tif err != nil {\n\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t}\n\t\tif err = caps.SetProperty(\"caps\", gst.NewCapsFromString(\n\t\t\t\"video/x-h264,stream-format=byte-stream\",\n\t\t)); err != nil {\n\t\t\treturn nil, 
errors.ErrGstPipelineError(err)\n\t\t}\n\n\t\tif err = appSrcBin.AddElements(rtpH264Depay, caps); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif !b.conf.VideoDecoding {\n\t\t\th264ParseFixer, err := newPTSFixer(\"h264parse\", fmt.Sprintf(\"track:%s\", ts.TrackID))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif err = appSrcBin.AddElement(h264ParseFixer.Element); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\treturn appSrcBin, nil\n\t\t}\n\n\t\tavDecH264, err := gst.NewElement(\"avdec_h264\")\n\t\tif err != nil {\n\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t}\n\n\t\tif err = appSrcBin.AddElement(avDecH264); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\tcase types.MimeTypeVP8:\n\t\tif err := ts.AppSrc.SetProperty(\"caps\", gst.NewCapsFromString(fmt.Sprintf(\n\t\t\t\"application/x-rtp,media=video,payload=%d,encoding-name=VP8,clock-rate=%d\",\n\t\t\tts.PayloadType, ts.ClockRate,\n\t\t))); err != nil {\n\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t}\n\n\t\trtpVP8Depay, err := gst.NewElement(\"rtpvp8depay\")\n\t\tif err != nil {\n\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t}\n\t\tif err = appSrcBin.AddElement(rtpVP8Depay); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif !b.conf.VideoDecoding {\n\t\t\treturn appSrcBin, nil\n\t\t}\n\t\tvp8Dec, err := gst.NewElement(\"vp8dec\")\n\t\tif err != nil {\n\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t}\n\t\tif err = appSrcBin.AddElement(vp8Dec); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\tcase types.MimeTypeVP9:\n\t\tif err := ts.AppSrc.SetProperty(\"caps\", gst.NewCapsFromString(fmt.Sprintf(\n\t\t\t\"application/x-rtp,media=video,payload=%d,encoding-name=VP9,clock-rate=%d\",\n\t\t\tts.PayloadType, ts.ClockRate,\n\t\t))); err != nil {\n\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t}\n\n\t\trtpVP9Depay, err := gst.NewElement(\"rtpvp9depay\")\n\t\tif err != nil {\n\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t}\n\t\tif err = 
appSrcBin.AddElement(rtpVP9Depay); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif !b.conf.VideoDecoding {\n\t\t\tvp9ParseFixer, err := newPTSFixer(\"vp9parse\", fmt.Sprintf(\"track:%s\", ts.TrackID))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tvp9Parse := vp9ParseFixer.Element\n\n\t\t\tvp9Caps, err := gst.NewElement(\"capsfilter\")\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t\t}\n\t\t\tif err = vp9Caps.SetProperty(\"caps\", gst.NewCapsFromString(\n\t\t\t\t\"video/x-vp9,width=[16,2147483647],height=[16,2147483647]\",\n\t\t\t)); err != nil {\n\t\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t\t}\n\n\t\t\tif err = appSrcBin.AddElements(vp9Parse, vp9Caps); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn appSrcBin, nil\n\t\t}\n\n\t\tvp9Dec, err := gst.NewElement(\"vp9dec\")\n\t\tif err != nil {\n\t\t\treturn nil, errors.ErrGstPipelineError(err)\n\t\t}\n\t\tif err = appSrcBin.AddElement(vp9Dec); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\tdefault:\n\t\treturn nil, errors.ErrNotSupported(string(ts.MimeType))\n\t}\n\n\tif err := b.addVideoConverter(appSrcBin); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn appSrcBin, nil\n}\n\nfunc (b *VideoBin) addVideoTestSrcBin() error {\n\ttestSrcBin := b.bin.NewBin(videoTestSrcName)\n\tif err := b.bin.AddSourceBin(testSrcBin); err != nil {\n\t\treturn err\n\t}\n\n\tvideoTestSrc, err := gst.NewElement(\"videotestsrc\")\n\tif err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\tif err = videoTestSrc.SetProperty(\"is-live\", true); err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\tvideoTestSrc.SetArg(\"pattern\", \"black\")\n\n\tqueue, err := gstreamer.BuildQueue(\"video_test_src_queue\", b.conf.Latency.PipelineLatency, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err = queue.SetProperty(\"min-threshold-time\", uint64(2e9)); err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\n\tcaps, err := 
b.newVideoCapsFilter(true)\n\tif err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\n\tif err = testSrcBin.AddElements(videoTestSrc, queue, caps); err != nil {\n\t\treturn err\n\t}\n\n\tb.createTestSrcPad()\n\treturn nil\n}\n\nfunc (b *VideoBin) addSelector() error {\n\tinputSelector, err := gst.NewElement(\"input-selector\")\n\tif err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\n\tvideoRate, err := gst.NewElement(\"videorate\")\n\tif err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\tif err = videoRate.SetProperty(\"skip-to-first\", true); err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\n\tcaps, err := b.newVideoCapsFilter(true)\n\tif err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\n\tif err = b.bin.AddElements(inputSelector, videoRate, caps); err != nil {\n\t\treturn err\n\t}\n\n\tb.selector = inputSelector\n\treturn nil\n}\n\nfunc (b *VideoBin) addEncoder() error {\n\tvideoQueue, err := gstreamer.BuildQueue(\"video_encoder_queue\", b.conf.Latency.PipelineLatency, false)\n\tif err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\tif err = b.bin.AddElement(videoQueue); err != nil {\n\t\treturn err\n\t}\n\n\tswitch b.conf.VideoOutCodec {\n\t// we only encode h264, the rest are too slow\n\tcase types.MimeTypeH264:\n\t\tx264Enc, err := gst.NewElement(\"x264enc\")\n\t\tif err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\n\t\tx264Enc.SetArg(\"speed-preset\", \"veryfast\")\n\n\t\tvar options []string\n\t\tdisabledSceneCut := false\n\t\t// Streaming outputs always set KeyFrameInterval, so this effectively disables scenecut for RTMP/SRT.\n\t\tif b.conf.KeyFrameInterval != 0 {\n\t\t\tkeyframeInterval := uint(b.conf.KeyFrameInterval * float64(b.conf.Framerate))\n\t\t\tif err = x264Enc.SetProperty(\"key-int-max\", keyframeInterval); err != nil {\n\t\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t\t}\n\t\t\toptions = append(options, 
\"scenecut=0\")\n\t\t\tdisabledSceneCut = true\n\t\t}\n\n\t\tbufCapacity := uint(2000) // 2s\n\t\tif b.conf.GetSegmentConfig() != nil {\n\t\t\t// avoid key frames other than at segments boundaries as splitmuxsink can become inconsistent otherwise\n\t\t\tif !disabledSceneCut {\n\t\t\t\toptions = append(options, \"scenecut=0\")\n\t\t\t\tdisabledSceneCut = true\n\t\t\t}\n\t\t\tbufCapacity = uint(time.Duration(b.conf.GetSegmentConfig().SegmentDuration) * (time.Second / time.Millisecond))\n\t\t}\n\t\tif bufCapacity > 10000 {\n\t\t\t// Max value allowed by gstreamer\n\t\t\tbufCapacity = 10000\n\t\t}\n\t\tif err = x264Enc.SetProperty(\"vbv-buf-capacity\", bufCapacity); err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\n\t\tif err = x264Enc.SetProperty(\"bitrate\", uint(b.conf.VideoBitrate)); err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\n\t\tif sc := b.conf.GetStreamConfig(); sc != nil && sc.OutputType == types.OutputTypeRTMP {\n\t\t\toptions = append(options, \"nal-hrd=cbr\")\n\t\t}\n\t\tif len(options) > 0 {\n\t\t\toptionString := strings.Join(options, \":\")\n\t\t\tif err = x264Enc.SetProperty(\"option-string\", optionString); err != nil {\n\t\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t\t}\n\t\t}\n\n\t\tcaps, err := gst.NewElement(\"capsfilter\")\n\t\tif err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\t\tif err = caps.SetProperty(\"caps\", gst.NewCapsFromString(fmt.Sprintf(\n\t\t\t\"video/x-h264,profile=%s,multiview-mode=mono,multiview-flags=(GstVideoMultiviewFlagsSet)0:ffffffff:/right-view-first/left-flipped/left-flopped/right-flipped/right-flopped/half-aspect/mixed-mono\",\n\t\t\tb.conf.VideoProfile,\n\t\t))); err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\n\t\tif err = b.bin.AddElements(x264Enc, caps); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\n\tcase types.MimeTypeVP9:\n\t\tvp9Enc, err := gst.NewElement(\"vp9enc\")\n\t\tif err != nil {\n\t\t\treturn 
errors.ErrGstPipelineError(err)\n\t\t}\n\t\tif err = vp9Enc.SetProperty(\"deadline\", int64(1)); err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\t\tif err = vp9Enc.SetProperty(\"row-mt\", true); err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\t\tif err = vp9Enc.SetProperty(\"tile-columns\", 3); err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\t\tif err = vp9Enc.SetProperty(\"tile-rows\", 1); err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\t\tif err = vp9Enc.SetProperty(\"frame-parallel\", true); err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\t\tif err = vp9Enc.SetProperty(\"max-quantizer\", 52); err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\t\tif err = vp9Enc.SetProperty(\"min-quantizer\", 2); err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\t\tif err = b.bin.AddElement(vp9Enc); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfallthrough\n\n\tdefault:\n\t\treturn errors.ErrNotSupported(fmt.Sprintf(\"%s encoding\", b.conf.VideoOutCodec))\n\t}\n}\n\nfunc (b *VideoBin) addDecodedVideoSink() error {\n\tvar err error\n\tb.rawVideoTee, err = gst.NewElement(\"tee\")\n\tif err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\tif err = b.bin.AddElement(b.rawVideoTee); err != nil {\n\t\treturn err\n\t}\n\n\tif b.conf.VideoEncoding {\n\t\terr = b.addEncoder()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (b *VideoBin) addVideoConverter(bin *gstreamer.Bin) error {\n\tvideoQueue, err := b.buildVideoQueue(\"video_input_queue\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvideoConvert, err := gst.NewElement(\"videoconvert\")\n\tif err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\n\tvideoScale, err := gst.NewElement(\"videoscale\")\n\tif err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\n\telements := []*gst.Element{videoQueue, videoConvert, videoScale}\n\n\tif 
!b.conf.VideoDecoding {\n\t\tvideoRate, err := gst.NewElement(\"videorate\")\n\t\tif err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\t\tif err = videoRate.SetProperty(\"skip-to-first\", true); err != nil {\n\t\t\treturn errors.ErrGstPipelineError(err)\n\t\t}\n\t\telements = append(elements, videoRate)\n\t}\n\n\tcaps, err := b.newVideoCapsFilter(!b.conf.VideoDecoding)\n\tif err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\telements = append(elements, caps)\n\n\treturn bin.AddElements(elements...)\n}\n\nfunc (b *VideoBin) newVideoCapsFilter(includeFramerate bool) (*gst.Element, error) {\n\tcaps, err := gst.NewElement(\"capsfilter\")\n\tif err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\tif includeFramerate {\n\t\terr = caps.SetProperty(\"caps\", gst.NewCapsFromString(fmt.Sprintf(\n\t\t\t\"video/x-raw,framerate=%d/1,format=I420,width=%d,height=%d,colorimetry=bt709,chroma-site=mpeg2,pixel-aspect-ratio=1/1\",\n\t\t\tb.conf.Framerate, b.conf.Width, b.conf.Height,\n\t\t)))\n\t} else {\n\t\terr = caps.SetProperty(\"caps\", gst.NewCapsFromString(fmt.Sprintf(\n\t\t\t\"video/x-raw,format=I420,width=%d,height=%d,colorimetry=bt709,chroma-site=mpeg2,pixel-aspect-ratio=1/1\",\n\t\t\tb.conf.Width, b.conf.Height,\n\t\t)))\n\t}\n\tif err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\treturn caps, nil\n}\n\nfunc (b *VideoBin) getSrcPad(name string) *gst.Pad {\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\n\treturn b.pads[name]\n}\n\nfunc (b *VideoBin) createSrcPad(trackID, name string) {\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\n\tb.createSrcPadLocked(trackID, name)\n}\n\nfunc (b *VideoBin) createSrcPadLocked(trackID, name string) {\n\tb.names[trackID] = name\n\n\tpad := b.selector.GetRequestPad(\"sink_%u\")\n\tpad.AddProbe(gst.PadProbeTypeBuffer, func(_ *gst.Pad, info *gst.PadProbeInfo) gst.PadProbeReturn {\n\t\tpts := uint64(info.GetBuffer().PresentationTimestamp())\n\t\tb.mu.Lock()\n\t\tif pts < b.lastPTS || 
(b.selectedPad != videoTestSrcName && b.selectedPad != name) {\n\t\t\tb.mu.Unlock()\n\t\t\treturn gst.PadProbeDrop\n\t\t}\n\t\tb.lastPTS = pts\n\t\tb.mu.Unlock()\n\t\treturn gst.PadProbeOK\n\t})\n\n\tb.pads[name] = pad\n}\n\nfunc (b *VideoBin) createTestSrcPad() {\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\n\tpad := b.selector.GetRequestPad(\"sink_%u\")\n\tpad.AddProbe(gst.PadProbeTypeBuffer, func(_ *gst.Pad, info *gst.PadProbeInfo) gst.PadProbeReturn {\n\t\tpts := uint64(info.GetBuffer().PresentationTimestamp())\n\t\tb.mu.Lock()\n\t\tif pts < b.lastPTS || (b.selectedPad != videoTestSrcName) {\n\t\t\tb.mu.Unlock()\n\t\t\treturn gst.PadProbeDrop\n\t\t}\n\t\tb.lastPTS = pts\n\t\tb.mu.Unlock()\n\t\treturn gst.PadProbeOK\n\t})\n\n\tb.pads[videoTestSrcName] = pad\n}\n\nfunc (b *VideoBin) setSelectorPad(name string) error {\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\n\treturn b.setSelectorPadLocked(name)\n}\n\nfunc (b *VideoBin) setSelectorPadLocked(name string) error {\n\tpad := b.pads[name]\n\n\t// drop until the next keyframe\n\tpad.AddProbe(gst.PadProbeTypeBuffer, func(_ *gst.Pad, info *gst.PadProbeInfo) gst.PadProbeReturn {\n\t\tbuffer := info.GetBuffer()\n\t\tif buffer.HasFlags(gst.BufferFlagDeltaUnit) {\n\t\t\treturn gst.PadProbeDrop\n\t\t}\n\t\tlogger.Debugw(\"active pad changed\", \"name\", name)\n\t\treturn gst.PadProbeRemove\n\t})\n\n\tif err := b.selector.SetProperty(\"active-pad\", pad); err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\n\tb.selectedPad = name\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/pipeline/builder/vp9_probe.go",
    "content": "package builder\n\nimport (\n\t\"fmt\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/go-gst/go-gst/gst\"\n\t\"github.com/linkdata/deadlock\"\n\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/protocol/logger\"\n)\n\nconst (\n\tkeyframeHistorySize     = 10\n\tkeyframeRequestInterval = 200 * time.Millisecond\n)\n\n// vp9ParseProbe inspects buffers around vp9parse to detect and signal missing\n// PTS and capture timing diagnostics. It never mutates the media flow; state\n// such as lastSinkPTS is tracked solely for logging and debugging.\ntype vp9ParseProbe struct {\n\ttrackID string\n\n\tsrcPad  *gst.Pad\n\tsinkPad *gst.Pad\n\n\tsrcProbeID  uint64\n\tsinkProbeID uint64\n\n\tonSignal func()\n\n\tlogger logger.Logger\n\n\tlastSrcPTS   atomic.Uint64\n\tlastSrcValid atomic.Bool\n\tmissingPTS   atomic.Bool\n\n\tlastSinkPTS   atomic.Uint64\n\tlastSinkValid atomic.Bool\n\n\tkeyframeMu            deadlock.Mutex\n\tkeyframePTS           []time.Duration\n\ttotalIntervalSum      time.Duration\n\ttotalIntervals        int\n\tlastKeyframeRequestNS atomic.Int64\n\tkeyframePending       atomic.Bool\n}\n\nfunc newVP9ParseProbe(trackID string, parse *gst.Element, onSignal func()) (*vp9ParseProbe, error) {\n\tsrcPad := parse.GetStaticPad(\"src\")\n\tif srcPad == nil {\n\t\treturn nil, errors.ErrGstPipelineError(newMissingPadError(\"vp9parse\", \"src\"))\n\t}\n\n\tsinkPad := parse.GetStaticPad(\"sink\")\n\tif sinkPad == nil {\n\t\tsrcPad.Unref()\n\t\treturn nil, errors.ErrGstPipelineError(newMissingPadError(\"vp9parse\", \"sink\"))\n\t}\n\n\tp := &vp9ParseProbe{\n\t\ttrackID:     trackID,\n\t\tsrcPad:      srcPad,\n\t\tsinkPad:     sinkPad,\n\t\tonSignal:    onSignal,\n\t\tlogger:      logger.GetLogger().WithValues(\"trackID\", trackID, \"component\", \"vp9_probe\"),\n\t\tkeyframePTS: make([]time.Duration, 0, keyframeHistorySize),\n\t}\n\n\tp.srcProbeID = srcPad.AddProbe(gst.PadProbeTypeBuffer, p.onSrcBuffer)\n\tp.sinkProbeID = 
sinkPad.AddProbe(gst.PadProbeTypeBuffer, p.onSinkBuffer)\n\n\treturn p, nil\n}\n\nfunc (p *vp9ParseProbe) Close() {\n\tp.logKeyframeHistory(\"probe_closed\")\n\n\tif p.srcPad != nil {\n\t\tp.srcPad.RemoveProbe(p.srcProbeID)\n\t\tp.srcPad.Unref()\n\t\tp.srcPad = nil\n\t}\n\tif p.sinkPad != nil {\n\t\tp.sinkPad.RemoveProbe(p.sinkProbeID)\n\t\tp.sinkPad.Unref()\n\t\tp.sinkPad = nil\n\t}\n}\n\nfunc (p *vp9ParseProbe) onSrcBuffer(_ *gst.Pad, info *gst.PadProbeInfo) gst.PadProbeReturn {\n\tbuffer := info.GetBuffer()\n\tif buffer == nil {\n\t\treturn gst.PadProbeOK\n\t}\n\n\tpts, ok := clockTimeToDuration(buffer.PresentationTimestamp())\n\tif !ok {\n\t\tp.handleMissingPTS()\n\t\treturn gst.PadProbeDrop\n\t}\n\n\tp.handleValidPTS(buffer, pts)\n\tif p.keyframePending.Load() {\n\t\treturn gst.PadProbeDrop\n\t}\n\treturn gst.PadProbeOK\n}\n\n// just for logging purposes\nfunc (p *vp9ParseProbe) onSinkBuffer(_ *gst.Pad, info *gst.PadProbeInfo) gst.PadProbeReturn {\n\tbuffer := info.GetBuffer()\n\tif buffer == nil {\n\t\treturn gst.PadProbeOK\n\t}\n\n\tpts, ok := clockTimeToDuration(buffer.PresentationTimestamp())\n\tif !ok {\n\t\treturn gst.PadProbeOK\n\t}\n\n\tif !p.lastSinkValid.Load() {\n\t\tp.lastSinkPTS.Store(uint64(pts))\n\t\tp.lastSinkValid.Store(true)\n\t\treturn gst.PadProbeOK\n\t}\n\n\tprev := time.Duration(p.lastSinkPTS.Load())\n\tdelta := pts - prev\n\tif delta < 0 {\n\t\tp.logger.Warnw(\"vp9parse sink pts moved backwards\", nil, \"delta\", delta)\n\t\tp.logKeyframeHistory(\"backward_pts\")\n\t}\n\n\tp.lastSinkPTS.Store(uint64(pts))\n\n\treturn gst.PadProbeOK\n}\n\nfunc (p *vp9ParseProbe) handleMissingPTS() {\n\tp.keyframePending.Store(true)\n\tp.requestKeyframeIfDue()\n\n\tif !p.missingPTS.CompareAndSwap(false, true) {\n\t\treturn\n\t}\n\n\tfields := []any{}\n\tif p.lastSrcValid.Load() {\n\t\tlast := time.Duration(p.lastSrcPTS.Load())\n\t\tfields = append(fields, \"lastValidPTS\", last)\n\t}\n\tif avg, count, ok := p.keyframeStats(); ok {\n\t\tfields = 
append(fields, \"avgKeyframeInterval\", avg, \"keyframesTracked\", count)\n\t}\n\tp.logger.Warnw(\"vp9parse buffer missing PTS\", nil, fields...)\n\tp.logKeyframeHistory(\"missing_pts\")\n}\n\nfunc (p *vp9ParseProbe) handleValidPTS(buffer *gst.Buffer, pts time.Duration) {\n\tp.lastSrcPTS.Store(uint64(pts))\n\tp.lastSrcValid.Store(true)\n\tp.missingPTS.Store(false)\n\n\tif buffer.GetFlags()&gst.BufferFlagDeltaUnit == 0 {\n\t\twasPending := p.keyframePending.Swap(false)\n\t\tif wasPending {\n\t\t\tp.logger.Debugw(\"keyframe pending, got one\")\n\t\t\tbuffer.SetFlags(buffer.GetFlags() | gst.BufferFlagDiscont)\n\t\t}\n\t\tp.trackKeyframe(pts)\n\t} else {\n\t\tp.requestKeyframeIfDue()\n\t}\n}\n\nfunc (p *vp9ParseProbe) trackKeyframe(pts time.Duration) {\n\tp.keyframeMu.Lock()\n\tdefer p.keyframeMu.Unlock()\n\n\tif count := len(p.keyframePTS); count > 0 {\n\t\tdelta := pts - p.keyframePTS[count-1]\n\t\tif delta > 0 {\n\t\t\tp.totalIntervalSum += delta\n\t\t\tp.totalIntervals++\n\t\t}\n\t}\n\n\tp.keyframePTS = append(p.keyframePTS, pts)\n\tif len(p.keyframePTS) > keyframeHistorySize {\n\t\tp.keyframePTS = p.keyframePTS[1:]\n\t\t// sliding window only keeps the most recent timestamps for debugging logs\n\t}\n\n}\n\nfunc (p *vp9ParseProbe) requestKeyframeIfDue() {\n\tif p.onSignal == nil {\n\t\treturn\n\t}\n\tif !p.keyframePending.Load() {\n\t\treturn\n\t}\n\n\tnow := time.Now().UnixNano()\n\tlast := p.lastKeyframeRequestNS.Load()\n\tif last != 0 && time.Duration(now-last) < keyframeRequestInterval {\n\t\treturn\n\t}\n\n\tp.onSignal()\n\n\tp.lastKeyframeRequestNS.Store(now)\n}\n\nfunc clockTimeToDuration(ct gst.ClockTime) (time.Duration, bool) {\n\tif ct == gst.ClockTimeNone {\n\t\treturn 0, false\n\t}\n\treturn time.Duration(uint64(ct)), true\n}\n\nfunc (p *vp9ParseProbe) keyframeStats() (time.Duration, int, bool) {\n\tp.keyframeMu.Lock()\n\tdefer p.keyframeMu.Unlock()\n\n\tif p.totalIntervals == 0 {\n\t\treturn 0, len(p.keyframePTS), false\n\t}\n\n\tavg := 
p.totalIntervalSum / time.Duration(p.totalIntervals)\n\treturn avg, p.totalIntervals + 1, true\n}\n\nfunc (p *vp9ParseProbe) logKeyframeHistory(reason string) {\n\tp.keyframeMu.Lock()\n\tif len(p.keyframePTS) == 0 {\n\t\tp.keyframeMu.Unlock()\n\t\treturn\n\t}\n\n\thistory := make([]time.Duration, len(p.keyframePTS))\n\tcopy(history, p.keyframePTS)\n\tavg := time.Duration(0)\n\tcount := 0\n\tif p.totalIntervals > 0 {\n\t\tavg = p.totalIntervalSum / time.Duration(p.totalIntervals)\n\t\tcount = p.totalIntervals + 1\n\t}\n\tp.keyframeMu.Unlock()\n\tp.logger.Debugw(\"vp9 keyframe history\", \"reason\", reason, \"history\", history, \"avgKeyframeInterval\", avg, \"keyframesTracked\", count)\n}\n\ntype missingPadError struct {\n\telement string\n\tpad     string\n}\n\nfunc newMissingPadError(element, pad string) error {\n\treturn missingPadError{element: element, pad: pad}\n}\n\nfunc (e missingPadError) Error() string {\n\treturn fmt.Sprintf(\"missing %s pad on %s\", e.pad, e.element)\n}\n"
  },
  {
    "path": "pkg/pipeline/builder/websocket.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage builder\n\nimport (\n\t\"github.com/go-gst/go-gst/gst\"\n\t\"github.com/go-gst/go-gst/gst/app\"\n\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/gstreamer\"\n)\n\nfunc BuildWebsocketBin(pipeline *gstreamer.Pipeline, appSinkCallbacks *app.SinkCallbacks) (*gstreamer.Bin, error) {\n\tb := pipeline.NewBin(\"websocket\")\n\n\tappSink, err := app.NewAppSink()\n\tif err != nil {\n\t\treturn nil, errors.ErrGstPipelineError(err)\n\t}\n\tappSink.SetCallbacks(appSinkCallbacks)\n\n\tif err = b.AddElement(appSink.Element); err != nil {\n\t\treturn nil, err\n\t}\n\n\tb.SetGetSrcPad(func(_ string) *gst.Pad {\n\t\treturn appSink.GetStaticPad(\"sink\")\n\t})\n\n\treturn b, nil\n}\n"
  },
  {
    "path": "pkg/pipeline/controller.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage pipeline\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"path/filepath\"\n\t\"sort\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/frostbyte73/core\"\n\t\"github.com/go-gst/go-gst/gst\"\n\t\"github.com/linkdata/deadlock\"\n\t\"go.uber.org/atomic\"\n\t\"go.uber.org/zap\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/gstreamer\"\n\t\"github.com/livekit/egress/pkg/ipc\"\n\t\"github.com/livekit/egress/pkg/pipeline/builder\"\n\t\"github.com/livekit/egress/pkg/pipeline/sink\"\n\t\"github.com/livekit/egress/pkg/pipeline/source\"\n\t\"github.com/livekit/egress/pkg/stats\"\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/livekit\"\n\t\"github.com/livekit/protocol/logger\"\n\t\"github.com/livekit/psrpc\"\n\t\"go.opentelemetry.io/otel\"\n)\n\nconst (\n\tpipelineName = \"pipeline\"\n\teosTimeout   = time.Second * 30\n\n\tstreamRetryUpdateInterval = time.Minute\n)\n\ntype Controller struct {\n\t*config.PipelineConfig\n\tipcServiceClient ipc.EgressServiceClient\n\n\t// gstreamer\n\tgstLogger *zap.SugaredLogger\n\tsrc       source.Source\n\tcallbacks *gstreamer.Callbacks\n\tp         *gstreamer.Pipeline\n\tsinks     map[types.EgressType][]sink.Sink\n\n\t// replay timing\n\treplayStartAt  int64 // wallclock unix nanos\n\treplayDuration int64 
// milliseconds\n\n\t// internal\n\tmu                   deadlock.Mutex\n\tmonitor              *stats.HandlerMonitor\n\tlimitTimer           *time.Timer\n\tstorageMonitorCancel context.CancelFunc\n\tpaused               core.Fuse\n\tplaying              core.Fuse\n\teosSent              core.Fuse\n\teosTimer             *time.Timer\n\teosReceived          core.Fuse\n\tstopped              core.Fuse\n\tstorageLimitOnce     sync.Once\n\tpipelineEndedAt      int64\n\tstats                controllerStats\n\tpipelineCreatedAt    time.Time\n}\n\ntype controllerStats struct {\n\tmixerDroppedAudioBuffers atomic.Uint64\n\tdroppedVideoBuffers      atomic.Uint64\n\n\tmixerDroppedAudioDuration atomic.Duration\n\n\tqueuesDroppedAudioBuffers atomic.Uint64\n\n\tdroppedAudioBuffersByQueue map[string]uint64\n\tdroppedVideoBuffersByQueue map[string]uint64\n}\n\n// SourceBuilder constructs a pipeline source. It receives the controller's\n// callbacks so the source can synchronize on GstReady; custom sources that\n// don't need gst synchronization can ignore the argument.\ntype SourceBuilder func(callbacks *gstreamer.Callbacks) (source.Source, error)\n\nvar (\n\ttracer = otel.Tracer(\"github.com/livekit/egress/pkg/pipeline\")\n)\n\nfunc New(ctx context.Context, conf *config.PipelineConfig, ipcServiceClient ipc.EgressServiceClient) (*Controller, error) {\n\tctx, span := tracer.Start(ctx, \"Pipeline.New\")\n\tdefer span.End()\n\n\treturn NewWithSource(ctx, conf, ipcServiceClient, func(callbacks *gstreamer.Callbacks) (source.Source, error) {\n\t\treturn source.New(ctx, conf, callbacks)\n\t})\n}\n\n// NewWithSource creates a Controller using the given SourceBuilder. The builder\n// runs after the controller has been constructed and receives the controller's\n// Callbacks, so the source can share GstReady with the pipeline. 
Use this when\n// the source isn't the standard source.New (testfeeder, replay export, etc.).\nfunc NewWithSource(\n\tctx context.Context,\n\tconf *config.PipelineConfig,\n\tipcServiceClient ipc.EgressServiceClient,\n\tsrcBuilder SourceBuilder,\n) (*Controller, error) {\n\tc := newController(conf, ipcServiceClient)\n\n\t// initialize gst\n\tgo func() {\n\t\t_, span := tracer.Start(ctx, \"gst.Init\")\n\t\tdefer span.End()\n\t\tgst.Init(nil)\n\t\tgst.SetLogFunction(c.gstLog)\n\t\tclose(c.callbacks.GstReady)\n\t}()\n\n\tsrc, err := srcBuilder(c.callbacks)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.src = src\n\n\t// create pipeline\n\t<-c.callbacks.GstReady\n\tif err := c.BuildPipeline(); err != nil {\n\t\tc.src.Close()\n\t\treturn nil, err\n\t}\n\n\treturn c, nil\n}\n\n// Callbacks returns the pipeline callbacks. Sources that need to wait for\n// GstReady before creating appsrc elements can use this.\nfunc (c *Controller) Callbacks() *gstreamer.Callbacks {\n\treturn c.callbacks\n}\n\nfunc newController(conf *config.PipelineConfig, ipcServiceClient ipc.EgressServiceClient) *Controller {\n\tc := &Controller{\n\t\tPipelineConfig:   conf,\n\t\tipcServiceClient: ipcServiceClient,\n\t\tgstLogger:        logger.GetLogger().(logger.ZapLogger).ToZap().WithOptions(zap.WithCaller(false)),\n\t\tcallbacks: &gstreamer.Callbacks{\n\t\t\tGstReady:   make(chan struct{}),\n\t\t\tBuildReady: make(chan struct{}),\n\t\t},\n\t\tsinks:   make(map[types.EgressType][]sink.Sink),\n\t\tmonitor: stats.NewHandlerMonitor(conf.NodeID, conf.ClusterID, conf.Info.EgressId),\n\t\tstats: controllerStats{\n\t\t\tdroppedVideoBuffersByQueue: make(map[string]uint64),\n\t\t\tdroppedAudioBuffersByQueue: make(map[string]uint64),\n\t\t},\n\t}\n\tc.callbacks.SetOnError(c.OnError)\n\tc.callbacks.SetOnEOSSent(c.onEOSSent)\n\tc.callbacks.SetOnDebugDotRequest(func(reason string) {\n\t\tif !c.Debug.EnableProfiling {\n\t\t\treturn\n\t\t}\n\t\tlogger.Debugw(\"debug dot requested\", \"reason\", 
reason)\n\t\tc.generateDotFile(reason)\n\t})\n\treturn c\n}\n\nfunc (c *Controller) BuildPipeline() error {\n\tp, err := gstreamer.NewPipeline(pipelineName, c.Latency.PipelineLatency, c.callbacks)\n\tif err != nil {\n\t\treturn errors.ErrGstPipelineError(err)\n\t}\n\n\tc.pipelineCreatedAt = time.Now()\n\n\tp.SetWatch(c.messageWatch)\n\tp.AddOnStop(func() error {\n\t\tc.stopped.Break()\n\t\treturn nil\n\t})\n\tif sdkSrc, ok := c.src.(*source.SDKSource); ok {\n\t\tp.SetEOSFunc(func() bool {\n\t\t\tsdkSrc.CloseWriters()\n\t\t\treturn true\n\t\t})\n\t}\n\n\tif c.AudioEnabled {\n\t\tif err = builder.BuildAudioBin(p, c.PipelineConfig); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif c.VideoEnabled {\n\t\tif err = builder.BuildVideoBin(p, c.PipelineConfig); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfor egressType, outputs := range c.Outputs {\n\t\tfor _, o := range outputs {\n\t\t\ts, err := sink.NewSink(p, c.PipelineConfig, egressType, o, c.callbacks, c.monitor)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tc.sinks[egressType] = append(c.sinks[egressType], s)\n\t\t}\n\t}\n\n\tif err = p.Link(); err != nil {\n\t\treturn err\n\t}\n\n\t// initial graph is fully wired; from now on, dynamic additions must be linked immediately\n\tp.UpgradeState(gstreamer.StateStarted)\n\n\tc.p = p\n\tif timeAware, ok := c.src.(source.TimeAware); ok {\n\t\ttimeAware.SetTimeProvider(p)\n\t}\n\tclose(c.callbacks.BuildReady)\n\treturn nil\n}\n\nfunc (c *Controller) SetReplayTiming(startAt, durationMs int64) {\n\tc.replayStartAt = startAt\n\tc.replayDuration = durationMs\n}\n\nfunc (c *Controller) Run(ctx context.Context) *livekit.EgressInfo {\n\tctx, span := tracer.Start(ctx, \"Pipeline.Run\")\n\tdefer span.End()\n\n\tdefer c.Close()\n\n\tdefer func() {\n\t\tif c.VideoEnabled {\n\t\t\tlogger.Infow(\n\t\t\t\t\"video input queue stats\",\n\t\t\t\t\"videoBuffersDropped\", c.stats.droppedVideoBuffers.Load(),\n\t\t\t\t\"requestType\", c.RequestType,\n\t\t\t\t\"sourceType\", 
c.SourceType,\n\t\t\t\t\"droppedByQueue\", c.stats.droppedVideoBuffersByQueue,\n\t\t\t)\n\t\t}\n\t\tif c.SourceType == types.SourceTypeSDK {\n\t\t\tlogger.Infow(\n\t\t\t\t\"audio qos stats\",\n\t\t\t\t\"audioBuffersDropped\", c.stats.mixerDroppedAudioBuffers.Load(),\n\t\t\t\t\"totalAudioDurationDropped\", c.stats.mixerDroppedAudioDuration.Load(),\n\t\t\t\t\"queueDroppedAudioBuffers\", c.stats.queuesDroppedAudioBuffers.Load(),\n\t\t\t\t\"droppedByQueue\", c.stats.droppedAudioBuffersByQueue,\n\t\t\t\t\"requestType\", c.RequestType,\n\t\t\t)\n\t\t}\n\t}()\n\n\t// session limit timer\n\tc.startSessionLimitTimer(ctx)\n\n\t// close when room ends\n\tgo func() {\n\t\t<-c.src.EndRecording()\n\t\tc.SendEOS(ctx, livekit.EndReasonSrcClosed)\n\t}()\n\n\t// wait until room is ready\n\tstart := c.src.StartRecording()\n\tif start != nil {\n\t\tlogger.Debugw(\"waiting for start signal\")\n\t\tselect {\n\t\tcase <-c.stopped.Watch():\n\t\t\tc.src.Close()\n\t\t\tc.Info.SetAborted(livekit.MsgStartNotReceived)\n\t\t\treturn c.Info\n\t\tcase <-start:\n\t\t\t// continue\n\t\t}\n\t}\n\n\t// Replay timing gate: wait until start_at\n\tif c.replayStartAt > 0 {\n\t\twaitDuration := time.Until(time.Unix(0, c.replayStartAt))\n\t\tif waitDuration > 0 {\n\t\t\tlogger.Debugw(\"waiting for replay start time\", \"waitDuration\", waitDuration)\n\t\t\tselect {\n\t\t\tcase <-c.stopped.Watch():\n\t\t\t\tc.src.Close()\n\t\t\t\tc.Info.SetAborted(livekit.MsgStartNotReceived)\n\t\t\t\treturn c.Info\n\t\t\tcase <-time.After(waitDuration):\n\t\t\t\t// continue\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, si := range c.sinks {\n\t\tfor _, s := range si {\n\t\t\tif err := s.Start(); err != nil {\n\t\t\t\tc.src.Close()\n\t\t\t\tc.Info.SetFailed(err)\n\t\t\t\treturn c.Info\n\t\t\t}\n\t\t}\n\t}\n\n\tc.startOutputSizeMonitor()\n\n\t// Replay duration timer\n\tif c.replayDuration > 0 {\n\t\ttime.AfterFunc(time.Duration(c.replayDuration)*time.Millisecond, func() {\n\t\t\tc.SendEOS(ctx, 
livekit.EndReasonSrcClosed)\n\t\t})\n\t}\n\n\terr := c.p.Run()\n\tif err != nil {\n\t\tc.src.Close()\n\t\tc.Info.SetFailed(err)\n\t\treturn c.Info\n\t}\n\n\tlogger.Debugw(\"closing source\")\n\tc.src.Close()\n\n\tif c.playing.IsBroken() {\n\t\tlogger.Debugw(\"closing sinks\")\n\t\tfor _, si := range c.sinks {\n\t\t\tfor _, s := range si {\n\t\t\t\tif c.eosReceived.IsBroken() || s.EOSReceived() {\n\t\t\t\t\tif err := s.Close(); err != nil && c.Info.Status != livekit.EgressStatus_EGRESS_FAILED {\n\t\t\t\t\t\tc.Info.SetFailed(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn c.Info\n}\n\nfunc (c *Controller) UpdateStream(ctx context.Context, req *livekit.UpdateStreamRequest) error {\n\tctx, span := tracer.Start(ctx, \"Pipeline.UpdateStream\")\n\tdefer span.End()\n\n\to := c.GetStreamConfig()\n\tif o == nil {\n\t\treturn errors.ErrNonStreamingPipeline\n\t}\n\n\terrs := errors.ErrArray{}\n\n\t// add stream outputs first\n\tfor _, rawUrl := range req.AddOutputUrls {\n\t\t// validate and redact url\n\t\tstream, err := o.AddStream(rawUrl, o.OutputType)\n\t\tif err != nil {\n\t\t\terrs.AppendErr(err)\n\t\t\tcontinue\n\t\t}\n\n\t\t// add stream info to results\n\t\tc.mu.Lock()\n\t\tc.Info.StreamResults = append(c.Info.StreamResults, stream.StreamInfo)\n\t\tif list := c.Info.GetStream(); list != nil { //nolint:staticcheck // keep deprecated field for older clients\n\t\t\tlist.Info = append(list.Info, stream.StreamInfo)\n\t\t}\n\t\tc.mu.Unlock()\n\n\t\t// add stream\n\t\tif err = c.getStreamSink().AddStream(stream); err != nil {\n\t\t\tstream.StreamInfo.Status = livekit.StreamInfo_FAILED\n\t\t\tstream.StreamInfo.Error = err.Error()\n\t\t\tstream.UpdateEndTime(time.Now().UnixNano())\n\t\t\terrs.AppendErr(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tc.OutputCount.Inc()\n\t}\n\n\t// remove stream outputs\n\tfor _, rawUrl := range req.RemoveOutputUrls {\n\t\tstream, err := o.GetStream(rawUrl)\n\t\tif err != nil {\n\t\t\terrs.AppendErr(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err = 
c.streamFinished(ctx, stream); err != nil {\n\t\t\terrs.AppendErr(err)\n\t\t}\n\t}\n\n\tc.streamUpdated(ctx)\n\treturn errs.ToError()\n}\n\nfunc (c *Controller) UpdateEgress(ctx context.Context, req *livekit.UpdateEgressRequest) error {\n\tctx, span := tracer.Start(ctx, \"Pipeline.UpdateEgress\")\n\tdefer span.End()\n\n\terrs := errors.ErrArray{}\n\n\t// update stream targets\n\tif len(req.AddStreamUrls) > 0 || len(req.RemoveStreamUrls) > 0 {\n\t\tstreamReq := &livekit.UpdateStreamRequest{\n\t\t\tEgressId:         req.EgressId,\n\t\t\tAddOutputUrls:    req.AddStreamUrls,\n\t\t\tRemoveOutputUrls: req.RemoveStreamUrls,\n\t\t}\n\t\tif err := c.UpdateStream(ctx, streamReq); err != nil {\n\t\t\terrs.AppendErr(err)\n\t\t}\n\t}\n\n\t// update layout — not yet supported\n\tif req.Layout != \"\" {\n\t\terrs.AppendErr(errors.ErrFeatureDisabled(\"layout update\"))\n\t}\n\n\t// update URL — not yet supported\n\tif req.Url != \"\" {\n\t\terrs.AppendErr(errors.ErrFeatureDisabled(\"url update\"))\n\t}\n\n\treturn errs.ToError()\n}\n\nfunc (c *Controller) streamFinished(ctx context.Context, stream *config.Stream) error {\n\tstream.StreamInfo.Status = livekit.StreamInfo_FINISHED\n\tstream.UpdateEndTime(time.Now().UnixNano())\n\n\t// remove output\n\to := c.GetStreamConfig()\n\to.Streams.Delete(stream.ParsedUrl)\n\tc.OutputCount.Dec()\n\n\t// end egress if no outputs remaining\n\tif c.OutputCount.Load() == 0 {\n\t\tc.SendEOS(ctx, livekit.EndReasonStreamsStopped)\n\t\treturn nil\n\t}\n\n\tlogger.Infow(\"stream finished\",\n\t\t\"url\", stream.RedactedUrl,\n\t\t\"status\", stream.StreamInfo.Status,\n\t\t\"duration\", stream.StreamInfo.Duration,\n\t)\n\n\treturn c.getStreamSink().RemoveStream(stream)\n}\n\nfunc (c *Controller) streamFailed(ctx context.Context, stream *config.Stream, streamErr error) error {\n\tstream.StreamInfo.Status = livekit.StreamInfo_FAILED\n\tstream.StreamInfo.Error = streamErr.Error()\n\tstream.UpdateEndTime(time.Now().UnixNano())\n\n\t// remove output\n\to := 
c.GetStreamConfig()\n\to.Streams.Delete(stream.ParsedUrl)\n\tc.OutputCount.Dec()\n\n\t// fail egress if no outputs remaining\n\tif c.OutputCount.Load() == 0 {\n\t\treturn psrpc.NewError(psrpc.Unavailable, streamErr)\n\t}\n\n\tlogger.Infow(\"stream failed\",\n\t\t\"url\", stream.RedactedUrl,\n\t\t\"status\", stream.StreamInfo.Status,\n\t\t\"duration\", stream.StreamInfo.Duration,\n\t\t\"error\", streamErr)\n\n\tc.streamUpdated(ctx)\n\treturn c.getStreamSink().RemoveStream(stream)\n}\n\nfunc (c *Controller) trackStreamRetry(ctx context.Context, stream *config.Stream) {\n\tnow := time.Now()\n\tstream.StreamInfo.LastRetryAt = now.UnixNano()\n\tstream.StreamInfo.Retries++\n\tif !stream.ShouldSendRetryUpdate(now, streamRetryUpdateInterval) {\n\t\treturn\n\t}\n\tlogger.Infow(\"retrying stream update\",\n\t\t\"url\", stream.RedactedUrl,\n\t\t\"retries\", stream.StreamInfo.Retries,\n\t)\n\n\tc.streamUpdated(ctx)\n}\n\nfunc (c *Controller) onEOSSent() {\n\t// for video-only track/track composite, EOS might have already\n\t// made it through the pipeline by the time endRecording is closed\n\tif (c.RequestType == types.RequestTypeTrack || c.RequestType == types.RequestTypeTrackComposite) && !c.AudioEnabled {\n\t\t// this will not actually send a second EOS, but will make sure everything is in the correct state\n\t\tc.SendEOS(context.Background(), livekit.EndReasonSrcClosed)\n\t}\n}\n\nfunc (c *Controller) onStorageLimitReached() {\n\tc.storageLimitOnce.Do(func() {\n\t\tc.Info.SetLimitReached()\n\t\tc.SendEOS(context.Background(), livekit.EndReasonLimitReached)\n\t})\n}\n\nfunc (c *Controller) SendEOS(ctx context.Context, reason string) {\n\tctx, span := tracer.Start(ctx, \"Pipeline.SendEOS\")\n\tdefer span.End()\n\n\tc.eosSent.Once(func() {\n\t\tif c.limitTimer != nil {\n\t\t\tc.limitTimer.Stop()\n\t\t}\n\n\t\tc.Info.SetEndReason(reason)\n\t\tlogger.Debugw(\"stopping pipeline\", \"reason\", reason)\n\n\t\tswitch c.Info.Status {\n\t\tcase 
livekit.EgressStatus_EGRESS_STARTING:\n\t\t\tc.Info.SetAborted(livekit.MsgStoppedBeforeStarted)\n\t\t\tc.p.Stop()\n\n\t\tcase livekit.EgressStatus_EGRESS_ABORTED,\n\t\t\tlivekit.EgressStatus_EGRESS_FAILED:\n\t\t\tc.p.Stop()\n\n\t\tcase livekit.EgressStatus_EGRESS_ACTIVE:\n\t\t\tc.Info.UpdateStatus(livekit.EgressStatus_EGRESS_ENDING)\n\t\t\tc.sendHandlerUpdate(ctx, c.Info)\n\t\t\tc.sendEOS()\n\n\t\tcase livekit.EgressStatus_EGRESS_ENDING:\n\t\t\tc.sendHandlerUpdate(ctx, c.Info)\n\t\t\tc.sendEOS()\n\n\t\tcase livekit.EgressStatus_EGRESS_LIMIT_REACHED:\n\t\t\tc.sendEOS()\n\t\t}\n\n\t\tif c.SourceType == types.SourceTypeWeb {\n\t\t\t// web source uses the current time\n\t\t\tc.updateEndTime()\n\t\t}\n\t})\n}\n\nfunc (c *Controller) sendEOS() {\n\tfor _, sinks := range c.sinks {\n\t\tfor _, s := range sinks {\n\t\t\ts.AddEOSProbe()\n\t\t}\n\t}\n\n\tc.eosTimer = time.AfterFunc(eosTimeout, func() {\n\t\tlogger.Debugw(\"eos timer firing\")\n\t\tfor egressType, si := range c.sinks {\n\t\t\tswitch egressType {\n\t\t\tcase types.EgressTypeFile, types.EgressTypeSegments, types.EgressTypeImages:\n\t\t\t\tfor _, s := range si {\n\t\t\t\t\tif !s.EOSReceived() {\n\t\t\t\t\t\tc.OnError(errors.ErrPipelineFrozen)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\t// finalization not required\n\t\t\t}\n\t\t}\n\t\tc.p.Stop()\n\t})\n\n\tgo func() {\n\t\tc.p.SendEOS()\n\t\tlogger.Debugw(\"eos sent\")\n\t}()\n}\n\nfunc (c *Controller) OnError(err error) {\n\tlogger.Errorw(\"controller onError invoked\", err)\n\tif errors.Is(err, errors.ErrPipelineFrozen) && c.Debug.EnableProfiling {\n\t\tc.generateDotFile(\"error\")\n\t\tc.generatePProf()\n\t}\n\n\tif c.Info.Status != livekit.EgressStatus_EGRESS_FAILED && (!c.eosSent.IsBroken() || c.FinalizationRequired) {\n\t\tc.Info.SetFailed(err)\n\t}\n\n\tgo c.p.Stop()\n}\n\nfunc (c *Controller) Close() {\n\tconst closeSlowThreshold = 1 * time.Hour\n\tcloseStart := time.Now()\n\tcloseDone := make(chan struct{})\n\tdefer 
close(closeDone)\n\n\tgo func() {\n\t\tselect {\n\t\tcase <-closeDone:\n\t\t\treturn\n\t\tcase <-time.After(closeSlowThreshold):\n\t\t\tlogger.Warnw(\"Close() taking longer than expected\", nil,\n\t\t\t\t\"threshold\", closeSlowThreshold,\n\t\t\t\t\"elapsed\", time.Since(closeStart),\n\t\t\t\t\"egressID\", c.Info.EgressId,\n\t\t\t\t\"sourceType\", c.SourceType,\n\t\t\t)\n\t\t}\n\t}()\n\n\tc.stopOutputSizeMonitor()\n\n\tif c.SourceType == types.SourceTypeSDK || !c.eosSent.IsBroken() {\n\t\t// sdk source will use the timestamp of the last packet pushed to the pipeline\n\t\tc.updateEndTime()\n\t}\n\n\t// update status\n\tif c.Info.Status == livekit.EgressStatus_EGRESS_FAILED {\n\t\tif o := c.GetStreamConfig(); o != nil {\n\t\t\to.Streams.Range(func(_, stream any) bool {\n\t\t\t\tstream.(*config.Stream).StreamInfo.Status = livekit.StreamInfo_FAILED\n\t\t\t\treturn true\n\t\t\t})\n\t\t}\n\t}\n\n\t// ensure egress ends with a final state\n\tswitch c.Info.Status {\n\tcase livekit.EgressStatus_EGRESS_STARTING:\n\t\tc.Info.SetAborted(livekit.MsgStoppedBeforeStarted)\n\n\tcase livekit.EgressStatus_EGRESS_ACTIVE,\n\t\tlivekit.EgressStatus_EGRESS_ENDING:\n\t\tc.Info.SetComplete()\n\t\tfallthrough\n\n\tcase livekit.EgressStatus_EGRESS_LIMIT_REACHED,\n\t\tlivekit.EgressStatus_EGRESS_COMPLETE:\n\t\t// upload manifest and add location to egress info\n\t\tc.uploadManifest()\n\t}\n\n\t// upload debug files\n\tc.uploadDebugFiles()\n}\n\nfunc (c *Controller) startSessionLimitTimer(ctx context.Context) {\n\tvar timeout time.Duration\n\tfor egressType := range c.Outputs {\n\t\tvar t time.Duration\n\t\tswitch egressType {\n\t\tcase types.EgressTypeFile:\n\t\t\tt = c.FileOutputMaxDuration\n\t\tcase types.EgressTypeStream, types.EgressTypeWebsocket:\n\t\t\tt = c.StreamOutputMaxDuration\n\t\tcase types.EgressTypeSegments:\n\t\t\tt = c.SegmentOutputMaxDuration\n\t\tcase types.EgressTypeImages:\n\t\t\tt = c.ImageOutputMaxDuration\n\t\t}\n\t\tif t > 0 && (timeout == 0 || t < timeout) 
{\n\t\t\ttimeout = t\n\t\t}\n\t}\n\n\tif timeout > 0 {\n\t\tc.limitTimer = time.AfterFunc(timeout, func() {\n\t\t\tswitch c.Info.Status {\n\t\t\tcase livekit.EgressStatus_EGRESS_STARTING:\n\t\t\t\tc.Info.SetAborted(livekit.MsgLimitReachedWithoutStart)\n\t\t\tcase livekit.EgressStatus_EGRESS_ACTIVE:\n\t\t\t\tc.Info.SetLimitReached()\n\t\t\t}\n\n\t\t\tif c.playing.IsBroken() {\n\t\t\t\tc.SendEOS(ctx, livekit.EndReasonLimitReached)\n\t\t\t} else {\n\t\t\t\tc.p.Stop()\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc (c *Controller) startOutputSizeMonitor() {\n\tctx, cancel := context.WithCancel(context.Background())\n\tc.storageMonitorCancel = cancel\n\n\tc.p.AddOnStop(func() error {\n\t\tcancel()\n\t\treturn nil\n\t})\n\n\tgo c.monitorOutputDirSize(ctx)\n}\n\nfunc (c *Controller) stopOutputSizeMonitor() {\n\tif c.storageMonitorCancel != nil {\n\t\tc.storageMonitorCancel()\n\t\tc.storageMonitorCancel = nil\n\t}\n}\n\nfunc (c *Controller) monitorOutputDirSize(ctx context.Context) {\n\tthresholds := []int64{\n\t\t1 << 30,  // 1GB\n\t\t3 << 30,  // 3GB\n\t\t5 << 30,  // 5GB\n\t\t10 << 30, // 10GB\n\t\t20 << 30, // 20GB\n\t\t50 << 30, // 50GB\n\t}\n\n\tticker := time.NewTicker(15 * time.Second)\n\tdefer ticker.Stop()\n\n\tnextThreshold := 0\n\tstatErrorLogged := false\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t}\n\n\t\tsize, files, err := c.getOutputDirStats()\n\t\tif err != nil {\n\t\t\tif !statErrorLogged {\n\t\t\t\tlogger.Debugw(\"failed to stat output directory\", err, \"dir\", c.TmpDir)\n\t\t\t\tstatErrorLogged = true\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tstatErrorLogged = false\n\n\t\tif c.FileOutputMaxSize > 0 && size >= c.FileOutputMaxSize {\n\t\t\tc.logOutputFileSizes(files, 10)\n\t\t\tlogger.Warnw(\n\t\t\t\t\"output storage limit reached\",\n\t\t\t\tnil,\n\t\t\t\t\"dir\", c.TmpDir,\n\t\t\t\t\"bytesWritten\", size,\n\t\t\t\t\"limitBytes\", 
c.FileOutputMaxSize,\n\t\t\t)\n\t\t\tc.onStorageLimitReached()\n\t\t\treturn\n\t\t}\n\n\t\tthresholdTriggered := false\n\t\tfor nextThreshold < len(thresholds) && size >= thresholds[nextThreshold] {\n\t\t\tlogger.Debugw(\n\t\t\t\t\"output size threshold exceeded\",\n\t\t\t\t\"dir\", c.TmpDir,\n\t\t\t\t\"bytesWritten\", size,\n\t\t\t\t\"thresholdBytes\", thresholds[nextThreshold],\n\t\t\t)\n\t\t\tthresholdTriggered = true\n\t\t\tnextThreshold++\n\t\t}\n\t\tif thresholdTriggered {\n\t\t\tc.logOutputFileSizes(files, 10)\n\t\t}\n\t}\n}\n\ntype outputFileStat struct {\n\tpath string\n\tsize int64\n}\n\nfunc (c *Controller) getOutputDirStats() (int64, []outputFileStat, error) {\n\tif c.TmpDir == \"\" {\n\t\treturn 0, nil, nil\n\t}\n\n\tvar files []outputFileStat\n\n\tvar total int64\n\n\terr := filepath.Walk(c.TmpDir, func(p string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\n\t\tif info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\n\t\ttotal += info.Size()\n\n\t\trel, relErr := filepath.Rel(c.TmpDir, p)\n\t\tif relErr != nil {\n\t\t\trel = p\n\t\t}\n\n\t\tfiles = append(files, outputFileStat{\n\t\t\tpath: rel,\n\t\t\tsize: info.Size(),\n\t\t})\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn 0, nil, err\n\t}\n\n\tsort.Slice(files, func(i, j int) bool {\n\t\treturn files[i].size > files[j].size\n\t})\n\n\treturn total, files, nil\n}\n\nfunc (c *Controller) logOutputFileSizes(files []outputFileStat, limit int) {\n\tif files == nil {\n\t\treturn\n\t}\n\n\tif limit > 0 && len(files) > limit {\n\t\tfiles = files[:limit]\n\t}\n\n\tfor _, f := range files {\n\t\tlogger.Infow(\"output file size\", \"file\", f.path, \"bytes\", f.size)\n\t}\n}\n\nfunc (c *Controller) updateStartTime(startedAt int64) {\n\tfor egressType, o := range c.Outputs {\n\t\tif len(o) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch egressType {\n\t\tcase types.EgressTypeStream, 
types.EgressTypeWebsocket:\n\t\t\tstreamConfig := o[0].(*config.StreamConfig)\n\t\t\tif streamConfig.OutputType == types.OutputTypeRTMP {\n\t\t\t\t// rtmp has special start time handling\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tstreamConfig.Streams.Range(func(_, stream any) bool {\n\t\t\t\tstream.(*config.Stream).StreamInfo.StartedAt = startedAt\n\t\t\t\treturn true\n\t\t\t})\n\n\t\tcase types.EgressTypeFile:\n\t\t\to[0].(*config.FileConfig).FileInfo.StartedAt = startedAt\n\n\t\tcase types.EgressTypeSegments:\n\t\t\to[0].(*config.SegmentConfig).SegmentsInfo.StartedAt = startedAt\n\n\t\tcase types.EgressTypeImages:\n\t\t\tfor _, c := range o {\n\t\t\t\tc.(*config.ImageConfig).ImagesInfo.StartedAt = startedAt\n\t\t\t}\n\t\t}\n\t}\n\n\tif c.Info.Status == livekit.EgressStatus_EGRESS_STARTING {\n\t\tc.Info.UpdateStatus(livekit.EgressStatus_EGRESS_ACTIVE)\n\t\tc.sendHandlerUpdate(context.Background(), c.Info)\n\t}\n}\n\nfunc (c *Controller) updateStreamStartTime(streamID string) {\n\tif o := c.GetStreamConfig(); o != nil {\n\t\to.Streams.Range(func(_, s any) bool {\n\t\t\tif stream := s.(*config.Stream); stream.StreamID == streamID && stream.StreamInfo.StartedAt == 0 {\n\t\t\t\tlogger.Debugw(\"stream started\", \"url\", stream.RedactedUrl)\n\t\t\t\tstream.StreamInfo.StartedAt = time.Now().UnixNano()\n\t\t\t\tc.Info.UpdatedAt = time.Now().UnixNano()\n\t\t\t\tc.streamUpdated(context.Background())\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn true\n\t\t})\n\t}\n}\n\nfunc (c *Controller) streamUpdated(ctx context.Context) {\n\tc.Info.UpdatedAt = time.Now().UnixNano()\n\n\tif o := c.GetStreamConfig(); o != nil {\n\t\tskipUpdate := false\n\t\t// when adding streams, wait until they've all either started or failed before sending the update\n\t\to.Streams.Range(func(_, stream any) bool {\n\t\t\tstreamInfo := stream.(*config.Stream).StreamInfo\n\t\t\tif streamInfo.Status == livekit.StreamInfo_ACTIVE && streamInfo.StartedAt == 0 {\n\t\t\t\tskipUpdate = true\n\t\t\t\treturn 
false\n\t\t\t}\n\t\t\treturn true\n\t\t})\n\t\tif skipUpdate {\n\t\t\treturn\n\t\t}\n\t}\n\n\tc.sendHandlerUpdate(ctx, c.Info)\n}\n\nfunc (c *Controller) sendHandlerUpdate(ctx context.Context, info *livekit.EgressInfo) {\n\tif c.ipcServiceClient != nil {\n\t\t_, _ = c.ipcServiceClient.HandlerUpdate(ctx, info)\n\t}\n}\n\nfunc (c *Controller) updateEndTime() {\n\tendedAt := c.src.GetEndedAt()\n\tif c.pipelineEndedAt > endedAt {\n\t\tendedAt = c.pipelineEndedAt\n\t}\n\n\tfor egressType, o := range c.Outputs {\n\t\tif len(o) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tswitch egressType {\n\t\tcase types.EgressTypeStream, types.EgressTypeWebsocket:\n\t\t\tstreamConfig := o[0].(*config.StreamConfig)\n\t\t\tstreamConfig.Streams.Range(func(_, s any) bool {\n\t\t\t\tstream := s.(*config.Stream)\n\t\t\t\tstream.StreamInfo.Status = livekit.StreamInfo_FINISHED\n\t\t\t\tstream.UpdateEndTime(endedAt)\n\t\t\t\treturn true\n\t\t\t})\n\n\t\tcase types.EgressTypeFile:\n\t\t\tfileInfo := o[0].(*config.FileConfig).FileInfo\n\t\t\tif fileInfo.StartedAt == 0 {\n\t\t\t\tfileInfo.StartedAt = endedAt\n\t\t\t}\n\t\t\tfileInfo.EndedAt = endedAt\n\t\t\tfileInfo.Duration = endedAt - fileInfo.StartedAt\n\n\t\tcase types.EgressTypeSegments:\n\t\t\tsegmentsInfo := o[0].(*config.SegmentConfig).SegmentsInfo\n\t\t\tif segmentsInfo.StartedAt == 0 {\n\t\t\t\tsegmentsInfo.StartedAt = endedAt\n\t\t\t}\n\t\t\tsegmentsInfo.EndedAt = endedAt\n\t\t\tsegmentsInfo.Duration = endedAt - segmentsInfo.StartedAt\n\n\t\tcase types.EgressTypeImages:\n\t\t\tfor _, c := range o {\n\t\t\t\timageInfo := c.(*config.ImageConfig).ImagesInfo\n\t\t\t\tif imageInfo.StartedAt == 0 {\n\t\t\t\t\timageInfo.StartedAt = endedAt\n\t\t\t\t}\n\t\t\t\timageInfo.EndedAt = endedAt\n\t\t\t}\n\t\t}\n\t}\n}\n\n// uploadManifest happens last, after all sinks have finished\nfunc (c *Controller) uploadManifest() {\n\tif c.Manifest == nil {\n\t\treturn\n\t}\n\n\tb, err := c.Manifest.Close(c.Info.EndedAt)\n\tif err != nil {\n\t\tlogger.Errorw(\"failed to 
close manifest\", err)\n\t\treturn\n\t}\n\n\tmanifestPath := path.Join(c.TmpDir, fmt.Sprintf(\"%s.json\", c.Info.EgressId))\n\tf, err := os.Create(manifestPath)\n\tif err != nil {\n\t\tlogger.Errorw(\"failed to create manifest file\", err)\n\t\treturn\n\t}\n\n\t_, err = f.Write(b)\n\tif err != nil {\n\t\tlogger.Errorw(\"failed to write to manifest file\", err)\n\t\treturn\n\t}\n\t_ = f.Close()\n\n\tinfoUpdated := false\n\tfor _, si := range c.sinks {\n\t\tfor _, s := range si {\n\t\t\tlocation, uploaded, err := s.UploadManifest(manifestPath)\n\t\t\tif err != nil {\n\t\t\t\tif c.Info.BackupStorageUsed {\n\t\t\t\t\tlogger.Errorw(\"failed to upload manifest\", err)\n\t\t\t\t} else {\n\t\t\t\t\tlogger.Warnw(\"failed to upload manifest\", err)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif !infoUpdated && uploaded {\n\t\t\t\tc.Info.ManifestLocation = location\n\t\t\t\tinfoUpdated = true\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (c *Controller) getStreamSink() *sink.StreamSink {\n\ts := c.sinks[types.EgressTypeStream]\n\tif len(s) == 0 {\n\t\treturn nil\n\t}\n\n\treturn s[0].(*sink.StreamSink)\n}\n\nfunc (c *Controller) getSegmentSink() *sink.SegmentSink {\n\ts := c.sinks[types.EgressTypeSegments]\n\tif len(s) == 0 {\n\t\treturn nil\n\t}\n\n\treturn s[0].(*sink.SegmentSink)\n}\n\nfunc (c *Controller) getImageSink(name string) *sink.ImageSink {\n\tid := name[len(\"multifilesink_\"):]\n\n\ts := c.sinks[types.EgressTypeImages]\n\tif len(s) == 0 {\n\t\treturn nil\n\t}\n\n\t// Use a map here?\n\tfor _, si := range s {\n\t\tif i := si.(*sink.ImageSink); i.Id == id {\n\t\t\treturn i\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/pipeline/debug.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage pipeline\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/go-gst/go-gst/gst\"\n\t\"google.golang.org/grpc/codes\"\n\t\"google.golang.org/grpc/status\"\n\n\t\"github.com/livekit/egress/pkg/pipeline/sink/uploader\"\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/logger\"\n\t\"github.com/livekit/protocol/pprof\"\n)\n\nfunc (c *Controller) GetGstPipelineDebugDot() (string, error) {\n\tdot := make(chan string, 1)\n\tgo func() {\n\t\tdot <- c.p.DebugBinToDotData(gst.DebugGraphShowAll)\n\t}()\n\n\tselect {\n\tcase d := <-dot:\n\t\treturn d, nil\n\tcase <-time.After(3 * time.Second):\n\t\treturn \"\", status.New(codes.DeadlineExceeded, \"timed out requesting pipeline debug info\").Err()\n\t}\n}\n\nfunc sanitizeDebugFilenameComponent(s string) string {\n\tvar b strings.Builder\n\tfor _, r := range s {\n\t\tif (r >= 'a' && r <= 'z') ||\n\t\t\t(r >= 'A' && r <= 'Z') ||\n\t\t\t(r >= '0' && r <= '9') ||\n\t\t\tr == '-' || r == '_' {\n\t\t\tb.WriteRune(r)\n\t\t} else {\n\t\t\tb.WriteRune('_')\n\t\t}\n\t}\n\treturn strings.Trim(b.String(), \"_\")\n}\n\nfunc (c *Controller) writeDotFile(filename, contents string) {\n\tf, err := os.Create(path.Join(c.TmpDir, filename))\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer f.Close()\n\n\t_, _ = f.WriteString(contents)\n}\n\nfunc (c 
*Controller) generateDotFile(reason string) {\n\tdot, err := c.GetGstPipelineDebugDot()\n\tif err != nil {\n\t\tlogger.Errorw(\"failed to get gst pipeline debug dot\", err)\n\t\treturn\n\t}\n\n\t// always write the canonical file name for easy discovery\n\tc.writeDotFile(fmt.Sprintf(\"%s.dot\", c.Info.EgressId), dot)\n\n\tif reason == \"\" {\n\t\tlogger.Errorw(\"failed to get gst pipeline debug dot, reason is empty\", nil)\n\t\treturn\n\t}\n\n\t// make sure all dot captures for the egressID are written with timestamp suffix\n\tvar suffixParts []string\n\tif ext := sanitizeDebugFilenameComponent(reason); ext != \"\" {\n\t\tsuffixParts = append(suffixParts, ext)\n\t}\n\tsuffixParts = append(suffixParts, time.Now().UTC().Format(\"20060102T150405Z\"))\n\n\tfilename := fmt.Sprintf(\"%s_%s.dot\", c.Info.EgressId, strings.Join(suffixParts, \"_\"))\n\tc.writeDotFile(filename, dot)\n}\n\nfunc (c *Controller) generatePProf() {\n\tb, err := pprof.GetProfileData(context.Background(), \"goroutine\", 0, 0)\n\tif err != nil {\n\t\tlogger.Errorw(\"failed to get profile data\", err)\n\t\treturn\n\t}\n\n\tf, err := os.Create(path.Join(c.TmpDir, fmt.Sprintf(\"%s.prof\", c.Info.EgressId)))\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer f.Close()\n\n\t_, _ = f.Write(b)\n}\n\nvar debugFileDataTypes = map[string]types.OutputType{\n\t\"csv\":  \"text/csv\",\n\t\"dot\":  types.OutputTypeBlob,\n\t\"prof\": types.OutputTypeBlob,\n\t\"log\":  \"text/plain\",\n}\n\nfunc (c *Controller) uploadDebugFiles() {\n\tfiles, err := os.ReadDir(c.TmpDir)\n\tif err != nil {\n\t\tlogger.Errorw(\"failed to read tmp dir\", err)\n\t\treturn\n\t}\n\n\tvar u *uploader.Uploader\n\n\tfor _, f := range files {\n\t\tinfo, err := f.Info()\n\t\tif err != nil || info.Size() == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\ts := strings.Split(f.Name(), \".\")\n\t\toutputType, ok := debugFileDataTypes[s[len(s)-1]]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tif u == nil {\n\t\t\tu, err = uploader.New(&c.Debug.StorageConfig, nil, 
c.monitor, nil, nil)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorw(\"failed to create uploader\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tlocal := path.Join(c.TmpDir, f.Name())\n\t\tstorage := path.Join(c.Info.EgressId, f.Name())\n\t\t_, _, err = u.Upload(local, storage, outputType, false)\n\t\tif err != nil {\n\t\t\tlogger.Errorw(\"failed to upload debug file\", err, \"filename\", local)\n\t\t\treturn\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/pipeline/sink/file.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage sink\n\nimport (\n\t\"path\"\n\t\"time\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/gstreamer\"\n\t\"github.com/livekit/egress/pkg/pipeline/builder\"\n\t\"github.com/livekit/egress/pkg/pipeline/sink/uploader\"\n\t\"github.com/livekit/egress/pkg/stats\"\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/logger\"\n)\n\ntype FileSink struct {\n\t*base\n\t*config.FileConfig\n\t*uploader.Uploader\n\n\tconf *config.PipelineConfig\n}\n\nfunc newFileSink(\n\tp *gstreamer.Pipeline,\n\tconf *config.PipelineConfig,\n\to *config.FileConfig,\n\tmonitor *stats.HandlerMonitor,\n) (*FileSink, error) {\n\tu, err := uploader.New(o.StorageConfig, conf.BackupConfig, monitor, conf.StorageObserver, conf.Info)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfileBin, err := builder.BuildFileBin(p, conf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err = p.AddSinkBin(fileBin); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &FileSink{\n\t\tbase:       &base{bin: fileBin},\n\t\tFileConfig: o,\n\t\tUploader:   u,\n\t\tconf:       conf,\n\t}, nil\n}\n\nfunc (s *FileSink) Start() error {\n\treturn nil\n}\n\nfunc (s *FileSink) UploadManifest(filepath string) (string, bool, error) {\n\tif s.DisableManifest && !s.conf.Info.BackupStorageUsed {\n\t\treturn \"\", false, nil\n\t}\n\n\tstoragePath := 
path.Join(path.Dir(s.StorageFilepath), path.Base(filepath))\n\tlocation, _, err := s.Upload(filepath, storagePath, types.OutputTypeJSON, false)\n\tif err != nil {\n\t\treturn \"\", false, err\n\t}\n\n\treturn location, true, nil\n}\n\nfunc (s *FileSink) Close() error {\n\tstart := time.Now()\n\tlocation, size, err := s.Upload(s.LocalFilepath, s.StorageFilepath, s.OutputType, false)\n\tif err != nil {\n\t\tlogger.Debugw(\"file upload failed\", err)\n\t\treturn err\n\t}\n\n\ts.FileInfo.Location = location\n\ts.FileInfo.Size = size\n\tlogger.Debugw(\"file upload completed\",\n\t\t\"bytes\", size,\n\t\t\"duration\", time.Since(start))\n\n\tif s.conf.Manifest != nil {\n\t\ts.conf.Manifest.AddFile(s.StorageFilepath, location)\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/pipeline/sink/image.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage sink\n\nimport (\n\t\"fmt\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/frostbyte73/core\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/gstreamer\"\n\t\"github.com/livekit/egress/pkg/pipeline/builder\"\n\t\"github.com/livekit/egress/pkg/pipeline/sink/uploader\"\n\t\"github.com/livekit/egress/pkg/stats\"\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/livekit\"\n\t\"github.com/livekit/protocol/logger\"\n)\n\ntype ImageSink struct {\n\t*base\n\t*config.ImageConfig\n\t*uploader.Uploader\n\n\tconf      *config.PipelineConfig\n\tcallbacks *gstreamer.Callbacks\n\n\tinitialized      bool\n\tstartTime        time.Time\n\tstartRunningTime uint64\n\n\tcreatedImages chan *imageUpdate\n\tdone          core.Fuse\n}\n\ntype imageUpdate struct {\n\ttimestamp uint64\n\tfilename  string\n}\n\nfunc newImageSink(\n\tp *gstreamer.Pipeline,\n\tconf *config.PipelineConfig,\n\to *config.ImageConfig,\n\tcallbacks *gstreamer.Callbacks,\n\tmonitor *stats.HandlerMonitor,\n) (*ImageSink, error) {\n\tu, err := uploader.New(o.StorageConfig, conf.BackupConfig, monitor, conf.StorageObserver, conf.Info)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\timageBin, err := builder.BuildImageBin(o, p, conf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err = 
p.AddSinkBin(imageBin); err != nil {\n\t\treturn nil, err\n\t}\n\n\tmaxPendingUploads := (conf.MaxUploadQueue * 60) / int(o.CaptureInterval)\n\treturn &ImageSink{\n\t\tbase: &base{\n\t\t\tbin: imageBin,\n\t\t},\n\n\t\tImageConfig: o,\n\t\tUploader:    u,\n\n\t\tconf:          conf,\n\t\tcallbacks:     callbacks,\n\t\tcreatedImages: make(chan *imageUpdate, maxPendingUploads),\n\t}, nil\n}\n\nfunc (s *ImageSink) Start() error {\n\tgo func() {\n\t\tvar err error\n\t\tdefer func() {\n\t\t\tif err != nil {\n\t\t\t\ts.callbacks.OnError(err)\n\t\t\t}\n\t\t\ts.done.Break()\n\t\t}()\n\n\t\tfor update := range s.createdImages {\n\t\t\terr = s.handleNewImage(update)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorw(\"new image handling failed\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn nil\n}\n\nfunc (s *ImageSink) handleNewImage(update *imageUpdate) error {\n\ts.ImagesInfo.ImageCount++\n\n\tfilename := update.filename\n\tts := s.getImageTime(update.timestamp)\n\timageLocalPath := path.Join(s.LocalDir, filename)\n\tif s.ImageSuffix != livekit.ImageFileSuffix_IMAGE_SUFFIX_INDEX {\n\t\tvar newFilename string\n\n\t\tswitch s.ImageSuffix {\n\t\tcase livekit.ImageFileSuffix_IMAGE_SUFFIX_TIMESTAMP:\n\t\t\tnewFilename = fmt.Sprintf(\"%s_%s%03d%s\", s.ImagePrefix, ts.Format(\"20060102150405\"), ts.UnixMilli()%1000, types.FileExtensionForOutputType[s.OutputType])\n\t\tcase livekit.ImageFileSuffix_IMAGE_SUFFIX_NONE_OVERWRITE:\n\t\t\tnewFilename = fmt.Sprintf(\"%s%s\", s.ImagePrefix, types.FileExtensionForOutputType[s.OutputType])\n\t\tdefault:\n\t\t\treturn errors.ErrNotSupported(s.ImageSuffix.String())\n\t\t}\n\n\t\tfilename = newFilename\n\t}\n\n\timageStoragePath := path.Join(s.StorageDir, filename)\n\n\tlocation, _, err := s.Upload(imageLocalPath, imageStoragePath, s.OutputType, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif s.conf.Manifest != nil {\n\t\ts.conf.Manifest.AddImage(imageStoragePath, ts, location)\n\t}\n\n\treturn nil\n}\n\nfunc (s *ImageSink) 
getImageTime(pts uint64) time.Time {\n\tif !s.initialized {\n\t\ts.startTime = time.Now()\n\t\ts.startRunningTime = pts\n\t\ts.initialized = true\n\t}\n\n\treturn s.startTime.Add(time.Duration(pts - s.startRunningTime))\n}\n\nfunc (s *ImageSink) NewImage(filepath string, ts uint64) error {\n\tif !strings.HasPrefix(filepath, s.LocalDir) {\n\t\treturn fmt.Errorf(\"invalid filepath\")\n\t}\n\n\tfilename := filepath[len(s.LocalDir)+1:]\n\n\ts.createdImages <- &imageUpdate{\n\t\tfilename:  filename,\n\t\ttimestamp: ts,\n\t}\n\n\treturn nil\n}\n\nfunc (s *ImageSink) UploadManifest(filepath string) (string, bool, error) {\n\tif s.DisableManifest && !s.conf.Info.BackupStorageUsed {\n\t\treturn \"\", false, nil\n\t}\n\n\tstoragePath := path.Join(s.StorageDir, path.Base(filepath))\n\tlocation, _, err := s.Upload(filepath, storagePath, types.OutputTypeJSON, false)\n\tif err != nil {\n\t\treturn \"\", false, err\n\t}\n\n\treturn location, true, nil\n}\n\nfunc (s *ImageSink) Close() error {\n\tclose(s.createdImages)\n\t<-s.done.Watch()\n\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/pipeline/sink/m3u8/writer.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage m3u8\n\nimport (\n\t\"container/list\"\n\t\"fmt\"\n\t\"io/fs\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype PlaylistType string\n\nconst (\n\tPlaylistTypeLive  PlaylistType = \"\"\n\tPlaylistTypeEvent PlaylistType = \"EVENT\"\n)\n\ntype PlaylistWriter interface {\n\tAppend(dateTime time.Time, duration float64, filename string) error\n\tClose() error\n}\n\ntype basePlaylistWriter struct {\n\tfilename       string\n\ttargetDuration int\n}\n\ntype eventPlaylistWriter struct {\n\tbasePlaylistWriter\n}\n\ntype livePlaylistWriter struct {\n\tbasePlaylistWriter\n\n\twindowSize int\n\tmediaSeq   int\n\n\tlivePlaylistHeader   string\n\tlivePlaylistSegments *list.List\n}\n\nfunc (p *basePlaylistWriter) createHeader(plType PlaylistType) string {\n\tvar sb strings.Builder\n\tsb.WriteString(\"#EXTM3U\\n\")\n\tsb.WriteString(\"#EXT-X-VERSION:4\\n\")\n\tif plType != PlaylistTypeLive {\n\t\tfmt.Fprintf(&sb, \"#EXT-X-PLAYLIST-TYPE:%s\\n\", plType)\n\t}\n\tsb.WriteString(\"#EXT-X-ALLOW-CACHE:NO\\n\")\n\tfmt.Fprintf(&sb, \"#EXT-X-TARGETDURATION:%d\\n\", p.targetDuration)\n\tif plType != PlaylistTypeLive {\n\t\tsb.WriteString(\"#EXT-X-MEDIA-SEQUENCE:0\\n\")\n\t}\n\n\treturn sb.String()\n}\n\nfunc (p *basePlaylistWriter) createSegmentEntry(dateTime time.Time, duration float64, filename string) string {\n\tvar sb 
strings.Builder\n\n\tsb.WriteString(\"#EXT-X-PROGRAM-DATE-TIME:\")\n\tsb.WriteString(dateTime.UTC().Format(\"2006-01-02T15:04:05.999Z07:00\"))\n\tsb.WriteString(\"\\n#EXTINF:\")\n\tsb.WriteString(strconv.FormatFloat(duration, 'f', 3, 32))\n\tsb.WriteString(\",\\n\")\n\tsb.WriteString(filename)\n\tsb.WriteString(\"\\n\")\n\n\treturn sb.String()\n}\n\nfunc NewEventPlaylistWriter(filename string, targetDuration int) (PlaylistWriter, error) {\n\tp := &eventPlaylistWriter{\n\t\tbasePlaylistWriter: basePlaylistWriter{\n\t\t\tfilename:       filename,\n\t\t\ttargetDuration: targetDuration,\n\t\t},\n\t}\n\n\tf, err := os.Create(p.filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\t_, err = f.WriteString(p.createHeader(PlaylistTypeEvent))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn p, nil\n}\n\nfunc (p *eventPlaylistWriter) Append(dateTime time.Time, duration float64, filename string) error {\n\tf, err := os.OpenFile(p.filename, os.O_WRONLY|os.O_APPEND, fs.ModeAppend)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\t_, err = f.WriteString(p.createSegmentEntry(dateTime, duration, filename))\n\treturn err\n}\n\n// Close sliding playlist and make them fixed.\nfunc (p *eventPlaylistWriter) Close() error {\n\tf, err := os.OpenFile(p.filename, os.O_WRONLY|os.O_APPEND, fs.ModeAppend)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\t_, err = f.WriteString(\"#EXT-X-ENDLIST\\n\")\n\treturn err\n}\n\nfunc NewLivePlaylistWriter(filename string, targetDuration int, windowSize int) (PlaylistWriter, error) {\n\tp := &livePlaylistWriter{\n\t\tbasePlaylistWriter: basePlaylistWriter{\n\t\t\tfilename:       filename,\n\t\t\ttargetDuration: targetDuration,\n\t\t},\n\t\twindowSize:           windowSize,\n\t\tlivePlaylistSegments: list.New(),\n\t}\n\n\tp.livePlaylistHeader = p.createHeader(PlaylistTypeLive)\n\n\treturn p, nil\n}\n\nfunc (p *livePlaylistWriter) Append(dateTime time.Time, duration float64, filename string) error 
{\n\tf, err := os.Create(p.filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tsegmentStr := p.createSegmentEntry(dateTime, duration, filename)\n\tp.livePlaylistSegments.PushBack(segmentStr)\n\n\tfor p.livePlaylistSegments.Len() > p.windowSize {\n\t\tp.livePlaylistSegments.Remove(p.livePlaylistSegments.Front())\n\t\tp.mediaSeq++\n\t}\n\n\t_, err = f.WriteString(p.generatePlaylist())\n\treturn err\n}\n\nfunc (p *livePlaylistWriter) Close() error {\n\tf, err := os.Create(p.filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\t_, err = f.WriteString(p.generatePlaylist())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = f.WriteString(\"#EXT-X-ENDLIST\\n\")\n\treturn err\n}\n\nfunc (p *livePlaylistWriter) generatePlaylist() string {\n\tvar sb strings.Builder\n\tsb.WriteString(p.livePlaylistHeader)\n\tfmt.Fprintf(&sb, \"#EXT-X-MEDIA-SEQUENCE:%d\\n\", p.mediaSeq)\n\tfor elem := p.livePlaylistSegments.Front(); elem != nil; elem = elem.Next() {\n\t\tsegmentStr := elem.Value.(string)\n\t\tsb.WriteString(segmentStr)\n\t}\n\n\treturn sb.String()\n}\n"
  },
  {
    "path": "pkg/pipeline/sink/m3u8/writer_test.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage m3u8\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestEventPlaylistWriter(t *testing.T) {\n\tplaylistName := \"playlist.m3u8\"\n\n\tw, err := NewEventPlaylistWriter(playlistName, 6)\n\trequire.NoError(t, err)\n\n\tt.Cleanup(func() { _ = os.Remove(playlistName) })\n\n\tnow := time.Unix(0, 1683154504814142000)\n\tduration := 5.994\n\n\tfor i := 0; i < 3; i++ {\n\t\trequire.NoError(t, w.Append(now, duration, fmt.Sprintf(\"playlist_0000%d.ts\", i)))\n\t\tnow = now.Add(time.Millisecond * 5994)\n\t}\n\n\trequire.NoError(t, w.Close())\n\n\tb, err := os.ReadFile(playlistName)\n\trequire.NoError(t, err)\n\n\texpected := \"#EXTM3U\\n#EXT-X-VERSION:4\\n#EXT-X-PLAYLIST-TYPE:EVENT\\n#EXT-X-ALLOW-CACHE:NO\\n#EXT-X-TARGETDURATION:6\\n#EXT-X-MEDIA-SEQUENCE:0\\n#EXT-X-PROGRAM-DATE-TIME:2023-05-03T22:55:04.814Z\\n#EXTINF:5.994,\\nplaylist_00000.ts\\n#EXT-X-PROGRAM-DATE-TIME:2023-05-03T22:55:10.808Z\\n#EXTINF:5.994,\\nplaylist_00001.ts\\n#EXT-X-PROGRAM-DATE-TIME:2023-05-03T22:55:16.802Z\\n#EXTINF:5.994,\\nplaylist_00002.ts\\n#EXT-X-ENDLIST\\n\"\n\trequire.Equal(t, expected, string(b))\n}\n\nfunc TestLivePlaylistWriter(t *testing.T) {\n\tplaylistName := \"playlist.m3u8\"\n\n\tw, err := NewLivePlaylistWriter(playlistName, 6, 3)\n\trequire.NoError(t, err)\n\n\tt.Cleanup(func() { _ = 
os.Remove(playlistName) })\n\n\tnow := time.Unix(0, 1683154504814142000)\n\tduration := 5.994\n\n\tfor i := 0; i < 2; i++ {\n\t\trequire.NoError(t, w.Append(now, duration, fmt.Sprintf(\"playlist_0000%d.ts\", i)))\n\t\tnow = now.Add(time.Millisecond * 5994)\n\t}\n\n\tb, err := os.ReadFile(playlistName)\n\trequire.NoError(t, err)\n\n\texpected := \"#EXTM3U\\n#EXT-X-VERSION:4\\n#EXT-X-ALLOW-CACHE:NO\\n#EXT-X-TARGETDURATION:6\\n#EXT-X-MEDIA-SEQUENCE:0\\n#EXT-X-PROGRAM-DATE-TIME:2023-05-03T22:55:04.814Z\\n#EXTINF:5.994,\\nplaylist_00000.ts\\n#EXT-X-PROGRAM-DATE-TIME:2023-05-03T22:55:10.808Z\\n#EXTINF:5.994,\\nplaylist_00001.ts\\n\"\n\trequire.Equal(t, expected, string(b))\n\n\tfor i := 2; i < 4; i++ {\n\t\trequire.NoError(t, w.Append(now, duration, fmt.Sprintf(\"playlist_0000%d.ts\", i)))\n\t\tnow = now.Add(time.Millisecond * 5994)\n\t}\n\n\trequire.NoError(t, w.Close())\n\n\tb, err = os.ReadFile(playlistName)\n\trequire.NoError(t, err)\n\n\texpected = \"#EXTM3U\\n#EXT-X-VERSION:4\\n#EXT-X-ALLOW-CACHE:NO\\n#EXT-X-TARGETDURATION:6\\n#EXT-X-MEDIA-SEQUENCE:1\\n#EXT-X-PROGRAM-DATE-TIME:2023-05-03T22:55:04.814Z\\n#EXTINF:5.994,\\nplaylist_00001.ts\\n#EXT-X-PROGRAM-DATE-TIME:2023-05-03T22:55:16.802Z\\n#EXTINF:5.994,\\nplaylist_00002.ts\\n#EXT-X-PROGRAM-DATE-TIME:2023-05-03T22:55:22.796Z\\n#EXTINF:5.994,\\nplaylist_00003.ts\\n#EXT-X-ENDLIST\\n\"\n\trequire.Equal(t, expected, string(b))\n}\n"
  },
  {
    "path": "pkg/pipeline/sink/segments.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage sink\n\nimport (\n\t\"fmt\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/frostbyte73/core\"\n\t\"github.com/linkdata/deadlock\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/gstreamer\"\n\t\"github.com/livekit/egress/pkg/pipeline/builder\"\n\t\"github.com/livekit/egress/pkg/pipeline/sink/m3u8\"\n\t\"github.com/livekit/egress/pkg/pipeline/sink/uploader\"\n\t\"github.com/livekit/egress/pkg/stats\"\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/logger\"\n)\n\nconst (\n\tdefaultLivePlaylistWindow = 5\n)\n\ntype SegmentSink struct {\n\t*base\n\t*uploader.Uploader\n\t*config.SegmentConfig\n\n\tconf             *config.PipelineConfig\n\tmanifestPlaylist *config.Playlist\n\tcallbacks        *gstreamer.Callbacks\n\n\tsegmentCount int\n\tplaylist     m3u8.PlaylistWriter\n\tlivePlaylist m3u8.PlaylistWriter\n\n\tsegmentLock  deadlock.Mutex\n\tinfoLock     deadlock.Mutex\n\tplaylistLock deadlock.Mutex\n\n\tinitialized           bool\n\tstartTime             time.Time\n\tlastUpload            time.Time\n\toutputType            types.OutputType\n\tstartRunningTime      uint64\n\topenSegmentsStartTime map[string]uint64\n\n\tclosedSegments  chan SegmentUpdate\n\tplaylistUpdates chan SegmentUpdate\n\tdone            core.Fuse\n}\n\ntype 
SegmentUpdate struct {\n\tendTime        uint64\n\tfilename       string\n\tuploadComplete chan struct{}\n}\n\nfunc newSegmentSink(\n\tp *gstreamer.Pipeline,\n\tconf *config.PipelineConfig,\n\to *config.SegmentConfig,\n\tcallbacks *gstreamer.Callbacks,\n\tmonitor *stats.HandlerMonitor,\n) (*SegmentSink, error) {\n\tu, err := uploader.New(o.StorageConfig, conf.BackupConfig, monitor, conf.StorageObserver, conf.Info)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tplaylistName := path.Join(o.LocalDir, o.PlaylistFilename)\n\tplaylist, err := m3u8.NewEventPlaylistWriter(playlistName, o.SegmentDuration)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar livePlaylist m3u8.PlaylistWriter\n\tif o.LivePlaylistFilename != \"\" {\n\t\tplaylistName = path.Join(o.LocalDir, o.LivePlaylistFilename)\n\t\tlivePlaylist, err = m3u8.NewLivePlaylistWriter(playlistName, o.SegmentDuration, defaultLivePlaylistWindow)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\toutputType := o.OutputType\n\tif outputType == types.OutputTypeHLS {\n\t\toutputType = types.OutputTypeTS\n\t}\n\n\tsegmentBin, err := builder.BuildSegmentBin(p, conf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err = p.AddSinkBin(segmentBin); err != nil {\n\t\treturn nil, err\n\t}\n\n\tmaxPendingUploads := (conf.MaxUploadQueue * 60) / o.SegmentDuration\n\tsegmentSink := &SegmentSink{\n\t\tbase: &base{\n\t\t\tbin: segmentBin,\n\t\t},\n\t\tUploader:              u,\n\t\tSegmentConfig:         o,\n\t\tconf:                  conf,\n\t\tcallbacks:             callbacks,\n\t\tplaylist:              playlist,\n\t\tlivePlaylist:          livePlaylist,\n\t\toutputType:            outputType,\n\t\topenSegmentsStartTime: make(map[string]uint64),\n\t\tclosedSegments:        make(chan SegmentUpdate, maxPendingUploads),\n\t\tplaylistUpdates:       make(chan SegmentUpdate, maxPendingUploads),\n\t}\n\n\tif conf.Manifest != nil {\n\t\tsegmentSink.manifestPlaylist = conf.Manifest.AddPlaylist()\n\t}\n\n\t// Register gauges 
that track the number of segments and playlist updates pending upload\n\tmonitor.RegisterPlaylistChannelSizeGauge(segmentSink.conf.NodeID, segmentSink.conf.ClusterID, segmentSink.conf.Info.EgressId,\n\t\tfunc() float64 {\n\t\t\treturn float64(len(segmentSink.playlistUpdates))\n\t\t})\n\tmonitor.RegisterSegmentsChannelSizeGauge(segmentSink.conf.NodeID, segmentSink.conf.ClusterID, segmentSink.conf.Info.EgressId,\n\t\tfunc() float64 {\n\t\t\treturn float64(len(segmentSink.closedSegments))\n\t\t})\n\n\treturn segmentSink, nil\n}\n\nfunc (s *SegmentSink) Start() error {\n\tgo func() {\n\t\tdefer close(s.playlistUpdates)\n\t\tfor update := range s.closedSegments {\n\t\t\ts.handleClosedSegment(update)\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tdefer s.done.Break()\n\t\tfor update := range s.playlistUpdates {\n\t\t\tif err := s.handlePlaylistUpdates(update); err != nil {\n\t\t\t\ts.callbacks.OnError(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn nil\n}\n\nfunc (s *SegmentSink) handleClosedSegment(update SegmentUpdate) {\n\t// keep playlist updates in order\n\ts.playlistUpdates <- update\n\n\tsegmentLocalPath := path.Join(s.LocalDir, update.filename)\n\tsegmentStoragePath := path.Join(s.StorageDir, update.filename)\n\n\t// upload in parallel\n\tgo func() {\n\t\tdefer close(update.uploadComplete)\n\n\t\tlocation, size, err := s.Upload(segmentLocalPath, segmentStoragePath, s.outputType, true)\n\t\tif err != nil {\n\t\t\ts.callbacks.OnError(err)\n\t\t\treturn\n\t\t}\n\n\t\t// lock segment info updates\n\t\ts.infoLock.Lock()\n\t\ts.SegmentsInfo.SegmentCount++\n\t\ts.SegmentsInfo.Size += size\n\t\tif s.manifestPlaylist != nil {\n\t\t\ts.manifestPlaylist.AddSegment(segmentStoragePath, location)\n\t\t}\n\t\ts.infoLock.Unlock()\n\t}()\n}\n\nfunc (s *SegmentSink) handlePlaylistUpdates(update SegmentUpdate) error {\n\ts.segmentLock.Lock()\n\tt, ok := s.openSegmentsStartTime[update.filename]\n\tif !ok {\n\t\ts.segmentLock.Unlock()\n\t\treturn fmt.Errorf(\"no open segment with the 
name %s\", update.filename)\n\t}\n\tdelete(s.openSegmentsStartTime, update.filename)\n\ts.segmentLock.Unlock()\n\n\tduration := float64(time.Duration(update.endTime-t)) / float64(time.Second)\n\tsegmentStartTime := s.startTime.Add(time.Duration(t - s.startRunningTime))\n\n\t// do not update playlist until upload is complete\n\t<-update.uploadComplete\n\n\ts.playlistLock.Lock()\n\tdefer s.playlistLock.Unlock()\n\n\tif err := s.playlist.Append(segmentStartTime, duration, update.filename); err != nil {\n\t\treturn err\n\t}\n\n\ts.segmentCount++\n\tif s.shouldUploadPlaylist() {\n\t\t// ignore playlist upload failures until close\n\t\t_ = s.uploadPlaylist()\n\t}\n\n\tif s.livePlaylist != nil {\n\t\tif err := s.livePlaylist.Append(segmentStartTime, duration, update.filename); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// ignore playlist upload failures until close\n\t\t_ = s.uploadLivePlaylist()\n\t}\n\n\treturn nil\n}\n\n// Each segment adds about 100 bytes in the playlist, and long playlists can get very large.\n// Uploads every N segments, where N is the number of hours, with a minimum frequency of once per minute\nfunc (s *SegmentSink) shouldUploadPlaylist() bool {\n\treturn s.lastUpload.IsZero() ||\n\t\ts.segmentCount%(int(time.Since(s.startTime)/time.Hour)+1) == 0 ||\n\t\ttime.Since(s.lastUpload) > time.Minute\n}\n\nfunc (s *SegmentSink) uploadPlaylist() error {\n\tplaylistLocalPath := path.Join(s.LocalDir, s.PlaylistFilename)\n\tplaylistStoragePath := path.Join(s.StorageDir, s.PlaylistFilename)\n\tplaylistLocation, _, err := s.Upload(playlistLocalPath, playlistStoragePath, s.OutputType, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.lastUpload = time.Now()\n\ts.SegmentsInfo.PlaylistLocation = playlistLocation\n\tif s.manifestPlaylist != nil {\n\t\ts.manifestPlaylist.Location = playlistLocation\n\t}\n\treturn nil\n}\n\nfunc (s *SegmentSink) uploadLivePlaylist() error {\n\tliveLocalPath := path.Join(s.LocalDir, s.LivePlaylistFilename)\n\tliveStoragePath := 
path.Join(s.StorageDir, s.LivePlaylistFilename)\n\tlivePlaylistLocation, _, err := s.Upload(liveLocalPath, liveStoragePath, s.OutputType, false)\n\tif err == nil {\n\t\ts.SegmentsInfo.LivePlaylistLocation = livePlaylistLocation\n\t}\n\treturn err\n}\n\nfunc (s *SegmentSink) UpdateStartDate(t time.Time) {\n\ts.segmentLock.Lock()\n\tdefer s.segmentLock.Unlock()\n\n\ts.startTime = t\n}\n\nfunc (s *SegmentSink) FragmentOpened(filepath string, startTime uint64) error {\n\tif !strings.HasPrefix(filepath, s.LocalDir) {\n\t\treturn fmt.Errorf(\"invalid filepath\")\n\t}\n\n\tfilename := filepath[len(s.LocalDir)+1:]\n\n\ts.segmentLock.Lock()\n\tdefer s.segmentLock.Unlock()\n\n\tif !s.initialized {\n\t\ts.initialized = true\n\t\ts.startRunningTime = startTime\n\t}\n\n\tif _, ok := s.openSegmentsStartTime[filename]; ok {\n\t\treturn fmt.Errorf(\"segment with this name already started\")\n\t}\n\n\ts.openSegmentsStartTime[filename] = startTime\n\treturn nil\n}\n\nfunc (s *SegmentSink) FragmentClosed(filepath string, endTime uint64) error {\n\tif !strings.HasPrefix(filepath, s.LocalDir) {\n\t\treturn fmt.Errorf(\"invalid filepath\")\n\t}\n\n\tfilename := filepath[len(s.LocalDir)+1:]\n\n\tselect {\n\tcase s.closedSegments <- SegmentUpdate{\n\t\tfilename:       filename,\n\t\tendTime:        endTime,\n\t\tuploadComplete: make(chan struct{}),\n\t}:\n\t\treturn nil\n\n\tdefault:\n\t\terr := errors.New(\"segment upload job queue is full\")\n\t\tlogger.Infow(\"failed to upload segment\", \"error\", err)\n\t\treturn errors.ErrUploadFailed(filename, err)\n\t}\n}\n\nfunc (s *SegmentSink) UploadManifest(filepath string) (string, bool, error) {\n\tif s.DisableManifest && !s.conf.Info.BackupStorageUsed {\n\t\treturn \"\", false, nil\n\t}\n\n\tstoragePath := path.Join(s.StorageDir, path.Base(filepath))\n\tlocation, _, err := s.Upload(filepath, storagePath, types.OutputTypeJSON, false)\n\tif err != nil {\n\t\treturn \"\", false, err\n\t}\n\n\treturn location, true, nil\n}\n\nfunc (s 
*SegmentSink) Close() error {\n\t// wait for pending jobs to finish\n\tclose(s.closedSegments)\n\t<-s.done.Watch()\n\n\ts.playlistLock.Lock()\n\tdefer s.playlistLock.Unlock()\n\n\tif err := s.playlist.Close(); err != nil {\n\t\treturn err\n\t}\n\tif err := s.uploadPlaylist(); err != nil {\n\t\treturn err\n\t}\n\n\tif s.livePlaylist != nil {\n\t\tif err := s.livePlaylist.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := s.uploadLivePlaylist(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/pipeline/sink/sink.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage sink\n\nimport (\n\t\"go.uber.org/atomic\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/gstreamer\"\n\t\"github.com/livekit/egress/pkg/stats\"\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/logger\"\n)\n\ntype Sink interface {\n\tStart() error\n\tAddEOSProbe()\n\tEOSReceived() bool\n\tClose() error\n\tUploadManifest(string) (string, bool, error)\n}\n\ntype base struct {\n\tbin         *gstreamer.Bin\n\teosReceived atomic.Bool\n}\n\nfunc NewSink(\n\tp *gstreamer.Pipeline,\n\tconf *config.PipelineConfig,\n\tegressType types.EgressType,\n\to config.OutputConfig,\n\tcallbacks *gstreamer.Callbacks,\n\tmonitor *stats.HandlerMonitor,\n) (Sink, error) {\n\n\tswitch egressType {\n\tcase types.EgressTypeFile:\n\t\treturn newFileSink(p, conf, o.(*config.FileConfig), monitor)\n\n\tcase types.EgressTypeSegments:\n\t\treturn newSegmentSink(p, conf, o.(*config.SegmentConfig), callbacks, monitor)\n\n\tcase types.EgressTypeStream:\n\t\treturn newStreamSink(p, conf, o.(*config.StreamConfig))\n\n\tcase types.EgressTypeWebsocket:\n\t\treturn newWebsocketSink(p, o.(*config.StreamConfig), types.MimeTypeRawAudio, callbacks)\n\n\tcase types.EgressTypeImages:\n\t\treturn newImageSink(p, conf, o.(*config.ImageConfig), callbacks, monitor)\n\n\tdefault:\n\t\treturn 
nil, errors.ErrInvalidInput(\"output type\")\n\t}\n}\n\nfunc (s *base) AddEOSProbe() {\n\tif err := s.bin.AddOnEOSReceived(func() {\n\t\tlogger.Debugw(\"eos received\", \"sink\", s.bin.GetName())\n\t\ts.eosReceived.Store(true)\n\t}); err != nil {\n\t\tlogger.Errorw(\"failed to add EOS probe\", err)\n\t}\n}\n\nfunc (s *base) EOSReceived() bool {\n\treturn s.eosReceived.Load()\n}\n"
  },
  {
    "path": "pkg/pipeline/sink/stream.go",
    "content": "// Copyright 2025 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage sink\n\nimport (\n\t\"time\"\n\n\t\"github.com/frostbyte73/core\"\n\t\"github.com/linkdata/deadlock\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/gstreamer\"\n\t\"github.com/livekit/egress/pkg/logging\"\n\t\"github.com/livekit/egress/pkg/pipeline/builder\"\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/logger\"\n)\n\ntype StreamSink struct {\n\t*base\n\n\tconf   *config.PipelineConfig\n\tbin    *builder.StreamBin\n\tclosed core.Fuse\n\n\tmu      deadlock.RWMutex\n\tstreams map[string]*builder.Stream\n\tloggers map[string]*logging.CSVLogger[logging.StreamStats]\n}\n\nfunc newStreamSink(p *gstreamer.Pipeline, conf *config.PipelineConfig, o *config.StreamConfig) (*StreamSink, error) {\n\tstreamBin, err := builder.BuildStreamBin(p, conf, o)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tss := &StreamSink{\n\t\tbase: &base{\n\t\t\tbin: streamBin.Bin,\n\t\t},\n\t\tconf:    conf,\n\t\tbin:     streamBin,\n\t\tstreams: make(map[string]*builder.Stream),\n\t\tloggers: make(map[string]*logging.CSVLogger[logging.StreamStats]),\n\t}\n\n\to.Streams.Range(func(_, stream any) bool {\n\t\terr = ss.AddStream(stream.(*config.Stream))\n\t\treturn err == nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = p.AddSinkBin(streamBin.Bin); err 
!= nil {\n\t\treturn nil, err\n\t}\n\n\treturn ss, nil\n}\n\nfunc (s *StreamSink) Start() error {\n\tif s.conf.Debug.EnableStreamLogging {\n\t\tgo func() {\n\t\t\tclosed := s.closed.Watch()\n\t\t\tticker := time.NewTicker(time.Second * 10)\n\t\t\tdefer ticker.Stop()\n\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-closed:\n\t\t\t\t\treturn\n\n\t\t\t\tcase <-ticker.C:\n\t\t\t\t\ts.mu.RLock()\n\t\t\t\t\tfor name, stream := range s.streams {\n\t\t\t\t\t\tif stats, ok := stream.Stats(); ok {\n\t\t\t\t\t\t\tif csvLogger, ok := s.loggers[name]; ok {\n\t\t\t\t\t\t\t\tcsvLogger.Write(stats)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\ts.mu.RUnlock()\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\n\treturn nil\n}\n\nfunc (s *StreamSink) AddStream(stream *config.Stream) error {\n\tss, err := s.bin.BuildStream(stream, s.conf.Framerate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.mu.Lock()\n\ts.streams[stream.Name] = ss\n\tif s.conf.Debug.EnableStreamLogging && s.bin.OutputType == types.OutputTypeRTMP {\n\t\tcsvLogger, err := logging.NewCSVLogger[logging.StreamStats](stream.Name)\n\t\tif err != nil {\n\t\t\tlogger.Errorw(\"failed to create stream logger\", err)\n\t\t} else {\n\t\t\ts.loggers[stream.Name] = csvLogger\n\t\t}\n\t}\n\n\ts.mu.Unlock()\n\n\treturn s.bin.Bin.AddSinkBin(ss.Bin)\n}\n\nfunc (s *StreamSink) GetStream(name string) (*config.Stream, error) {\n\ts.mu.Lock()\n\tss, ok := s.streams[name]\n\ts.mu.Unlock()\n\tif !ok {\n\t\treturn nil, errors.ErrStreamNotFound(name)\n\t}\n\n\treturn ss.Conf, nil\n}\n\nfunc (s *StreamSink) ResetStream(stream *config.Stream, streamErr error) (bool, error) {\n\ts.mu.Lock()\n\tss, ok := s.streams[stream.Name]\n\ts.mu.Unlock()\n\tif !ok {\n\t\treturn false, errors.ErrStreamNotFound(stream.RedactedUrl)\n\t}\n\n\treturn ss.Reset(streamErr)\n}\n\nfunc (s *StreamSink) RemoveStream(stream *config.Stream) error {\n\ts.mu.Lock()\n\t_, ok := s.streams[stream.Name]\n\tif !ok {\n\t\ts.mu.Unlock()\n\t\treturn 
errors.ErrStreamNotFound(stream.RedactedUrl)\n\t}\n\tdelete(s.streams, stream.Name)\n\ts.mu.Unlock()\n\n\treturn s.bin.Bin.RemoveSinkBin(stream.Name)\n}\n\nfunc (s *StreamSink) UploadManifest(_ string) (string, bool, error) {\n\treturn \"\", false, nil\n}\n\nfunc (s *StreamSink) Close() error {\n\ts.closed.Once(func() {\n\t\ts.mu.Lock()\n\t\tdefer s.mu.Unlock()\n\n\t\tfor _, l := range s.loggers {\n\t\t\tl.Close()\n\t\t}\n\t})\n\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/pipeline/sink/uploader/uploader.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage uploader\n\nimport (\n\t\"os\"\n\t\"path\"\n\t\"time\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/stats\"\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/livekit\"\n\t\"github.com/livekit/protocol/logger\"\n\t\"github.com/livekit/protocol/observability/storageobs\"\n\t\"github.com/livekit/psrpc\"\n\t\"github.com/livekit/storage\"\n)\n\nconst presignedExpiration = time.Hour * 24 * 7 // 7 days\n\ntype Uploader struct {\n\tprimary         *store\n\tbackup          *store\n\tprimaryFailed   bool\n\tinfo            *livekit.EgressInfo\n\tmonitor         *stats.HandlerMonitor\n\tstorageObserver config.StorageObserver\n}\n\ntype store struct {\n\tstorage.Storage\n\tconf *config.StorageConfig\n\tname string\n}\n\nfunc New(primary, backup *config.StorageConfig, monitor *stats.HandlerMonitor, storageObserver config.StorageObserver, info *livekit.EgressInfo) (*Uploader, error) {\n\tp, err := getUploader(primary)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tu := &Uploader{\n\t\tprimary:         p,\n\t\tinfo:            info,\n\t\tmonitor:         monitor,\n\t\tstorageObserver: storageObserver,\n\t}\n\n\tif backup != nil {\n\t\tb, err := getUploader(backup)\n\t\tif err != nil {\n\t\t\tlogger.Errorw(\"failed to create backup uploader\", 
err)\n\t\t} else {\n\t\t\tu.backup = b\n\t\t}\n\t}\n\n\treturn u, nil\n}\n\nfunc getUploader(conf *config.StorageConfig) (*store, error) {\n\tif conf == nil {\n\t\tconf = &config.StorageConfig{}\n\t}\n\n\tvar (\n\t\ts    storage.Storage\n\t\terr  error\n\t\tname string\n\t)\n\tswitch {\n\tcase conf.S3 != nil:\n\t\ts, err = storage.NewS3(conf.S3)\n\t\tname = \"S3\"\n\tcase conf.GCP != nil:\n\t\ts, err = storage.NewGCP(conf.GCP)\n\t\tname = \"GCP\"\n\tcase conf.Azure != nil:\n\t\ts, err = storage.NewAzure(conf.Azure)\n\t\tname = \"Azure\"\n\tcase conf.AliOSS != nil:\n\t\ts, err = storage.NewAliOSS(conf.AliOSS)\n\t\tname = \"AliOSS\"\n\tdefault:\n\t\ts, err = storage.NewLocal(&storage.LocalConfig{})\n\t\tname = \"Local\"\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &store{\n\t\tStorage: s,\n\t\tconf:    conf,\n\t\tname:    name,\n\t}, nil\n}\n\nfunc (u *Uploader) Upload(\n\tlocalFilepath, storageFilepath string,\n\toutputType types.OutputType,\n\tdeleteAfterUpload bool,\n) (string, int64, error) {\n\n\tvar primaryErr error\n\tif !u.primaryFailed {\n\t\tstart := time.Now()\n\t\tlocation, size, err := u.upload(localFilepath, storageFilepath, outputType, true)\n\t\telapsed := time.Since(start)\n\t\tif err == nil {\n\t\t\tif u.monitor != nil {\n\t\t\t\tu.monitor.IncUploadCountSuccess(string(outputType), float64(elapsed.Milliseconds()))\n\t\t\t}\n\t\t\tif deleteAfterUpload {\n\t\t\t\t_ = os.Remove(localFilepath)\n\t\t\t}\n\t\t\treturn location, size, nil\n\t\t}\n\t\tif u.monitor != nil {\n\t\t\tu.monitor.IncUploadCountFailure(string(outputType), float64(elapsed.Milliseconds()))\n\t\t}\n\t\tu.primaryFailed = true\n\t\tprimaryErr = err\n\n\t}\n\n\tif u.backup != nil {\n\t\tlocation, size, backupErr := u.upload(localFilepath, storageFilepath, outputType, false)\n\t\tif backupErr == nil {\n\t\t\tif u.info != nil {\n\t\t\t\tu.info.SetBackupUsed()\n\t\t\t}\n\t\t\tif u.monitor != nil 
{\n\t\t\t\tu.monitor.IncBackupStorageWrites(string(outputType))\n\t\t\t}\n\t\t\tif deleteAfterUpload {\n\t\t\t\t_ = os.Remove(localFilepath)\n\t\t\t}\n\t\t\treturn location, size, nil\n\t\t}\n\n\t\tif primaryErr != nil {\n\t\t\treturn \"\", 0, psrpc.NewErrorf(psrpc.InvalidArgument,\n\t\t\t\t\"primary: %s\\nbackup: %s\", primaryErr.Error(), backupErr.Error())\n\t\t}\n\t\treturn \"\", 0, psrpc.NewError(psrpc.InvalidArgument, backupErr)\n\t}\n\n\treturn \"\", 0, primaryErr\n}\n\nfunc (u *Uploader) upload(localFilepath string, storageFilepath string, outputType types.OutputType, primary bool) (location string, size int64, err error) {\n\tvar s *store\n\tif primary {\n\t\ts = u.primary\n\t} else {\n\t\ts = u.backup\n\t}\n\n\tstorageFilepath = path.Join(s.conf.Prefix, storageFilepath)\n\n\tlocation, size, err = s.UploadFile(localFilepath, storageFilepath, string(outputType))\n\tif err != nil {\n\t\treturn \"\", 0, errors.ErrUploadFailed(s.name, err)\n\t}\n\n\tif !primary && u.storageObserver != nil {\n\t\tu.storageObserver.OnStorageEvent(u.info.EgressId, string(storageobs.EventOperationUpload), location, size, int64(presignedExpiration/time.Hour/24))\n\t}\n\n\tif s.conf.GeneratePresignedUrl {\n\t\tlocation, err = s.GeneratePresignedUrl(storageFilepath, presignedExpiration)\n\t\tif err != nil {\n\t\t\treturn \"\", 0, errors.ErrUploadFailed(s.name, err)\n\t\t}\n\n\t\tif !primary && u.storageObserver != nil {\n\t\t\tu.storageObserver.OnStorageEvent(u.info.EgressId, string(storageobs.EventOperationDownload), location, size, 0)\n\t\t}\n\t}\n\n\treturn location, size, nil\n}\n"
  },
  {
    "path": "pkg/pipeline/sink/uploader/uploader_test.go",
    "content": "package uploader\n\nimport (\n\t\"io\"\n\t\"net/http\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/protocol/livekit\"\n\t\"github.com/livekit/storage\"\n)\n\nfunc TestUploader(t *testing.T) {\n\tkey := os.Getenv(\"AWS_ACCESS_KEY\")\n\tsecret := os.Getenv(\"AWS_SECRET\")\n\tregion := os.Getenv(\"AWS_REGION\")\n\tbucket := os.Getenv(\"AWS_BUCKET\")\n\n\tprimary := &config.StorageConfig{\n\t\tS3: &storage.S3Config{\n\t\t\tAccessKey: \"nonsense\",\n\t\t\tSecret:    \"public\",\n\t\t\tRegion:    \"us-east-1\",\n\t\t\tBucket:    \"fake-bucket\",\n\t\t},\n\t}\n\tbackup := &config.StorageConfig{\n\t\tPrefix: \"testProject\",\n\t\tS3: &storage.S3Config{\n\t\t\tAccessKey: key,\n\t\t\tSecret:    secret,\n\t\t\tRegion:    region,\n\t\t\tBucket:    bucket,\n\t\t},\n\t\tGeneratePresignedUrl: true,\n\t}\n\n\tinfo := &livekit.EgressInfo{}\n\tu, err := New(primary, backup, nil, nil, info)\n\trequire.NoError(t, err)\n\n\tfilepath := \"uploader_test.go\"\n\tstoragePath := \"uploader_test.go\"\n\n\tlocation, size, err := u.Upload(filepath, storagePath, \"text/plain\", false)\n\trequire.NoError(t, err)\n\n\trequire.NotZero(t, size)\n\trequire.NotEmpty(t, location)\n\trequire.True(t, info.BackupStorageUsed)\n\n\tresponse, err := http.Get(location)\n\trequire.NoError(t, err)\n\tdefer response.Body.Close()\n\n\trequire.Equal(t, http.StatusOK, response.StatusCode)\n\tb, err := io.ReadAll(response.Body)\n\trequire.NoError(t, err)\n\n\trequire.True(t, strings.HasPrefix(string(b), \"package uploader\"))\n}\n"
  },
  {
    "path": "pkg/pipeline/sink/websocket.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage sink\n\nimport (\n\t\"encoding/json\"\n\t\"io\"\n\t\"net/http\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/go-gst/go-gst/gst\"\n\t\"github.com/go-gst/go-gst/gst/app\"\n\t\"github.com/gorilla/websocket\"\n\t\"github.com/linkdata/deadlock\"\n\t\"go.uber.org/atomic\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/gstreamer\"\n\t\"github.com/livekit/egress/pkg/pipeline/builder\"\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/logger\"\n\t\"github.com/livekit/psrpc\"\n)\n\nconst pingPeriod = time.Second * 30\n\ntype WebsocketSink struct {\n\t*base\n\n\tmu            deadlock.Mutex\n\tconn          *websocket.Conn\n\tsinkCallbacks *app.SinkCallbacks\n\tclosed        atomic.Bool\n}\n\nfunc newWebsocketSink(\n\tp *gstreamer.Pipeline,\n\to *config.StreamConfig,\n\tmimeType types.MimeType,\n\tcallbacks *gstreamer.Callbacks,\n) (*WebsocketSink, error) {\n\n\t// set Content-Type header\n\theader := http.Header{}\n\theader.Set(\"Content-Type\", string(mimeType))\n\n\tvar wsUrl string\n\to.Streams.Range(func(url, _ any) bool {\n\t\twsUrl = url.(string)\n\t\treturn false\n\t})\n\n\tconn, _, err := websocket.DefaultDialer.Dial(wsUrl, header)\n\tif err != nil {\n\t\treturn nil, psrpc.NewError(psrpc.InvalidArgument, err)\n\t}\n\n\twebsocketSink := 
&WebsocketSink{\n\t\tbase: &base{},\n\t\tconn: conn,\n\t}\n\twebsocketSink.sinkCallbacks = &app.SinkCallbacks{\n\t\tEOSFunc: func(_ *app.Sink) {\n\t\t\t_ = websocketSink.Close()\n\t\t},\n\t\tNewSampleFunc: func(appSink *app.Sink) gst.FlowReturn {\n\t\t\t// pull the sample that triggered this callback\n\t\t\tsample := appSink.PullSample()\n\t\t\tif sample == nil {\n\t\t\t\treturn gst.FlowOK\n\t\t\t}\n\n\t\t\t// retrieve the buffer from the sample\n\t\t\tbuffer := sample.GetBuffer()\n\t\t\tif buffer == nil {\n\t\t\t\treturn gst.FlowOK\n\t\t\t}\n\n\t\t\t// map the buffer to READ operation\n\t\t\tsamples := buffer.Map(gst.MapRead).Bytes()\n\n\t\t\t// send to writer\n\t\t\t_, err = websocketSink.Write(samples)\n\t\t\tif err != nil {\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\treturn gst.FlowEOS\n\t\t\t\t}\n\t\t\t\tcallbacks.OnError(psrpc.NewError(psrpc.Unavailable, err))\n\t\t\t}\n\n\t\t\treturn gst.FlowOK\n\t\t},\n\t}\n\tcallbacks.AddOnTrackMuted(websocketSink.OnTrackMuted)\n\tcallbacks.AddOnTrackUnmuted(websocketSink.OnTrackUnmuted)\n\n\twebsocketSink.bin, err = builder.BuildWebsocketBin(p, websocketSink.sinkCallbacks)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err = p.AddSinkBin(websocketSink.bin); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn websocketSink, nil\n}\n\nfunc (s *WebsocketSink) Start() error {\n\t// override default ping handler to include locking\n\ts.conn.SetPingHandler(func(_ string) error {\n\t\ts.mu.Lock()\n\t\tdefer s.mu.Unlock()\n\n\t\t_ = s.conn.WriteMessage(websocket.PongMessage, []byte(\"pong\"))\n\t\treturn nil\n\t})\n\n\t// read loop is required for the ping handler to receive pings\n\tgo func() {\n\t\terrCount := 0\n\t\tfor {\n\t\t\t_, _, err := s.conn.ReadMessage()\n\t\t\tif s.closed.Load() {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tvar closeError *websocket.CloseError\n\t\t\t\tif errors.As(err, &closeError) ||\n\t\t\t\t\terrors.Is(err, io.EOF) ||\n\t\t\t\t\tstrings.HasSuffix(err.Error(), \"use of closed network 
connection\") {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\terrCount++\n\t\t\t}\n\t\t\t// reads will panic after 1000 errors, break loop before that happens\n\t\t\tif errCount > 100 {\n\t\t\t\tlogger.Errorw(\"closing websocket reader\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\t// write loop for sending pings\n\tgo func() {\n\t\tticker := time.NewTicker(pingPeriod)\n\t\tdefer ticker.Stop()\n\n\t\tfor {\n\t\t\t<-ticker.C\n\t\t\ts.mu.Lock()\n\t\t\tif s.closed.Load() {\n\t\t\t\ts.mu.Unlock()\n\t\t\t\treturn\n\t\t\t}\n\t\t\t_ = s.conn.WriteMessage(websocket.PingMessage, []byte(\"ping\"))\n\t\t\ts.mu.Unlock()\n\t\t}\n\t}()\n\n\treturn nil\n}\n\nfunc (s *WebsocketSink) Write(p []byte) (int, error) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tif s.closed.Load() {\n\t\treturn 0, io.EOF\n\t}\n\n\treturn len(p), s.conn.WriteMessage(websocket.BinaryMessage, p)\n}\n\nfunc (s *WebsocketSink) OnTrackMuted(_ string) {\n\tif err := s.writeMutedMessage(true); err != nil {\n\t\tlogger.Errorw(\"failed to write mute message\", err)\n\t}\n}\n\nfunc (s *WebsocketSink) OnTrackUnmuted(_ string) {\n\tif err := s.writeMutedMessage(false); err != nil {\n\t\tlogger.Errorw(\"failed to write unmute message\", err)\n\t}\n}\n\ntype textMessagePayload struct {\n\tMuted bool `json:\"muted\"`\n}\n\nfunc (s *WebsocketSink) writeMutedMessage(muted bool) error {\n\tdata, err := json.Marshal(&textMessagePayload{\n\t\tMuted: muted,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tif s.closed.Load() {\n\t\treturn nil\n\t}\n\n\treturn s.conn.WriteMessage(websocket.TextMessage, data)\n}\n\nfunc (s *WebsocketSink) UploadManifest(_ string) (string, bool, error) {\n\treturn \"\", false, nil\n}\n\nfunc (s *WebsocketSink) Close() error {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tif !s.closed.Swap(true) {\n\t\tlogger.Debugw(\"closing websocket connection\")\n\n\t\t// write close message for graceful disconnection\n\t\t_ = s.conn.WriteMessage(websocket.CloseMessage, 
nil)\n\n\t\t// terminate connection and close the `closed` channel\n\t\t_ = s.conn.Close()\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "pkg/pipeline/source/pulse/pactl.go",
    "content": "package pulse\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"os/exec\"\n\n\t\"github.com/livekit/egress/pkg/errors\"\n)\n\nfunc Clients() (int, error) {\n\tinfo, err := List()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn len(info.Clients), nil\n}\n\nfunc List() (*PulseInfo, error) {\n\tcmd := exec.Command(\"pactl\", \"--format\", \"json\", \"list\")\n\tvar b, e bytes.Buffer\n\tcmd.Stdout = &b\n\tcmd.Stderr = &e\n\tif cmd.Run() != nil {\n\t\treturn nil, errors.New(e.String())\n\t}\n\n\tinfo := &PulseInfo{}\n\treturn info, json.Unmarshal(b.Bytes(), info)\n}\n\ntype PulseInfo struct {\n\tModules       []Module       `json:\"modules\"`\n\tSinks         []Device       `json:\"sinks\"`\n\tSources       []Device       `json:\"sources\"`\n\tSinkInputs    []SinkInput    `json:\"sink_inputs\"`\n\tSourceOutputs []SourceOutput `json:\"source_outputs\"`\n\tClients       []Client       `json:\"clients\"`\n\tSamples       []interface{}  `json:\"samples\"`\n\tCards         []interface{}  `json:\"cards\"`\n}\n\ntype Module struct {\n\tName         string                 `json:\"name\"`\n\tArgument     string                 `json:\"argument\"`\n\tUsageCounter string                 `json:\"usage_counter\"`\n\tProperties   map[string]interface{} `json:\"properties\"`\n}\n\ntype Device struct {\n\tIndex               int                    `json:\"index\"`\n\tState               string                 `json:\"state\"`\n\tName                string                 `json:\"name\"`\n\tDescription         string                 `json:\"description\"`\n\tDriver              string                 `json:\"driver\"`\n\tSampleSpecification string                 `json:\"sample_specification\"`\n\tChannelMap          string                 `json:\"channel_map\"`\n\tOwnerModule         int                    `json:\"owner_module\"`\n\tMute                bool                   `json:\"mute\"`\n\tVolume              map[string]Volume      `json:\"volume\"`\n\tBalance 
            float64                `json:\"balance\"`\n\tBaseVolume          Volume                 `json:\"base_volume\"`\n\tMonitorSource       string                 `json:\"monitor_source\"`\n\tLatency             Latency                `json:\"latency\"`\n\tFlags               []string               `json:\"flags\"`\n\tProperties          map[string]interface{} `json:\"properties\"`\n\tPorts               []interface{}          `json:\"ports\"`\n\tActivePort          interface{}            `json:\"active_port\"`\n\tFormats             []string               `json:\"formats\"`\n}\n\ntype IOBase struct {\n\tIndex               int                    `json:\"index\"`\n\tDriver              string                 `json:\"driver\"`\n\tOwnerModule         string                 `json:\"owner_module\"`\n\tClient              string                 `json:\"client\"`\n\tSampleSpecification string                 `json:\"sample_specification\"`\n\tChannelMap          string                 `json:\"channel_map\"`\n\tFormat              string                 `json:\"format\"`\n\tCorked              bool                   `json:\"corked\"`\n\tMute                bool                   `json:\"mute\"`\n\tVolume              map[string]Volume      `json:\"volume\"`\n\tBalance             float64                `json:\"balance\"`\n\tBufferLatencyUSec   float64                `json:\"buffer_latency_usec\"`\n\tSinkLatencyUSec     float64                `json:\"sink_latency_usec\"`\n\tResampleMethod      string                 `json:\"resample_method\"`\n\tProperties          map[string]interface{} `json:\"properties\"`\n}\n\ntype SinkInput struct {\n\tIOBase `json:\",inline\"`\n\tSink   int `json:\"sink\"`\n}\n\ntype SourceOutput struct {\n\tIOBase `json:\",inline\"`\n\tSource int `json:\"source\"`\n}\n\ntype Client struct {\n\tIndex       int                    `json:\"index\"`\n\tDriver      string                 `json:\"driver\"`\n\tOwnerModule string                 
`json:\"owner_module\"`\n\tProperties  map[string]interface{} `json:\"properties\"`\n}\n\ntype Volume struct {\n\tValue        int    `json:\"value\"`\n\tValuePercent string `json:\"value_percent\"`\n\tDb           string `json:\"db\"`\n}\n\ntype Latency struct {\n\tActual     float64 `json:\"actual\"`\n\tConfigured float64 `json:\"configured\"`\n}\n\ntype EgressInfo struct {\n\tEgressID      string\n\tSinkInputs    int\n\tSourceOutputs int\n}\n\nfunc (info *PulseInfo) GetEgressInfo() map[int]*EgressInfo {\n\tegressMap := make(map[int]*EgressInfo)\n\tfor _, sink := range info.Sinks {\n\t\tegressMap[sink.Index] = &EgressInfo{\n\t\t\tEgressID: sink.Name,\n\t\t}\n\t}\n\tfor _, sinkInput := range info.SinkInputs {\n\t\tegressMap[sinkInput.Sink].SinkInputs++\n\t}\n\tfor _, sourceOutput := range info.SourceOutputs {\n\t\tegressMap[sourceOutput.Source].SourceOutputs++\n\t}\n\treturn egressMap\n}\n"
  },
  {
    "path": "pkg/pipeline/source/sdk/appwriter.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage sdk\n\nimport (\n\t\"io\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/frostbyte73/core\"\n\t\"github.com/go-gst/go-gst/gst\"\n\t\"github.com/go-gst/go-gst/gst/app\"\n\t\"github.com/linkdata/deadlock\"\n\t\"github.com/pion/rtp\"\n\t\"github.com/pion/rtp/codecs\"\n\t\"github.com/pion/webrtc/v4\"\n\t\"go.uber.org/atomic\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/gstreamer\"\n\t\"github.com/livekit/egress/pkg/logging\"\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/media-sdk/jitter\"\n\t\"github.com/livekit/protocol/logger\"\n\t\"github.com/livekit/protocol/utils\"\n\tlksdk \"github.com/livekit/server-sdk-go/v2\"\n\t\"github.com/livekit/server-sdk-go/v2/pkg/synchronizer\"\n)\n\nconst (\n\terrBufferTooSmall       = \"buffer too small\"\n\tdiscontinuityTolerance  = 500 * time.Millisecond\n\tpipelineCheckInterval   = 5 * time.Second\n\tcSamplesQueueDepth      = 100\n\tdrainingTimeout         = time.Second * 3\n\tunsubscribedGracePeriod = time.Second * 2\n\n\t// FlowFlushing recovery: threshold of consecutive FlowFlushing returns before\n\t// triggering source bin reset. 
~2 seconds of 20ms audio packets.\n\tflushingThreshold = 100\n\t// Maximum number of source bin resets per writer lifetime.\n\tmaxSrcResets = 2\n)\n\nvar errFlowFlushingThreshold = errors.New(\"persistent FlowFlushing detected\")\n\ntype sampleItem struct {\n\tsample []jitter.ExtPacket\n\tnext   *sampleItem\n}\n\ntype AppWriter struct {\n\tconf *config.PipelineConfig\n\n\tlogger    logger.Logger\n\tcsvLogger *logging.CSVLogger[logging.TrackStats]\n\tdrift     atomic.Duration\n\tmaxDrift  atomic.Duration\n\n\tpub         lksdk.TrackPublication\n\ttrack       *webrtc.TrackRemote\n\tcodec       types.MimeType\n\tsrc         *app.Source\n\tstartTime   time.Time\n\ttrackSource *config.TrackSource\n\n\tbuffer *jitter.Buffer\n\n\tsamplesHead *sampleItem\n\tsamplesTail *sampleItem\n\tsamplesLen  int\n\tsamplesLock deadlock.Mutex\n\tsamplesCond *sync.Cond\n\n\ttranslator  Translator\n\tcallbacks   *gstreamer.Callbacks\n\tsendPLI     func()\n\tpliThrottle core.Throttle\n\n\t// a/v sync\n\tsynchronizer *synchronizer.Synchronizer\n\t*synchronizer.TrackSynchronizer\n\tdriftHandler DriftHandler\n\n\tlastPTS              time.Duration\n\tlastDrift            time.Duration\n\tlastPipelineCheckPTS time.Duration\n\tinitialized          bool\n\n\t// state\n\tbuildReady               core.Fuse\n\tactive                   atomic.Bool\n\tlastReceived             atomic.Time\n\tlastPushed               atomic.Time\n\tplaying                  core.Fuse\n\tdraining                 core.Fuse\n\tunsubscribed             core.Fuse\n\tendStreamSignaled        core.Fuse\n\tendStreamSourceProcessed core.Fuse\n\tendStreamProcessed       core.Fuse\n\tfinished                 core.Fuse\n\tstats                    appWriterStats\n\n\t// FlowFlushing recovery\n\tflushingCount int // consecutive FlowFlushing returns from PushBuffer\n\tsrcResetCount int // number of source bin resets performed\n\n\t// diagnostics, set on unexpected flushing when pushing packets to the pipeline\n\tflushDotRequested 
atomic.Bool\n\n\t// ensure selector/bin removal is only triggered once on terminal read errors\n\tremovalRequested atomic.Bool\n\n\ttpLock       deadlock.RWMutex\n\ttimeProvider gstreamer.TimeProvider\n}\n\ntype appWriterStats struct {\n\tpacketsDropped atomic.Uint64\n}\n\ntype DriftHandler interface {\n\tEnqueueDrift(t time.Duration)\n\tProcessed() time.Duration\n}\n\nfunc NewAppWriter(\n\tconf *config.PipelineConfig,\n\ttrack *webrtc.TrackRemote,\n\tpub lksdk.TrackPublication,\n\trp *lksdk.RemoteParticipant,\n\tts *config.TrackSource,\n\tsynchronizer *synchronizer.Synchronizer,\n\tdriftHandler DriftHandler,\n\tcallbacks *gstreamer.Callbacks,\n) (*AppWriter, error) {\n\tw := &AppWriter{\n\t\tconf:              conf,\n\t\tlogger:            logger.GetLogger().WithValues(\"trackID\", track.ID(), \"kind\", track.Kind().String()),\n\t\ttrack:             track,\n\t\tpub:               pub,\n\t\tcodec:             ts.MimeType,\n\t\tsrc:               ts.AppSrc,\n\t\ttrackSource:       ts,\n\t\tcallbacks:         callbacks,\n\t\tsynchronizer:      synchronizer,\n\t\tTrackSynchronizer: synchronizer.AddTrack(track, rp.Identity()),\n\t\tdriftHandler:      driftHandler,\n\t\ttimeProvider:      gstreamer.NopTimeProvider(),\n\t}\n\tw.samplesCond = sync.NewCond(&w.samplesLock)\n\n\tts.OnKeyframeRequired = w.onKeyframeRequired\n\n\tif conf.Debug.EnableTrackLogging {\n\t\tcsvLogger, err := logging.NewCSVLogger[logging.TrackStats](track.ID())\n\t\tif err != nil {\n\t\t\tlogger.Errorw(\"failed to create csv logger\", err)\n\t\t} else {\n\t\t\tw.csvLogger = csvLogger\n\t\t\tw.OnSenderReport(func(drift time.Duration) {\n\t\t\t\tlogger.Debugw(\"received sender report\", \"drift\", drift)\n\t\t\t\tif w.driftHandler != nil {\n\t\t\t\t\t// presence of the drift handler means that PTS updates on SRs are disabled\n\t\t\t\t\td := drift - w.lastDrift\n\t\t\t\t\tw.lastDrift = 
drift\n\t\t\t\t\tw.driftHandler.EnqueueDrift(d)\n\t\t\t\t}\n\t\t\t\tw.updateDrift(drift)\n\t\t\t})\n\t\t}\n\t}\n\n\tvar depacketizer rtp.Depacketizer\n\tswitch ts.MimeType {\n\tcase types.MimeTypeOpus:\n\t\tdepacketizer = &codecs.OpusPacket{}\n\t\tw.translator = NewNullTranslator()\n\n\tcase types.MimeTypePCMU, types.MimeTypePCMA:\n\t\tdepacketizer = &G711Packet{}\n\t\tw.translator = NewNullTranslator()\n\n\tcase types.MimeTypeH264:\n\t\tdepacketizer = &codecs.H264Packet{}\n\t\tw.translator = NewNullTranslator()\n\n\tcase types.MimeTypeVP8:\n\t\tdepacketizer = &codecs.VP8Packet{}\n\t\tw.translator = NewVP8Translator(w.logger)\n\n\tcase types.MimeTypeVP9:\n\t\tdepacketizer = &codecs.VP9Packet{}\n\t\tw.translator = NewNullTranslator()\n\n\tdefault:\n\t\treturn nil, errors.ErrNotSupported(string(ts.MimeType))\n\t}\n\n\topts := []jitter.Option{jitter.WithLogger(w.logger)}\n\n\tif track.Kind() == webrtc.RTPCodecTypeVideo {\n\t\tw.pliThrottle = core.NewThrottle(time.Second)\n\t\tw.sendPLI = func() { w.pliThrottle(func() { rp.WritePLI(track.SSRC()) }) }\n\t\topts = append(opts, jitter.WithPacketLossHandler(func(uint64, uint64) { w.sendPLI() }))\n\t}\n\n\tw.buffer = jitter.NewBuffer(\n\t\tdepacketizer,\n\t\tconf.Latency.JitterBufferLatency,\n\t\tw.onPacket,\n\t\topts...,\n\t)\n\tgo w.start()\n\treturn w, nil\n}\n\nfunc (w *AppWriter) start() {\n\tw.startTime = time.Now()\n\tw.active.Store(true)\n\tif w.csvLogger != nil {\n\t\tgo w.logStats()\n\t}\n\n\tgo func() {\n\t\t<-w.callbacks.BuildReady\n\t\tw.buildReady.Once(func() {\n\t\t\tif !w.active.Load() {\n\t\t\t\tw.callbacks.OnTrackMuted(w.track.ID())\n\t\t\t}\n\t\t})\n\t}()\n\n\tgo w.pushSamples()\n\tfor !w.endStreamSignaled.IsBroken() {\n\t\tw.readNext()\n\t}\n\tw.drainJitterBuffer()\n\n\tselect {\n\tcase <-w.endStreamProcessed.Watch():\n\t\tw.logger.Debugw(\"endStreamProcessed fuse broken\")\n\tcase <-time.After(drainingTimeout):\n\t\tw.logger.Errorw(\"endStreamProcessed not broken after 3 seconds, bug in the draining 
logic!\", nil,\n\t\t\t\"endStreamSourceProcessed\", w.endStreamSourceProcessed.IsBroken(),\n\t\t\t\"playing\", w.playing.IsBroken(),\n\t\t\t\"active\", w.active.Load(),\n\t\t\t\"lastReceived\", w.lastReceived.Load(),\n\t\t\t\"lastPushed\", w.lastPushed.Load(),\n\t\t\t\"lastPTS\", w.lastPTS,\n\t\t)\n\t}\n\n\t// clean up\n\tif w.playing.IsBroken() {\n\t\tw.callbacks.OnEOSSent()\n\t\tif flow := w.src.EndStream(); flow != gst.FlowOK && flow != gst.FlowFlushing {\n\t\t\tw.logger.Warnw(\"unexpected flow return\", nil, \"flowReturn\", flow.String())\n\t\t}\n\t\tif w.driftHandler != nil {\n\t\t\tw.logger.Debugw(\"processed drift\", \"drift\", w.driftHandler.Processed())\n\t\t}\n\t}\n\n\tw.logger.Infow(\"writer finished\")\n\tif w.csvLogger != nil {\n\t\tw.csvLogger.Close()\n\t}\n\n\tif w.trackSource != nil {\n\t\tw.trackSource.OnKeyframeRequired = nil\n\t}\n\n\tw.finished.Break()\n}\n\nfunc (w *AppWriter) readNext() {\n\t_ = w.track.SetReadDeadline(time.Now().Add(time.Millisecond * 500))\n\tpkt, _, err := w.track.ReadRTP()\n\tif err != nil {\n\t\tw.handleReadError(err)\n\t\treturn\n\t}\n\n\treceivedAt := time.Now()\n\tvar packets []jitter.ExtPacket\n\tif !w.initialized {\n\t\tready, dropped, done := w.PrimeForStart(jitter.ExtPacket{ReceivedAt: receivedAt, Packet: pkt})\n\t\tif dropped > 0 {\n\t\t\tw.stats.packetsDropped.Add(uint64(dropped))\n\t\t\tif w.sendPLI != nil {\n\t\t\t\tw.sendPLI()\n\t\t\t}\n\t\t}\n\t\tif !done {\n\t\t\treturn\n\t\t}\n\t\tw.initialized = true\n\t\tpackets = ready\n\t\tw.lastReceived.Store(ready[len(ready)-1].ReceivedAt)\n\t} else {\n\t\tw.lastReceived.Store(receivedAt)\n\t}\n\n\tif !w.active.Swap(true) {\n\t\t// set track active\n\t\tw.logTrackState(\"track active\")\n\t\tif w.buildReady.IsBroken() {\n\t\t\tw.callbacks.OnTrackUnmuted(w.track.ID())\n\t\t}\n\t\tif w.sendPLI != nil {\n\t\t\tw.sendPLI()\n\t\t}\n\t}\n\tif len(packets) > 0 {\n\t\tw.buffer.PushExtPacketBatch(packets)\n\t} else {\n\t\tw.buffer.Push(pkt)\n\t}\n}\n\nfunc (w *AppWriter) 
handleReadError(err error) {\n\tvar netErr net.Error\n\tswitch {\n\tcase w.draining.IsBroken():\n\t\tif !w.endStreamSignaled.IsBroken() {\n\t\t\t// Delayed drain in progress (Drain(false) was called, timer pending)\n\t\t\tif (errors.As(err, &netErr) && netErr.Timeout()) || err.Error() == errBufferTooSmall {\n\t\t\t\t// Keep reading until timer fires to preserve pipeline latency timeout\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tw.logger.Debugw(\"handleReadError, breaking endStreamSignaled\", \"error\", err)\n\t\t// connection closed or EOF - no point in trying to read anymore\n\t\tw.endStreamSignaled.Break()\n\t\tw.notifyPushSamples()\n\n\tcase errors.As(err, &netErr) && netErr.Timeout():\n\t\tlastRecv := w.lastReceived.Load()\n\t\tif lastRecv.IsZero() {\n\t\t\tlastRecv = w.startTime\n\t\t}\n\n\t\t// If track was unsubscribed and grace period elapsed, end the stream\n\t\tif w.unsubscribed.IsBroken() && time.Since(lastRecv) > unsubscribedGracePeriod {\n\t\t\tw.logger.Debugw(\"unsubscribed grace period elapsed, ending stream\")\n\t\t\tw.ensureRemovedBeforeDrain()\n\t\t\tw.draining.Break()\n\t\t\tw.endStreamSignaled.Break()\n\t\t\tw.notifyPushSamples()\n\t\t\treturn\n\t\t}\n\n\t\tif !w.active.Load() {\n\t\t\treturn\n\t\t}\n\t\tif w.pub.IsMuted() || time.Since(lastRecv) > w.conf.Latency.JitterBufferLatency {\n\t\t\t// set track inactive\n\t\t\tw.logTrackState(\"track inactive\")\n\t\t\tw.active.Store(false)\n\t\t\tif w.buildReady.IsBroken() {\n\t\t\t\tw.callbacks.OnTrackMuted(w.track.ID())\n\t\t\t}\n\t\t}\n\n\tcase err.Error() == errBufferTooSmall:\n\t\tw.logger.Warnw(\"read error\", err)\n\n\tdefault:\n\t\t// ensure selector switches before EOS propagation to avoid encoder errors\n\t\tw.ensureRemovedBeforeDrain()\n\n\t\tif !errors.Is(err, io.EOF) {\n\t\t\tw.logger.Errorw(\"could not read packet\", err)\n\t\t} else {\n\t\t\tw.logger.Debugw(\"read EOF, signaling end of 
stream\")\n\t\t}\n\t\tw.draining.Break()\n\t\tw.endStreamSignaled.Break()\n\t\tw.notifyPushSamples()\n\t}\n}\n\nfunc (w *AppWriter) SetTimeProvider(tp gstreamer.TimeProvider) {\n\tw.tpLock.Lock()\n\tif tp == nil {\n\t\ttp = gstreamer.NopTimeProvider()\n\t}\n\tw.timeProvider = tp\n\tw.tpLock.Unlock()\n}\n\nfunc (w *AppWriter) waitFor(ch <-chan struct{}) bool {\n\tif ch == nil {\n\t\treturn true\n\t}\n\tselect {\n\tcase <-ch:\n\t\treturn true\n\tcase <-w.draining.Watch():\n\t\treturn false\n\t}\n}\n\nfunc (w *AppWriter) pipelineRunningTime() (time.Duration, bool) {\n\tw.tpLock.RLock()\n\tprovider := w.timeProvider\n\tw.tpLock.RUnlock()\n\treturn provider.RunningTime()\n}\n\nfunc (w *AppWriter) pipelinePlayhead() (time.Duration, bool) {\n\tw.tpLock.RLock()\n\tprovider := w.timeProvider\n\tw.tpLock.RUnlock()\n\treturn provider.PlayheadPosition()\n}\n\nfunc (w *AppWriter) logTrackState(event string) {\n\tfields := []any{\"timestamp\", time.Since(w.startTime)}\n\tif pipelineTime, ok := w.pipelineRunningTime(); ok {\n\t\tfields = append(fields, \"pipeline_time\", pipelineTime)\n\t}\n\tif playhead, ok := w.pipelinePlayhead(); ok {\n\t\tfields = append(fields, \"playhead\", playhead)\n\t}\n\tw.logger.Debugw(event, fields...)\n}\n\nfunc (w *AppWriter) onKeyframeRequired() {\n\tif w.finished.IsBroken() || w.sendPLI == nil {\n\t\treturn\n\t}\n\tw.sendPLI()\n}\n\nfunc (w *AppWriter) notifyPushSamples() {\n\tw.samplesLock.Lock()\n\tw.samplesCond.Broadcast()\n\tw.samplesLock.Unlock()\n}\n\nfunc (w *AppWriter) onPacket(sample []jitter.ExtPacket) {\n\tw.samplesLock.Lock()\n\titem := &sampleItem{sample, nil}\n\tif w.samplesHead == nil {\n\t\tw.samplesHead = item\n\t\tw.samplesTail = w.samplesHead\n\t\tw.samplesLen = 1\n\t} else {\n\t\tw.samplesTail.next = item\n\t\tw.samplesTail = item\n\t\tw.samplesLen++\n\t}\n\t// drop old samples if queue is overflowing\n\tfor w.samplesLen > cSamplesQueueDepth {\n\t\tif w.samplesHead != nil {\n\t\t\titemToDrop := 
w.samplesHead\n\t\t\tw.samplesHead = w.samplesHead.next\n\t\t\tw.samplesLen--\n\t\t\tw.stats.packetsDropped.Add(uint64(len(itemToDrop.sample)))\n\t\t\tw.logger.Warnw(\"buffer full, dropping sample\", nil, \"numPackets\", len(itemToDrop.sample))\n\t\t}\n\t\tif w.samplesHead == nil {\n\t\t\tw.samplesTail = nil\n\t\t\tw.samplesLen = 0\n\t\t}\n\t}\n\tw.samplesCond.Broadcast()\n\tw.samplesLock.Unlock()\n}\n\nfunc (w *AppWriter) pushSamples() {\n\tdefer func() {\n\t\tw.endStreamSignaled.Break()\n\t\tw.endStreamProcessed.Break()\n\t\tw.logger.Debugw(\"pushSamples finished\")\n\t}()\n\tif !w.waitFor(w.callbacks.PipelinePaused()) {\n\t\treturn\n\t}\n\n\tif !w.waitFor(w.playing.Watch()) {\n\t\treturn\n\t}\n\n\tfor {\n\t\tw.samplesLock.Lock()\n\t\tfor w.samplesHead == nil && !w.endStreamSourceProcessed.IsBroken() {\n\t\t\tw.samplesCond.Wait()\n\t\t}\n\t\tif w.endStreamSourceProcessed.IsBroken() && w.samplesHead == nil {\n\t\t\tw.samplesLock.Unlock()\n\t\t\treturn\n\t\t}\n\n\t\titem := w.samplesHead\n\t\tw.samplesHead = item.next\n\t\tw.samplesLen--\n\t\tif w.samplesHead == nil {\n\t\t\tw.samplesTail = nil\n\t\t}\n\t\tw.samplesLock.Unlock()\n\n\t\tfor _, pkt := range item.sample {\n\t\t\tif err := w.pushPacket(pkt); err != nil {\n\t\t\t\tif errors.Is(err, errFlowFlushingThreshold) {\n\t\t\t\t\tif w.tryRecoverFromFlushing() {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tw.draining.Break()\n\t\t\t\t\tw.notifyPushSamples()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif !utils.ErrorIsOneOf(err, synchronizer.ErrPacketOutOfOrder, synchronizer.ErrPacketTooOld) {\n\t\t\t\t\tw.draining.Break()\n\t\t\t\t\tw.notifyPushSamples()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (w *AppWriter) pushPacket(pkt jitter.ExtPacket) error {\n\tw.translator.Translate(pkt.Packet)\n\n\t// get PTS\n\tpts, err := w.GetPTS(pkt)\n\tif err != nil {\n\t\tw.stats.packetsDropped.Inc()\n\t\treturn err\n\t}\n\n\tif pts < 0 {\n\t\t// TODO: handle it by sending new gst segment that will reflect the 
offset\n\t\tw.logger.Debugw(\"negative packet pts, dropping\", \"pts\", pts)\n\t\tw.stats.packetsDropped.Inc()\n\t\treturn nil\n\t}\n\n\tp, err := pkt.Marshal()\n\tif err != nil {\n\t\tw.stats.packetsDropped.Inc()\n\t\tw.logger.Errorw(\"could not marshal packet\", err)\n\t\treturn err\n\t}\n\n\tb := gst.NewBufferFromBytes(p)\n\tb.SetPresentationTimestamp(gst.ClockTime(uint64(pts)))\n\n\tif isDiscontinuity(w.lastPTS, pts) {\n\t\tif w.shouldHandleDiscontinuity() {\n\t\t\tw.logger.Debugw(\"discontinuity detected\", \"pts\", pts, \"lastPTS\", w.lastPTS)\n\t\t\tok := w.src.SendEvent(gst.NewFlushStartEvent())\n\t\t\tif !ok {\n\t\t\t\tw.logger.Errorw(\"failed to send flush start event\", nil)\n\t\t\t}\n\t\t\tok = w.src.SendEvent(gst.NewFlushStopEvent(false))\n\t\t\tif !ok {\n\t\t\t\tw.logger.Errorw(\"failed to send flush stop event\", nil)\n\t\t\t}\n\t\t}\n\t\tb.SetFlags(b.GetFlags() | gst.BufferFlagDiscont)\n\t}\n\n\tif flow := w.src.PushBuffer(b); flow != gst.FlowOK {\n\t\tw.stats.packetsDropped.Inc()\n\t\tif flow == gst.FlowFlushing {\n\t\t\tw.flushingCount++\n\t\t\tif w.flushingCount == 1 {\n\t\t\t\tw.logger.Infow(\"FlowFlushing detected\",\n\t\t\t\t\t\"appsrcState\", w.src.Element.GetCurrentState().String())\n\t\t\t\tif w.flushDotRequested.CompareAndSwap(false, true) {\n\t\t\t\t\tw.callbacks.OnDebugDotRequest(\"appsrc_flush_\" + w.track.ID())\n\t\t\t\t}\n\t\t\t}\n\t\t\tif w.flushingCount >= flushingThreshold {\n\t\t\t\treturn errFlowFlushingThreshold\n\t\t\t}\n\t\t} else {\n\t\t\tw.logger.Infow(\"unexpected flow return\", \"flow\", flow,\n\t\t\t\t\"appsrcState\", w.src.Element.GetCurrentState().String())\n\t\t}\n\t} else if w.flushingCount > 0 {\n\t\tw.logger.Infow(\"FlowFlushing cleared after successful push\",\n\t\t\t\"previousCount\", w.flushingCount)\n\t\tw.flushingCount = 0\n\t}\n\n\tw.lastPushed.Store(time.Now())\n\tw.lastPTS = pts\n\tw.maybeCheckPipelineLag(pts)\n\treturn nil\n}\n\n// tryRecoverFromFlushing attempts to recover from persistent FlowFlushing 
by\n// removing the stuck source bin and replacing it with a new one.\n// Returns true if recovery succeeded and pushing can continue.\nfunc (w *AppWriter) tryRecoverFromFlushing() bool {\n\tif w.draining.IsBroken() {\n\t\tw.logger.Debugw(\"skipping FlowFlushing recovery: draining\")\n\t\treturn false\n\t}\n\tif w.unsubscribed.IsBroken() {\n\t\tw.logger.Debugw(\"skipping FlowFlushing recovery: unsubscribed\")\n\t\treturn false\n\t}\n\tif w.endStreamSignaled.IsBroken() {\n\t\tw.logger.Debugw(\"skipping FlowFlushing recovery: end stream signaled\")\n\t\treturn false\n\t}\n\n\tif w.srcResetCount >= maxSrcResets {\n\t\tw.logger.Warnw(\"max FlowFlushing recovery attempts reached, giving up\", nil,\n\t\t\t\"attempts\", w.srcResetCount)\n\t\treturn false\n\t}\n\n\tw.logger.Infow(\"attempting FlowFlushing recovery via source bin reset\",\n\t\t\"flushingCount\", w.flushingCount, \"attempt\", w.srcResetCount+1)\n\n\toldAppSrc := w.trackSource.AppSrc\n\n\t// Call the builder layer to force-remove the old bin and add a new one.\n\t// The callback updates ts.AppSrc to the new appsrc on success.\n\tif err := w.callbacks.OnSourceBinReset(w.trackSource); err != nil {\n\t\tw.logger.Errorw(\"FlowFlushing recovery failed\", err)\n\t\treturn false\n\t}\n\n\tif w.trackSource.AppSrc == oldAppSrc {\n\t\tw.logger.Errorw(\"FlowFlushing recovery: no handler replaced the appsrc\", nil)\n\t\treturn false\n\t}\n\n\tw.src = w.trackSource.AppSrc\n\tw.flushingCount = 0\n\tw.srcResetCount++\n\n\tw.logger.Infow(\"FlowFlushing recovery succeeded, continuing with new appsrc\",\n\t\t\"totalResets\", w.srcResetCount)\n\treturn true\n}\n\nfunc (w *AppWriter) maybeCheckPipelineLag(pts time.Duration) {\n\tif pts-w.lastPipelineCheckPTS < pipelineCheckInterval {\n\t\treturn\n\t}\n\tpipelineTime, ok := w.pipelineRunningTime()\n\tif !ok {\n\t\treturn\n\t}\n\tw.lastPipelineCheckPTS = pts\n\tif pipelineTime <= w.conf.Latency.AudioMixerLatency {\n\t\treturn\n\t}\n\n\tif pts < 
pipelineTime-w.conf.Latency.AudioMixerLatency {\n\t\tw.logger.Warnw(\n\t\t\t\"packet PTS too far in the past compared to the pipeline, mixer will drop the buffer!\",\n\t\t\tnil,\n\t\t\t\"pts\", pts,\n\t\t\t\"pipelineRunningTime\", pipelineTime,\n\t\t)\n\t}\n}\n\nfunc (w *AppWriter) Playing() {\n\tw.playing.Break()\n}\n\n// Drain blocks until finished\nfunc (w *AppWriter) Drain(force bool) {\n\tw.draining.Once(func() {\n\t\tw.logger.Debugw(\"draining\", \"force\", force)\n\n\t\tendStream := func() {\n\t\t\tw.endStreamSignaled.Break()\n\t\t\tw.notifyPushSamples()\n\t\t}\n\n\t\tif force || !w.active.Load() {\n\t\t\tendStream()\n\t\t} else {\n\t\t\ttime.AfterFunc(w.conf.Latency.PipelineLatency, endStream)\n\t\t}\n\t})\n\n\t<-w.finished.Watch()\n\tw.logger.Debugw(\"finished fuse broken\")\n\tw.synchronizer.RemoveTrack(w.track.ID())\n}\n\n// OnUnsubscribed signals that the track was unsubscribed but allows the reader\n// to continue reading until an error occurs or grace period elapses.\n// This allows any remaining buffers in flight from the SFU to be processed.\nfunc (w *AppWriter) OnUnsubscribed() {\n\tw.unsubscribed.Break()\n\tw.logger.Debugw(\"track unsubscribed, continuing to read until error or grace period\")\n}\n\n// Finished returns a channel that is closed when the writer has finished.\nfunc (w *AppWriter) Finished() <-chan struct{} {\n\treturn w.finished.Watch()\n}\n\nfunc (w *AppWriter) logStats() {\n\tended := w.endStreamSignaled.Watch()\n\tticker := time.NewTicker(time.Second * 10)\n\tdefer ticker.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-ended:\n\t\t\tstats := w.getStats()\n\t\t\tw.csvLogger.Write(stats)\n\t\t\tw.csvLogger.Close()\n\t\t\tw.logger.Infow(\"appwriter stats \", \"stats\", stats, \"requestType\", w.conf.RequestType)\n\t\t\treturn\n\n\t\tcase <-ticker.C:\n\t\t\tstats := w.getStats()\n\t\t\tw.csvLogger.Write(stats)\n\t\t}\n\t}\n}\n\nfunc (w *AppWriter) getStats() *logging.TrackStats {\n\tstats := w.buffer.Stats()\n\treturn 
&logging.TrackStats{\n\t\tTimestamp:       time.Now().Format(time.DateTime),\n\t\tPacketsReceived: stats.PacketsPushed,\n\t\tPaddingReceived: stats.PaddingPushed,\n\t\tLastReceived:    w.lastReceived.Load().Format(time.DateTime),\n\t\tPacketsDropped:  stats.PacketsDropped + w.stats.packetsDropped.Load(),\n\t\tPacketsPushed:   stats.PacketsPopped,\n\t\tSamplesPushed:   stats.SamplesPopped,\n\t\tLastPushed:      w.lastPushed.Load().Format(time.DateTime),\n\t\tDrift:           w.drift.Load(),\n\t\tMaxDrift:        w.maxDrift.Load(),\n\t}\n}\n\nfunc (w *AppWriter) updateDrift(drift time.Duration) {\n\tw.drift.Store(drift)\n\tfor {\n\t\tmaxDrift := w.maxDrift.Load()\n\t\tif drift.Abs() <= maxDrift.Abs() {\n\t\t\tbreak\n\t\t}\n\t\tif w.maxDrift.CompareAndSwap(maxDrift, drift) {\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc (w *AppWriter) shouldHandleDiscontinuity() bool {\n\treturn w.track.Kind() == webrtc.RTPCodecTypeAudio && w.conf.AudioTempoController.Enabled\n}\n\nfunc (w *AppWriter) TrackKind() webrtc.RTPCodecType {\n\treturn w.track.Kind()\n}\n\nfunc (w *AppWriter) drainJitterBuffer() {\n\tw.logger.Debugw(\"draining jitter buffer\")\n\tw.buffer.Close()\n\tw.buffer.Flush()\n\tw.logger.Debugw(\"jitter buffer flushed\")\n\n\tw.endStreamSourceProcessed.Break()\n\tw.notifyPushSamples()\n}\n\nfunc isDiscontinuity(lastPTS time.Duration, pts time.Duration) bool {\n\treturn pts > lastPTS+discontinuityTolerance\n}\n\nfunc (w *AppWriter) shouldRemoveBeforeDrain() bool {\n\treturn w.track.Kind() == webrtc.RTPCodecTypeVideo &&\n\t\t(w.conf.RequestType == types.RequestTypeParticipant || w.conf.RequestType == types.RequestTypeRoomComposite || w.conf.RequestType == types.RequestTypeMedia)\n}\n\nfunc (w *AppWriter) ensureRemovedBeforeDrain() {\n\tif w.shouldRemoveBeforeDrain() && w.removalRequested.CompareAndSwap(false, true) {\n\t\tw.callbacks.OnTrackRemoved(w.track.ID())\n\t}\n}\n\ntype G711Packet struct{}\n\nfunc (p *G711Packet) Unmarshal(packet []byte) ([]byte, error) {\n\t// G.711 
payload is just the raw samples, return as-is (same as OpusPacket)\n\tif packet == nil {\n\t\treturn nil, errors.New(\"nil packet\")\n\t}\n\treturn packet, nil\n}\n\nfunc (p *G711Packet) IsPartitionHead(_ []byte) bool {\n\treturn true\n}\n\nfunc (p *G711Packet) IsPartitionTail(_ bool, _ []byte) bool {\n\treturn true\n}\n"
  },
  {
    "path": "pkg/pipeline/source/sdk/translator.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage sdk\n\nimport (\n\t\"time\"\n\n\t\"github.com/pion/rtp\"\n\n\t\"github.com/livekit/livekit-server/pkg/sfu/buffer\"\n\t\"github.com/livekit/livekit-server/pkg/sfu/codecmunger\"\n\t\"github.com/livekit/protocol/logger\"\n)\n\ntype Translator interface {\n\tTranslate(*rtp.Packet)\n}\n\n// VP8\n\ntype VP8Translator struct {\n\tlogger logger.Logger\n\n\tfirstPktPushed bool\n\tlastSN         uint16\n\tvp8Munger      *codecmunger.VP8\n}\n\nfunc NewVP8Translator(logger logger.Logger) *VP8Translator {\n\treturn &VP8Translator{\n\t\tlogger:    logger,\n\t\tvp8Munger: codecmunger.NewVP8(logger),\n\t}\n}\n\nfunc (t *VP8Translator) Translate(pkt *rtp.Packet) {\n\tdefer func() {\n\t\tt.lastSN = pkt.SequenceNumber\n\t}()\n\n\tif len(pkt.Payload) == 0 {\n\t\treturn\n\t}\n\n\tvp8Packet := buffer.VP8{}\n\tif err := vp8Packet.Unmarshal(pkt.Payload); err != nil {\n\t\tt.logger.Warnw(\"could not unmarshal VP8 packet\", err)\n\t\treturn\n\t}\n\n\textPkt := &buffer.ExtPacket{\n\t\tPacket:   pkt,\n\t\tArrival:  time.Now().UnixNano(),\n\t\tPayload:  vp8Packet,\n\t\tIsKeyFrame: vp8Packet.IsKeyFrame,\n\t\tVideoLayer: buffer.VideoLayer{\n\t\t\tSpatial:  -1,\n\t\t\tTemporal: int32(vp8Packet.TID),\n\t\t},\n\t}\n\n\tif !t.firstPktPushed {\n\t\tt.firstPktPushed = true\n\t\tt.vp8Munger.SetLast(extPkt)\n\t} else {\n\t\tpayload := make([]byte, 1460)\n\t\tincomingHeaderSize, 
header, err := t.vp8Munger.UpdateAndGet(extPkt, false, pkt.SequenceNumber != t.lastSN+1, extPkt.Temporal)\n\t\tif err != nil {\n\t\t\tt.logger.Warnw(\"could not update VP8 packet\", err)\n\t\t\treturn\n\t\t}\n\t\tcopy(payload, header)\n\t\tn := copy(payload[len(header):], extPkt.Packet.Payload[incomingHeaderSize:])\n\t\tpkt.Payload = payload[:len(header)+n]\n\t}\n}\n\n// Null\n\ntype NullTranslator struct{}\n\nfunc NewNullTranslator() Translator {\n\treturn &NullTranslator{}\n}\n\nfunc (t *NullTranslator) Translate(_ *rtp.Packet) {}\n"
  },
  {
    "path": "pkg/pipeline/source/sdk.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage source\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/frostbyte73/core\"\n\t\"github.com/linkdata/deadlock\"\n\t\"github.com/pion/webrtc/v4\"\n\t\"go.uber.org/atomic\"\n\n\t\"github.com/livekit/protocol/livekit\"\n\t\"github.com/livekit/protocol/logger\"\n\tlksdk \"github.com/livekit/server-sdk-go/v2\"\n\t\"github.com/livekit/server-sdk-go/v2/pkg/synchronizer\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/gstreamer\"\n\t\"github.com/livekit/egress/pkg/types\"\n)\n\nconst (\n\tsubscriptionTimeout = time.Second * 30\n)\n\ntype SDKSource struct {\n\t*config.PipelineConfig\n\tcallbacks *gstreamer.Callbacks\n\n\troom *lksdk.Room\n\tsync *synchronizer.Synchronizer\n\n\tmu                   deadlock.Mutex\n\tinitialized          core.Fuse\n\tfilenameReplacements map[string]string\n\taudioChannels        map[string]livekit.AudioChannel\n\n\tworkersMu deadlock.RWMutex\n\tworkers   map[string]*trackWorker\n\n\t// subLock prevents a race where a subscription starts during init completion.\n\t// Without it, the subscription could see \"not yet initialized\", then init completes,\n\t// leaving the track orphaned (missed by both pipeline build and dynamic add).\n\tsubLock deadlock.RWMutex\n\n\tclosing atomic.Bool\n\tactive  
atomic.Int32\n\tclosed  core.Fuse\n\n\tstartRecording core.Fuse\n\tendRecording   core.Fuse\n\n\ttimeProvider   atomic.Pointer[gstreamer.TimeProvider]\n\tinitResultChan atomic.Pointer[chan subscriptionResult]\n}\n\ntype subscriptionResult struct {\n\ttrackID string\n\terr     error\n}\n\nfunc NewSDKSource(ctx context.Context, p *config.PipelineConfig, callbacks *gstreamer.Callbacks) (*SDKSource, error) {\n\t_, span := tracer.Start(ctx, \"SDKInput.New\")\n\tdefer span.End()\n\n\ts := &SDKSource{\n\t\tPipelineConfig:       p,\n\t\tcallbacks:            callbacks,\n\t\tfilenameReplacements: make(map[string]string),\n\t\taudioChannels:        make(map[string]livekit.AudioChannel),\n\t\tworkers:              make(map[string]*trackWorker),\n\t}\n\tlogger.Debugw(\"latency config\", \"latency\", p.Latency)\n\n\topts := []synchronizer.SynchronizerOption{\n\t\tsynchronizer.WithMaxTsDiff(p.Latency.RTPMaxAllowedTsDiff),\n\t\tsynchronizer.WithMaxDriftAdjustment(p.Latency.RTPMaxDriftAdjustment),\n\t\tsynchronizer.WithDriftAdjustmentWindowPercent(p.Latency.RTPDriftAdjustmentWindowPercent),\n\t\tsynchronizer.WithOldPacketThreshold(p.Latency.OldPacketThreshold),\n\t\tsynchronizer.WithOnStarted(func() {\n\t\t\ts.startRecording.Break()\n\t\t}),\n\t}\n\n\tif p.RequestType == types.RequestTypeRoomComposite || p.RequestType == types.RequestTypeTemplate {\n\t\t// Enable Packet Burst Estimator for Room Composite requests\n\t\topts = append(opts, synchronizer.WithStartGate())\n\t}\n\n\t// time provider is not available yet, will be set later\n\t// add some leeway to the mixer latency\n\topts = append(opts, synchronizer.WithMediaRunningTime(nil, p.Latency.AudioMixerLatency+200*time.Millisecond))\n\n\tif s.shouldEnableOneShotSenderReportSync() {\n\t\topts = append(opts, synchronizer.WithSenderReportSyncMode(synchronizer.SenderReportSyncModeOneShot))\n\t\topts = append(opts, 
synchronizer.WithOneShotDriftCorrectionThreshold(\n\t\t\ttime.Duration(float64(p.Latency.AudioMixerLatency)*0.8),\n\t\t))\n\t} else if s.shouldDisableAudioPTSAdjustment() {\n\t\topts = append(opts, synchronizer.WithSenderReportSyncMode(synchronizer.SenderReportSyncModeWithoutRebase))\n\t\topts = append(opts, synchronizer.WithAudioPTSAdjustmentDisabled())\n\t} else {\n\t\topts = append(opts, synchronizer.WithSenderReportSyncMode(synchronizer.SenderReportSyncModeRebase))\n\t}\n\n\tif p.AudioTempoController.Enabled {\n\t\tlogger.Debugw(\"audio tempo controller enabled\", \"adjustmentRate\", p.AudioTempoController.AdjustmentRate)\n\t}\n\n\ts.sync = synchronizer.NewSynchronizerWithOptions(\n\t\topts...,\n\t)\n\n\tif err := s.joinRoom(); err != nil {\n\t\ts.disconnectRoom()\n\t\treturn nil, err\n\t}\n\n\treturn s, nil\n}\n\nfunc (s *SDKSource) StartRecording() <-chan struct{} {\n\treturn s.startRecording.Watch()\n}\n\nfunc (s *SDKSource) EndRecording() <-chan struct{} {\n\treturn s.endRecording.Watch()\n}\n\nfunc (s *SDKSource) Playing(trackID string) {\n\ts.workersMu.RLock()\n\tw := s.workers[trackID]\n\ts.workersMu.RUnlock()\n\n\tif w == nil {\n\t\treturn\n\t}\n\n\tgen := w.generation.Load()\n\ts.submitOp(trackID, Operation{Type: OpPlaying, Generation: gen})\n}\n\nfunc (s *SDKSource) GetStartedAt() int64 {\n\treturn s.sync.GetStartedAt()\n}\n\nfunc (s *SDKSource) GetEndedAt() int64 {\n\treturn s.sync.GetEndedAt()\n}\n\nfunc (s *SDKSource) CloseWriters() {\n\ts.closed.Once(func() {\n\t\ts.closing.Store(true)\n\t\ts.sync.End()\n\n\t\ts.workersMu.RLock()\n\t\tworkers := make([]*trackWorker, 0, len(s.workers))\n\t\tfor _, w := range s.workers {\n\t\t\tworkers = append(workers, w)\n\t\t}\n\t\ts.workersMu.RUnlock()\n\n\t\tfor _, w := range workers {\n\t\t\tselect {\n\t\t\tcase w.opChan <- Operation{Type: OpClose}:\n\t\t\tcase <-w.done.Watch():\n\t\t\t\t// already exited\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc (s *SDKSource) StreamStopped(elementName string) {\n\ttrackID := 
strings.TrimPrefix(elementName, \"app_\")\n\n\t// Only send finished if we have a worker for this track\n\ts.workersMu.RLock()\n\t_, exists := s.workers[trackID]\n\ts.workersMu.RUnlock()\n\n\tif !exists {\n\t\treturn // No worker for this track, nothing to clean up\n\t}\n\n\ts.submitOp(trackID, Operation{Type: OpFinished})\n}\n\nfunc (s *SDKSource) Close() {\n\ts.disconnectRoom()\n}\n\nfunc (s *SDKSource) SetTimeProvider(tp gstreamer.TimeProvider) {\n\ts.timeProvider.Store(&tp)\n\n\tif tp != nil {\n\t\ts.sync.SetMediaRunningTime(tp.RunningTime)\n\t} else {\n\t\ts.sync.SetMediaRunningTime(nil)\n\t}\n\n\ts.workersMu.RLock()\n\tfor _, w := range s.workers {\n\t\tselect {\n\t\tcase w.opChan <- Operation{Type: OpSetTimeProvider, TimeProvider: tp}:\n\t\tdefault:\n\t\t\tlogger.Warnw(\"failed to send SetTimeProvider, channel full\", nil, \"trackID\", w.trackID)\n\t\t}\n\t}\n\ts.workersMu.RUnlock()\n}\n\n// ----- Subscriptions -----\n\nfunc (s *SDKSource) joinRoom() error {\n\tcb := &lksdk.RoomCallback{\n\t\tParticipantCallback: lksdk.ParticipantCallback{\n\t\t\tOnTrackSubscribed:   s.onTrackSubscribed,\n\t\t\tOnTrackMuted:        s.onTrackMuted,\n\t\t\tOnTrackUnmuted:      s.onTrackUnmuted,\n\t\t\tOnTrackUnsubscribed: s.onTrackUnsubscribed,\n\t\t},\n\t\tOnDisconnected: s.onDisconnected,\n\t}\n\n\tswitch s.RequestType {\n\tcase types.RequestTypeRoomComposite, types.RequestTypeTemplate, types.RequestTypeMedia:\n\t\tcb.OnTrackPublished = s.onTrackPublished\n\tcase types.RequestTypeParticipant:\n\t\tcb.OnTrackPublished = s.onTrackPublished\n\t\tcb.OnParticipantDisconnected = s.onParticipantDisconnected\n\t}\n\n\tlogger.Debugw(\"connecting to room\")\n\troom, err := lksdk.ConnectToRoomWithToken(s.WsUrl, s.Token, cb, lksdk.WithAutoSubscribe(false))\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.room = room\n\n\tvar fileIdentifier string\n\tvar w, h uint32\n\tswitch s.RequestType {\n\tcase types.RequestTypeRoomComposite:\n\t\tfileIdentifier = s.room.Name()\n\t\t// room_name and 
room_id are already handled as replacements\n\t\terr = s.awaitRoomTracks()\n\n\tcase types.RequestTypeTemplate:\n\t\tif s.Info.RoomName != \"\" {\n\t\t\tfileIdentifier = s.Info.RoomName\n\t\t} else {\n\t\t\tfileIdentifier = s.room.Name()\n\t\t\ts.filenameReplacements[\"{room_name}\"] = s.room.Name()\n\t\t}\n\n\t\terr = s.awaitRoomTracks()\n\n\tcase types.RequestTypeParticipant:\n\t\tfileIdentifier = s.Identity\n\t\ts.filenameReplacements[\"{publisher_identity}\"] = s.Identity\n\t\tw, h, err = s.awaitParticipantTracks(s.Identity)\n\n\tcase types.RequestTypeTrackComposite:\n\t\tfileIdentifier = s.Info.RoomName\n\t\ttracks := make(map[string]struct{})\n\t\tif s.AudioEnabled {\n\t\t\ttracks[s.AudioTrackID] = struct{}{}\n\t\t}\n\t\tif s.VideoEnabled {\n\t\t\ttracks[s.VideoTrackID] = struct{}{}\n\t\t}\n\t\tw, h, err = s.awaitTracks(tracks)\n\n\tcase types.RequestTypeTrack:\n\t\tfileIdentifier = s.TrackID\n\t\tw, h, err = s.awaitTracks(map[string]struct{}{s.TrackID: {}})\n\n\tcase types.RequestTypeMedia:\n\t\tif s.Info.RoomName != \"\" {\n\t\t\tfileIdentifier = s.Info.RoomName\n\t\t} else {\n\t\t\tfileIdentifier = s.room.Name()\n\t\t\ts.filenameReplacements[\"{room_name}\"] = s.room.Name()\n\t\t}\n\t\tw, h, err = s.awaitMediaTracks()\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = s.UpdateInfoFromSDK(fileIdentifier, s.filenameReplacements, w, h); err != nil {\n\t\tlogger.Errorw(\"could not update file params\", err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (s *SDKSource) startAwaitingTracks(expectedCount int) <-chan subscriptionResult {\n\tch := make(chan subscriptionResult, expectedCount)\n\ts.initResultChan.Store(&ch)\n\treturn ch\n}\n\n// StopAwaitingTracks - called after init complete or timeout\nfunc (s *SDKSource) stopAwaitingTracks() {\n\ts.initResultChan.Store(nil) // just nil out, don't close\n}\n\nfunc (s *SDKSource) completeInit() {\n\ts.subLock.Lock()\n\tdefer s.subLock.Unlock()\n\ts.initialized.Break()\n}\n\n// getInitResultChan returns the 
current init result channel (nil after init complete)\nfunc (s *SDKSource) getInitResultChan() chan<- subscriptionResult {\n\tif ptr := s.initResultChan.Load(); ptr != nil {\n\t\treturn *ptr\n\t}\n\treturn nil\n}\n\n// sendInitResult sends result to the init channel if non-nil (non-blocking to avoid deadlock)\nfunc (s *SDKSource) sendInitResult(ch chan<- subscriptionResult, trackID string, err error) {\n\tif ch == nil {\n\t\treturn\n\t}\n\tselect {\n\tcase ch <- subscriptionResult{trackID: trackID, err: err}:\n\tdefault:\n\t\tlogger.Warnw(\"failed to send init result, channel full\", nil, \"trackID\", trackID)\n\t}\n}\n\nfunc (s *SDKSource) awaitRoomTracks() error {\n\t// await expected subscriptions\n\texpected := 0\n\tfor _, rp := range s.room.GetRemoteParticipants() {\n\t\tpubs := rp.TrackPublications()\n\t\tfor _, pub := range pubs {\n\t\t\tif s.shouldSubscribe(pub) {\n\t\t\t\texpected++\n\t\t\t}\n\t\t}\n\t}\n\tif err := s.awaitExpected(expected); err != nil {\n\t\treturn err\n\t}\n\n\ts.completeInit()\n\treturn nil\n}\n\nfunc (s *SDKSource) awaitMediaTracks() (uint32, uint32, error) {\n\t// Phase 1: Collect prerequisites from config\n\trequiredParticipants := make(map[string]struct{})\n\trequiredTracks := make(map[string]struct{})\n\n\tif s.Identity != \"\" {\n\t\trequiredParticipants[s.Identity] = struct{}{}\n\t}\n\tif s.VideoTrackID != \"\" {\n\t\trequiredTracks[s.VideoTrackID] = struct{}{}\n\t}\n\tfor _, route := range s.AudioRoutes {\n\t\tif route.Match.TrackID != \"\" {\n\t\t\trequiredTracks[route.Match.TrackID] = struct{}{}\n\t\t}\n\t\tif route.Match.ParticipantIdentity != \"\" {\n\t\t\trequiredParticipants[route.Match.ParticipantIdentity] = struct{}{}\n\t\t}\n\t}\n\n\t// Phase 2: Wait for prerequisites with shared deadline\n\tdeadline := time.Now().Add(subscriptionTimeout)\n\n\tfor identity := range requiredParticipants {\n\t\tif _, err := s.getParticipant(identity, deadline); err != nil {\n\t\t\treturn 0, 0, err\n\t\t}\n\t}\n\tfor trackID := range 
requiredTracks {\n\t\tif err := s.awaitTrackPublication(trackID, deadline); err != nil {\n\t\t\treturn 0, 0, err\n\t\t}\n\t}\n\n\t// Phase 3: Count all matching subscriptions and soft-wait\n\texpected := 0\n\tfor _, rp := range s.room.GetRemoteParticipants() {\n\t\tfor _, pub := range rp.TrackPublications() {\n\t\t\tif s.shouldSubscribeMedia(pub, rp) {\n\t\t\t\texpected++\n\t\t\t}\n\t\t}\n\t}\n\tif err := s.awaitExpected(expected); err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\t// Phase 4: Get video dimensions from subscribed tracks\n\tvar w, h uint32\n\tfor _, rp := range s.room.GetRemoteParticipants() {\n\t\tfor _, pub := range rp.TrackPublications() {\n\t\t\tif pub.IsSubscribed() && pub.Kind() == lksdk.TrackKindVideo {\n\t\t\t\tif info := pub.TrackInfo(); info != nil {\n\t\t\t\t\tw = info.Width\n\t\t\t\t\th = info.Height\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\ts.completeInit()\n\treturn w, h, nil\n}\n\nfunc (s *SDKSource) awaitParticipantTracks(identity string) (uint32, uint32, error) {\n\trp, err := s.getParticipant(identity, time.Now().Add(subscriptionTimeout))\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\t// await expected subscriptions\n\tpubs := rp.TrackPublications()\n\texpected := 0\n\tfor _, pub := range pubs {\n\t\tif s.shouldSubscribe(pub) {\n\t\t\texpected++\n\t\t}\n\t}\n\tif err = s.awaitExpected(expected); err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\t// get dimensions after subscribing so that track info exists\n\tvar w, h uint32\n\tfor _, t := range pubs {\n\t\tif t.TrackInfo().Type == livekit.TrackType_VIDEO && t.IsSubscribed() {\n\t\t\tw = t.TrackInfo().Width\n\t\t\th = t.TrackInfo().Height\n\t\t}\n\t}\n\n\ts.completeInit()\n\treturn w, h, nil\n}\n\nfunc (s *SDKSource) awaitExpected(expected int) error {\n\tif expected == 0 {\n\t\treturn nil\n\t}\n\n\tresultChan := s.startAwaitingTracks(expected)\n\tdefer s.stopAwaitingTracks()\n\n\tsubscribed := 0\n\tdeadline := time.After(time.Second * 3)\n\n\tfor subscribed < expected {\n\t\tselect {\n\t\tcase 
sub := <-resultChan:\n\t\t\tif sub.err != nil {\n\t\t\t\treturn sub.err\n\t\t\t}\n\t\t\tsubscribed++\n\t\tcase <-deadline:\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (s *SDKSource) getParticipant(identity string, deadline time.Time) (*lksdk.RemoteParticipant, error) {\n\tfor time.Now().Before(deadline) {\n\t\tfor _, p := range s.room.GetRemoteParticipants() {\n\t\t\tif p.Identity() == identity {\n\t\t\t\treturn p, nil\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(100 * time.Millisecond)\n\t}\n\treturn nil, errors.ErrParticipantNotFound(identity)\n}\n\nfunc (s *SDKSource) awaitTrackPublication(trackID string, deadline time.Time) error {\n\tfor time.Now().Before(deadline) {\n\t\tfor _, p := range s.room.GetRemoteParticipants() {\n\t\t\tfor _, pub := range p.TrackPublications() {\n\t\t\t\tif pub.SID() == trackID {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(100 * time.Millisecond)\n\t}\n\treturn errors.ErrTrackNotFound(trackID)\n}\n\nfunc (s *SDKSource) awaitTracks(expecting map[string]struct{}) (uint32, uint32, error) {\n\ttrackCount := len(expecting)\n\tif trackCount == 0 {\n\t\ts.completeInit()\n\t\treturn 0, 0, nil\n\t}\n\n\twaiting := make(map[string]struct{})\n\tfor trackID := range expecting {\n\t\twaiting[trackID] = struct{}{}\n\t}\n\n\t// Set up init coordination - processIdleOp will send results here\n\tresultChan := s.startAwaitingTracks(trackCount)\n\tdefer s.stopAwaitingTracks()\n\n\tdeadline := time.After(subscriptionTimeout)\n\ttracks, err := s.subscribeToTracks(expecting, deadline)\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\tfor i := 0; i < trackCount; i++ {\n\t\tselect {\n\t\tcase result := <-resultChan:\n\t\t\tif result.err != nil {\n\t\t\t\treturn 0, 0, result.err\n\t\t\t}\n\t\t\tdelete(waiting, result.trackID)\n\t\tcase <-deadline:\n\t\t\tfor trackID := range waiting {\n\t\t\t\treturn 0, 0, errors.ErrTrackNotFound(trackID)\n\t\t\t}\n\t\t}\n\t}\n\n\tvar w, h uint32\n\tfor _, t := range tracks {\n\t\tif t.TrackInfo().Type == 
livekit.TrackType_VIDEO {\n\t\t\tw = t.TrackInfo().Width\n\t\t\th = t.TrackInfo().Height\n\t\t}\n\t}\n\n\ts.completeInit()\n\treturn w, h, nil\n}\n\nfunc (s *SDKSource) subscribeToTracks(expecting map[string]struct{}, deadline <-chan time.Time) ([]lksdk.TrackPublication, error) {\n\tvar tracks []lksdk.TrackPublication\n\n\tfor {\n\t\tselect {\n\t\tcase <-deadline:\n\t\t\tfor trackID := range expecting {\n\t\t\t\treturn nil, errors.ErrTrackNotFound(trackID)\n\t\t\t}\n\t\tdefault:\n\t\t\tfor _, p := range s.room.GetRemoteParticipants() {\n\t\t\t\tfor _, track := range p.TrackPublications() {\n\t\t\t\t\ttrackID := track.SID()\n\t\t\t\t\tif _, ok := expecting[trackID]; ok {\n\t\t\t\t\t\tif trackID == s.AudioTrackID && track.Kind() == lksdk.TrackKindVideo {\n\t\t\t\t\t\t\treturn nil, errors.ErrInvalidInput(\"audio_track_id\")\n\t\t\t\t\t\t} else if trackID == s.VideoTrackID && track.Kind() == lksdk.TrackKindAudio {\n\t\t\t\t\t\t\treturn nil, errors.ErrInvalidInput(\"video_track_id\")\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif err := s.subscribe(track); err != nil {\n\t\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\ttracks = append(tracks, track)\n\n\t\t\t\t\t\tdelete(expecting, track.SID())\n\t\t\t\t\t\tif len(expecting) == 0 {\n\t\t\t\t\t\t\treturn tracks, nil\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t}\n\t}\n}\n\nfunc (s *SDKSource) subscribe(track lksdk.TrackPublication) error {\n\tif pub, ok := track.(*lksdk.RemoteTrackPublication); ok {\n\t\tif pub.IsSubscribed() {\n\t\t\treturn nil\n\t\t}\n\n\t\tlogger.Infow(\"subscribing to track\", \"trackID\", track.SID())\n\n\t\tpub.OnRTCP(s.sync.OnRTCP)\n\n\t\treturn pub.SetSubscribed(true)\n\t}\n\n\treturn errors.ErrSubscriptionFailed\n}\n\n// ----- Callbacks -----\n\nfunc (s *SDKSource) onTrackSubscribed(track *webrtc.TrackRemote, pub *lksdk.RemoteTrackPublication, rp *lksdk.RemoteParticipant) {\n\t// After init, only participant and room composite requests accept new 
tracks\n\tif s.shouldSkipTrackSubscriptions() {\n\t\treturn\n\t}\n\n\ttrackID := pub.SID()\n\n\t// Capture result channel at submission time (nil after init complete)\n\tresultChan := s.getInitResultChan()\n\n\ts.submitOp(trackID, Operation{\n\t\tType:              OpSubscribe,\n\t\tTrack:             track,\n\t\tPub:               pub,\n\t\tRemoteParticipant: rp,\n\t\tResultChan:        resultChan,\n\t})\n}\n\nfunc (s *SDKSource) onTrackPublished(pub *lksdk.RemoteTrackPublication, rp *lksdk.RemoteParticipant) {\n\tif s.RequestType != types.RequestTypeParticipant &&\n\t\ts.RequestType != types.RequestTypeRoomComposite &&\n\t\ts.RequestType != types.RequestTypeTemplate &&\n\t\ts.RequestType != types.RequestTypeMedia {\n\t\treturn\n\t}\n\n\tif s.RequestType == types.RequestTypeParticipant && rp.Identity() != s.Identity {\n\t\treturn\n\t}\n\n\tvar shouldSub bool\n\tif s.RequestType == types.RequestTypeMedia {\n\t\tshouldSub = s.shouldSubscribeMedia(pub, rp)\n\t} else {\n\t\tshouldSub = s.shouldSubscribe(pub)\n\t}\n\n\tif shouldSub {\n\t\tif err := s.subscribe(pub); err != nil {\n\t\t\tlogger.Errorw(\"failed to subscribe to track\", err, \"trackID\", pub.SID())\n\t\t}\n\t} else {\n\t\tlogger.Infow(\"ignoring track\", \"reason\", fmt.Sprintf(\"source %s\", pub.Source()))\n\t}\n}\n\nfunc (s *SDKSource) shouldSubscribe(pub lksdk.TrackPublication) bool {\n\tswitch s.RequestType {\n\tcase types.RequestTypeParticipant:\n\t\tswitch pub.Source() {\n\t\tcase livekit.TrackSource_CAMERA, livekit.TrackSource_MICROPHONE:\n\t\t\treturn !s.ScreenShare\n\t\tdefault:\n\t\t\treturn s.ScreenShare\n\t\t}\n\tcase types.RequestTypeRoomComposite, types.RequestTypeTemplate:\n\t\tswitch pub.Kind() {\n\t\tcase lksdk.TrackKindAudio:\n\t\t\treturn s.AudioEnabled\n\t\tcase lksdk.TrackKindVideo:\n\t\t\treturn s.VideoEnabled\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc (s *SDKSource) shouldSubscribeMedia(pub lksdk.TrackPublication, rp *lksdk.RemoteParticipant) bool {\n\tif s.matchesMediaVideo(pub, rp) 
{\n\t\treturn true\n\t}\n\tif route := s.matchesAudioRoute(pub, rp); route != nil {\n\t\ts.mu.Lock()\n\t\ts.audioChannels[pub.SID()] = route.Channel\n\t\ts.mu.Unlock()\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (s *SDKSource) matchesAudioRoute(pub lksdk.TrackPublication, rp *lksdk.RemoteParticipant) *config.AudioRouteConfig {\n\tif pub.Kind() != lksdk.TrackKindAudio {\n\t\treturn nil\n\t}\n\tfor i := range s.AudioRoutes {\n\t\troute := &s.AudioRoutes[i]\n\t\tswitch {\n\t\tcase route.Match.TrackID != \"\":\n\t\t\tif pub.SID() == route.Match.TrackID {\n\t\t\t\treturn route\n\t\t\t}\n\t\tcase route.Match.ParticipantIdentity != \"\":\n\t\t\tif rp.Identity() == route.Match.ParticipantIdentity {\n\t\t\t\treturn route\n\t\t\t}\n\t\tcase route.Match.ParticipantKind != nil:\n\t\t\tif rp.Kind() == *route.Match.ParticipantKind {\n\t\t\t\treturn route\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (s *SDKSource) matchesMediaVideo(pub lksdk.TrackPublication, rp *lksdk.RemoteParticipant) bool {\n\tif pub.Kind() != lksdk.TrackKindVideo {\n\t\treturn false\n\t}\n\tif s.VideoTrackID != \"\" {\n\t\treturn pub.SID() == s.VideoTrackID\n\t}\n\tif s.Identity != \"\" {\n\t\tif rp.Identity() != s.Identity {\n\t\t\treturn false\n\t\t}\n\t\tif s.ScreenShare {\n\t\t\treturn pub.Source() == livekit.TrackSource_SCREEN_SHARE\n\t\t}\n\t\treturn pub.Source() == livekit.TrackSource_CAMERA\n\t}\n\treturn false\n}\n\nfunc (s *SDKSource) onTrackMuted(pub lksdk.TrackPublication, _ lksdk.Participant) {\n\ts.workersMu.RLock()\n\t_, exists := s.workers[pub.SID()]\n\ts.workersMu.RUnlock()\n\tif exists {\n\t\tlogger.Debugw(\"track muted\", \"trackID\", pub.SID())\n\t}\n}\n\nfunc (s *SDKSource) onTrackUnmuted(pub lksdk.TrackPublication, _ lksdk.Participant) {\n\ts.workersMu.RLock()\n\t_, exists := s.workers[pub.SID()]\n\ts.workersMu.RUnlock()\n\tif exists {\n\t\tlogger.Debugw(\"track unmuted\", \"trackID\", pub.SID())\n\t}\n}\n\nfunc (s *SDKSource) onTrackUnsubscribed(_ *webrtc.TrackRemote, pub 
*lksdk.RemoteTrackPublication, _ *lksdk.RemoteParticipant) {\n\ttrackID := pub.SID()\n\n\t// Only send unsubscribe if we have a worker (i.e., we subscribed to this track)\n\ts.workersMu.RLock()\n\t_, exists := s.workers[trackID]\n\ts.workersMu.RUnlock()\n\n\tif !exists {\n\t\treturn // Never subscribed to this track, nothing to do\n\t}\n\n\tlogger.Debugw(\"track unsubscribed\", \"trackID\", trackID)\n\ts.submitOp(trackID, Operation{Type: OpUnsubscribe})\n}\n\nfunc (s *SDKSource) onParticipantDisconnected(rp *lksdk.RemoteParticipant) {\n\tif rp.Identity() == s.Identity {\n\t\tlogger.Debugw(\"participant disconnected\")\n\t\ts.finished()\n\t}\n}\n\nfunc (s *SDKSource) onDisconnected() {\n\tlogger.Warnw(\"disconnected from room\", nil)\n\ts.finished()\n}\n\nfunc (s *SDKSource) finished() {\n\ts.endRecording.Break()\n}\n\nfunc (s *SDKSource) shouldSkipTrackSubscriptions() bool {\n\treturn s.initialized.IsBroken() &&\n\t\ts.RequestType != types.RequestTypeParticipant &&\n\t\ts.RequestType != types.RequestTypeRoomComposite &&\n\t\ts.RequestType != types.RequestTypeTemplate &&\n\t\ts.RequestType != types.RequestTypeMedia\n}\n\nfunc (s *SDKSource) disconnectRoom() {\n\tif s.room != nil {\n\t\ts.room.Disconnect()\n\t\ts.room = nil\n\t}\n}\n\nfunc (s *SDKSource) shouldUseOneShotSenderReportSync() bool {\n\treturn s.RequestType == types.RequestTypeRoomComposite // one-shot correction is only useful when the audio mixer can drop late audio\n}\n\nfunc (s *SDKSource) shouldEnableOneShotSenderReportSync() bool {\n\treturn s.EnableOneShotSenderReportSync && s.shouldUseOneShotSenderReportSync()\n}\n\nfunc (s *SDKSource) shouldDisableAudioPTSAdjustment() bool {\n\treturn s.RequestType == types.RequestTypeRoomComposite || // SDK room composites are audio only - no need to adjust audio timestamps\n\t\ts.RequestType == types.RequestTypeTemplate || // SDK templates are audio only - same as room composite\n\t\ts.RequestType == types.RequestTypeTrack || // no A/V sync needed for single 
track requests\n\t\ts.AudioTempoController.Enabled\n}\n"
  },
  {
    "path": "pkg/pipeline/source/source.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage source\n\nimport (\n\t\"context\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/gstreamer\"\n\t\"github.com/livekit/egress/pkg/types\"\n)\n\ntype Source interface {\n\tStartRecording() <-chan struct{}\n\tEndRecording() <-chan struct{}\n\tGetStartedAt() int64\n\tGetEndedAt() int64\n\tClose()\n}\n\ntype TimeAware interface {\n\tSetTimeProvider(gstreamer.TimeProvider)\n}\n\nfunc New(ctx context.Context, p *config.PipelineConfig, callbacks *gstreamer.Callbacks) (Source, error) {\n\tswitch p.SourceType {\n\tcase types.SourceTypeWeb:\n\t\treturn NewWebSource(ctx, p)\n\n\tcase types.SourceTypeSDK:\n\t\treturn NewSDKSource(ctx, p, callbacks)\n\n\tdefault:\n\t\treturn nil, errors.ErrInvalidInput(\"request\")\n\t}\n}\n"
  },
  {
    "path": "pkg/pipeline/source/tracer.go",
    "content": "// Copyright 2025 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage source\n\nimport \"go.opentelemetry.io/otel\"\n\nvar (\n\ttracer = otel.Tracer(\"github.com/livekit/egress/pkg/pipeline/source\")\n)\n"
  },
  {
    "path": "pkg/pipeline/source/track_worker.go",
    "content": "// Copyright 2026 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage source\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/go-gst/go-gst/gst\"\n\t\"github.com/go-gst/go-gst/gst/app\"\n\t\"github.com/pion/webrtc/v4\"\n\t\"go.uber.org/atomic\"\n\n\t\"github.com/frostbyte73/core\"\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/gstreamer\"\n\t\"github.com/livekit/egress/pkg/pipeline/source/sdk\"\n\t\"github.com/livekit/egress/pkg/pipeline/tempo\"\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/logger\"\n\tlksdk \"github.com/livekit/server-sdk-go/v2\"\n)\n\n// TrackState represents the state of a track writer in the per-track worker state machine.\ntype TrackState int\n\nconst (\n\tTrackStateIdle     TrackState = iota // no active writer, ready for subscription\n\tTrackStateActive                     // writer is active and processing samples\n\tTrackStateCleaning                   // writer is draining after unsubscribe\n)\n\nfunc (s TrackState) String() string {\n\tswitch s {\n\tcase TrackStateIdle:\n\t\treturn \"IDLE\"\n\tcase TrackStateActive:\n\t\treturn \"ACTIVE\"\n\tcase TrackStateCleaning:\n\t\treturn \"CLEANING\"\n\tdefault:\n\t\treturn \"UNKNOWN\"\n\t}\n}\n\n// OpType represents operations that can be sent to a track worker.\ntype OpType int\n\nconst (\n\tOpSubscribe       OpType = iota // track 
subscribed, create writer\n\tOpUnsubscribe                   // track unsubscribed, start graceful cleanup\n\tOpFinished                      // StreamStopped, immediate cleanup\n\tOpPlaying                       // GStreamer pipeline is playing\n\tOpSetTimeProvider               // set time provider for PTS calculation\n\tOpClose                         // shutdown, drain and exit worker\n)\n\nfunc (o OpType) String() string {\n\tswitch o {\n\tcase OpSubscribe:\n\t\treturn \"Subscribe\"\n\tcase OpUnsubscribe:\n\t\treturn \"Unsubscribe\"\n\tcase OpFinished:\n\t\treturn \"Finished\"\n\tcase OpPlaying:\n\t\treturn \"Playing\"\n\tcase OpSetTimeProvider:\n\t\treturn \"SetTimeProvider\"\n\tcase OpClose:\n\t\treturn \"Close\"\n\tdefault:\n\t\treturn \"Unknown\"\n\t}\n}\n\n// Operation is a message sent to a track worker's operation channel.\ntype Operation struct {\n\tType              OpType\n\tTrack             *webrtc.TrackRemote\n\tPub               *lksdk.RemoteTrackPublication\n\tRemoteParticipant *lksdk.RemoteParticipant\n\tGeneration        uint64\n\tTimeProvider      gstreamer.TimeProvider\n\tResultChan        chan<- subscriptionResult // for init coordination (nil after init)\n}\n\n// workerState holds the mutable state for a single track worker.\n// Only accessed by the worker's goroutine, no synchronization needed.\ntype workerState struct {\n\tstate      TrackState\n\twriter     *sdk.AppWriter\n\tgeneration uint64\n}\n\n// trackWorker manages the lifecycle of a single track.\n// Each track gets its own goroutine to serialize operations and avoid cross-track blocking.\ntype trackWorker struct {\n\ttrackID    string\n\topChan     chan Operation // buffered channel for operations\n\tdone       core.Fuse      // broken when worker exits\n\tgeneration atomic.Uint64  // current generation (for Playing coordination)\n}\n\nfunc (s *SDKSource) getOrCreateWorker(trackID string) *trackWorker {\n\t// Fast path - worker exists\n\ts.workersMu.RLock()\n\tw, exists := 
s.workers[trackID]\n\ts.workersMu.RUnlock()\n\n\tif exists {\n\t\treturn w\n\t}\n\n\t// Slow path - need to create worker\n\ts.workersMu.Lock()\n\tdefer s.workersMu.Unlock()\n\n\tif s.closing.Load() {\n\t\treturn nil\n\t}\n\n\t// Double-check after acquiring write lock\n\tif w, exists = s.workers[trackID]; exists {\n\t\treturn w\n\t}\n\n\tw = &trackWorker{\n\t\ttrackID:    trackID,\n\t\topChan:     make(chan Operation, 100),\n\t\tgeneration: atomic.Uint64{},\n\t}\n\ts.workers[trackID] = w\n\tgo s.runWorker(w)\n\n\treturn w\n}\n\nfunc (s *SDKSource) runWorker(w *trackWorker) {\n\tdefer func() {\n\t\tw.done.Break()\n\t\ts.workersMu.Lock()\n\t\tdelete(s.workers, w.trackID)\n\t\ts.workersMu.Unlock()\n\t}()\n\tstate := &workerState{state: TrackStateIdle}\n\n\tfor op := range w.opChan {\n\t\tif exit := s.processOp(w, w.trackID, state, op); exit {\n\t\t\treturn // OpClose processed, exit immediately\n\t\t}\n\t}\n}\n\nfunc (s *SDKSource) submitOp(trackID string, op Operation) {\n\tif s.closing.Load() {\n\t\treturn\n\t}\n\n\tw := s.getOrCreateWorker(trackID)\n\tif w == nil {\n\t\treturn\n\t}\n\n\tlogger.Debugw(\"submitting operation\", \"trackID\", trackID, \"op\", op.Type.String())\n\n\tselect {\n\tcase w.opChan <- op:\n\tcase <-w.done.Watch():\n\t\t// worker already exited, op dropped\n\t}\n}\n\nfunc (s *SDKSource) reportSubscribeError(isPostInit bool, resultChan chan<- subscriptionResult, trackID string, err error) {\n\tif isPostInit {\n\t\ts.callbacks.OnError(err)\n\t} else {\n\t\ts.sendInitResult(resultChan, trackID, err)\n\t}\n}\n\nfunc (s *SDKSource) validateSubscription(op Operation) error {\n\t// Check websocket/video incompatibility for Track requests\n\tif s.RequestType == types.RequestTypeTrack &&\n\t\top.Pub.Kind() == lksdk.TrackKindVideo &&\n\t\ts.Outputs[types.EgressTypeWebsocket] != nil {\n\t\tmimeType := types.MimeType(strings.ToLower(op.Track.Codec().MimeType))\n\t\treturn errors.ErrIncompatible(\"websocket\", mimeType)\n\t}\n\treturn nil\n}\n\nfunc (s 
*SDKSource) updatePreInitStateLocked(op Operation, ts *config.TrackSource) {\n\t// Update codec flags based on mime type\n\tswitch ts.MimeType {\n\tcase types.MimeTypeOpus, types.MimeTypePCMU, types.MimeTypePCMA:\n\t\ts.AudioEnabled = true\n\t\tif s.AudioOutCodec == \"\" {\n\t\t\tif ts.MimeType == types.MimeTypePCMU || ts.MimeType == types.MimeTypePCMA {\n\t\t\t\ts.AudioOutCodec = types.MimeTypeOpus\n\t\t\t} else {\n\t\t\t\ts.AudioOutCodec = ts.MimeType\n\t\t\t}\n\t\t}\n\t\ts.AudioTranscoding = true\n\t\ts.AudioTracks = append(s.AudioTracks, ts)\n\n\tcase types.MimeTypeH264, types.MimeTypeVP8, types.MimeTypeVP9:\n\t\ts.VideoEnabled = true\n\t\ts.VideoInCodec = ts.MimeType\n\t\tif s.VideoOutCodec == \"\" {\n\t\t\ts.VideoOutCodec = ts.MimeType\n\t\t}\n\t\tif s.VideoInCodec != s.VideoOutCodec {\n\t\t\ts.VideoDecoding = true\n\t\t\tif len(s.GetEncodedOutputs()) > 0 {\n\t\t\t\ts.VideoEncoding = true\n\t\t\t}\n\t\t}\n\t\ts.VideoTrack = ts\n\t}\n\n\t// Set identity and filename replacements based on request type\n\ttrack := op.Track\n\tpub := op.Pub\n\trp := op.RemoteParticipant\n\tswitch s.RequestType {\n\tcase types.RequestTypeTrackComposite:\n\t\tif s.Identity == \"\" || track.Kind() == webrtc.RTPCodecTypeVideo {\n\t\t\ts.Identity = rp.Identity()\n\t\t\ts.filenameReplacements[\"{publisher_identity}\"] = s.Identity\n\t\t}\n\n\tcase types.RequestTypeTrack:\n\t\ts.Identity = rp.Identity()\n\t\ts.TrackKind = pub.Kind().String()\n\t\ts.TrackSource = strings.ToLower(pub.Source().String())\n\t\tif o := s.GetFileConfig(); o != nil {\n\t\t\to.OutputType = types.TrackOutputTypes[ts.MimeType]\n\t\t}\n\t\ts.filenameReplacements[\"{track_id}\"] = s.TrackID\n\t\ts.filenameReplacements[\"{track_type}\"] = s.TrackKind\n\t\ts.filenameReplacements[\"{track_source}\"] = s.TrackSource\n\t\ts.filenameReplacements[\"{publisher_identity}\"] = s.Identity\n\t}\n}\n\nfunc (s *SDKSource) handleSubscribe(w *trackWorker, trackID string, state *workerState, op Operation) *sdk.AppWriter 
{\n\ts.subLock.RLock()\n\tisPostInit := s.initialized.IsBroken()\n\tisPreInit := !isPostInit\n\n\tvar subscribeErr error\n\tdefer func() {\n\t\tif subscribeErr != nil {\n\t\t\ts.reportSubscribeError(isPostInit, op.ResultChan, trackID, subscribeErr)\n\t\t}\n\t}()\n\n\t// Early validation before creating writer\n\tif err := s.validateSubscription(op); err != nil {\n\t\tsubscribeErr = err\n\t\tlogger.Errorw(\"subscription validation failed\", err, \"trackID\", trackID)\n\t\ts.subLock.RUnlock()\n\t\treturn nil\n\t}\n\n\tstate.generation++\n\tw.generation.Store(state.generation)\n\n\twriter, ts, err := s.createWriterForOp(op)\n\tif err != nil {\n\t\tsubscribeErr = err\n\t\tlogger.Errorw(\"failed to create writer\", err, \"trackID\", trackID)\n\t\ts.subLock.RUnlock()\n\t\treturn nil\n\t}\n\n\tif s.closing.Load() {\n\t\t// Release subLock before blocking drain\n\t\ts.subLock.RUnlock()\n\t\ts.handleOrphanedWriter(trackID, writer)\n\t\treturn nil\n\t}\n\n\ts.mu.Lock()\n\tif isPreInit {\n\t\ts.updatePreInitStateLocked(op, ts)\n\t}\n\ts.mu.Unlock()\n\n\t// All validation passed - report success\n\ts.sendInitResult(op.ResultChan, trackID, nil)\n\n\t// Release subLock before transitioning to ACTIVE - we're done with pre-init work\n\ts.subLock.RUnlock()\n\n\t// For post-init subscriptions, notify pipeline to add track\n\tif isPostInit {\n\t\t<-s.callbacks.BuildReady\n\t\ts.callbacks.OnTrackAdded(ts)\n\t}\n\n\treturn writer\n}\n\nfunc (s *SDKSource) processOp(w *trackWorker, trackID string, ws *workerState, op Operation) bool {\n\tlogger.Debugw(\"processing operation\", \"trackID\", trackID, \"op\", op.Type.String(), \"state\", ws.state.String())\n\n\tswitch ws.state {\n\tcase TrackStateIdle:\n\t\treturn s.processIdleOp(w, trackID, ws, op)\n\tcase TrackStateActive:\n\t\treturn s.processActiveOp(w, trackID, ws, op)\n\tcase TrackStateCleaning:\n\t\t// Unreachable: worker blocks in startCleanup while state is CLEANING.\n\t\t// Ops queue in opChan and are processed after cleanup 
completes (in IDLE state).\n\t\treturn false\n\tdefault:\n\t\tlogger.Warnw(\"invalid state\", nil, \"trackID\", trackID, \"state\", ws.state.String())\n\t\treturn false\n\t}\n}\n\nfunc (s *SDKSource) processIdleOp(w *trackWorker, trackID string, state *workerState, op Operation) bool {\n\tswitch op.Type {\n\tcase OpSubscribe:\n\t\tif writer := s.handleSubscribe(w, trackID, state, op); writer != nil {\n\t\t\tstate.state = TrackStateActive\n\t\t\tstate.writer = writer\n\t\t\ts.active.Inc()\n\t\t}\n\n\tcase OpPlaying:\n\t\tlogger.Warnw(\"invalid op in IDLE\", nil, \"trackID\", trackID, \"op\", op.Type.String(), \"generation\", op.Generation)\n\tcase OpClose:\n\t\treturn true\n\tcase OpSetTimeProvider, OpUnsubscribe, OpFinished:\n\t\tlogger.Warnw(\"invalid op in IDLE\", nil, \"trackID\", trackID, \"op\", op.Type.String())\n\t}\n\treturn false\n}\n\nfunc (s *SDKSource) processActiveOp(_ *trackWorker, trackID string, state *workerState, op Operation) bool {\n\tswitch op.Type {\n\tcase OpSubscribe:\n\t\t// Not possible, double subscribe shouldn't be possible, nothing to do\n\t\tlogger.Warnw(\"unexpected subscribe in ACTIVE state\", nil, \"trackID\", trackID)\n\n\tcase OpPlaying:\n\t\tif op.Generation == state.generation && state.writer != nil {\n\t\t\tstate.writer.Playing()\n\t\t} else {\n\t\t\tlogger.Warnw(\"playing for previous writer\", nil, \"trackID\", trackID, \"op\", op.Type.String(), \"generation\", op.Generation)\n\t\t}\n\n\tcase OpSetTimeProvider:\n\t\tif state.writer != nil {\n\t\t\tstate.writer.SetTimeProvider(op.TimeProvider)\n\t\t}\n\n\tcase OpClose:\n\t\t// Drain writer (non-blocking for shutdown)\n\t\tif state.writer != nil {\n\t\t\tstate.writer.Drain(false)\n\t\t}\n\t\tstate.writer = nil\n\t\tstate.state = TrackStateIdle\n\t\ts.active.Dec()\n\t\treturn true // signal worker to exit\n\n\tcase OpUnsubscribe:\n\t\tstate.state = TrackStateCleaning\n\t\ts.startCleanup(trackID, state)\n\n\tcase OpFinished:\n\t\t// StreamStopped - immediate 
cleanup\n\t\tstate.state = TrackStateCleaning\n\t\ts.doCleanup(trackID, state)\n\t}\n\treturn false\n}\n\n// Cleanup functions\n\nfunc (s *SDKSource) startCleanup(trackID string, state *workerState) {\n\twriter := state.writer\n\twriter.OnUnsubscribed()\n\n\t// Wait for writer to finish, but also handle shutdown\n\tselect {\n\tcase <-writer.Finished():\n\t\t// normal completion\n\tcase <-s.closed.Watch():\n\t\t// shutdown - force drain like old CloseWriters did\n\t\twriter.Drain(false)\n\t}\n\n\ts.doCleanup(trackID, state)\n}\n\nfunc (s *SDKSource) doCleanup(trackID string, state *workerState) {\n\twriter := state.writer\n\tif writer == nil {\n\t\t// Already cleaned up (defensive guard for future code paths)\n\t\tstate.state = TrackStateIdle\n\t\treturn\n\t}\n\tstate.writer = nil\n\n\t// Blocking cleanup - only affects this track's worker\n\tactive := s.active.Dec()\n\tshouldContinue := s.RequestType == types.RequestTypeParticipant ||\n\t\ts.RequestType == types.RequestTypeRoomComposite ||\n\t\ts.RequestType == types.RequestTypeTemplate ||\n\t\ts.RequestType == types.RequestTypeMedia\n\n\tif shouldContinue {\n\t\ttrackKind := writer.TrackKind()\n\t\tif trackKind == webrtc.RTPCodecTypeAudio {\n\t\t\twriter.Drain(true)\n\t\t}\n\t\ts.sync.RemoveTrack(trackID)\n\t\t<-s.callbacks.BuildReady\n\t\ts.callbacks.OnTrackRemoved(trackID)\n\t\tif trackKind == webrtc.RTPCodecTypeVideo {\n\t\t\twriter.Drain(true)\n\t\t}\n\t} else {\n\t\twriter.Drain(true)\n\t\tif active == 0 {\n\t\t\ts.finished()\n\t\t}\n\t}\n\n\tstate.state = TrackStateIdle\n}\n\n// ---------------- Helper functions ----------------\n\nfunc (s *SDKSource) createWriterForOp(op Operation) (*sdk.AppWriter, *config.TrackSource, error) {\n\ttrack, pub, rp := op.Track, op.Pub, op.RemoteParticipant\n\n\t<-s.callbacks.GstReady\n\n\tsrc, err := gst.NewElementWithName(\"appsrc\", fmt.Sprintf(\"app_%s\", track.ID()))\n\tif err != nil {\n\t\treturn nil, nil, errors.ErrGstPipelineError(err)\n\t}\n\n\tts := 
&config.TrackSource{\n\t\tTrackID:         pub.SID(),\n\t\tTrackKind:       pub.Kind(),\n\t\tParticipantKind: rp.Kind(),\n\t\tMimeType:        types.MimeType(strings.ToLower(track.Codec().MimeType)),\n\t\tPayloadType:     track.Codec().PayloadType,\n\t\tClockRate:       track.Codec().ClockRate,\n\t}\n\n\t// Set audio channel from route match (RequestTypeMedia)\n\ts.mu.Lock()\n\tif ch, ok := s.audioChannels[pub.SID()]; ok {\n\t\tts.AudioChannel = &ch\n\t}\n\ts.mu.Unlock()\n\n\tts.AppSrc = app.SrcFromElement(src)\n\n\tvar tc sdk.DriftHandler\n\n\t// Handle codec-specific setup (tempo controller for audio)\n\tswitch ts.MimeType {\n\tcase types.MimeTypeOpus, types.MimeTypePCMU, types.MimeTypePCMA:\n\t\tif s.AudioTempoController.Enabled {\n\t\t\tc := tempo.NewController()\n\t\t\tts.TempoController = c\n\t\t\ttc = c\n\t\t}\n\n\tcase types.MimeTypeH264, types.MimeTypeVP8, types.MimeTypeVP9:\n\t\t// Video codecs - no special setup needed here\n\n\tdefault:\n\t\treturn nil, nil, errors.ErrNotSupported(string(ts.MimeType))\n\t}\n\n\twriter, err := sdk.NewAppWriter(s.PipelineConfig, track, pub, rp, ts, s.sync, tc, s.callbacks)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tif tp := s.timeProvider.Load(); tp != nil {\n\t\twriter.SetTimeProvider(*tp)\n\t}\n\n\treturn writer, ts, nil\n}\n\nfunc (s *SDKSource) handleOrphanedWriter(trackID string, writer *sdk.AppWriter) {\n\twriter.Drain(true)\n\tlogger.Debugw(\"orphaned writer cleaned up\", \"trackID\", trackID)\n}\n"
  },
  {
    "path": "pkg/pipeline/source/track_worker_test.go",
    "content": "// Copyright 2026 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage source\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"go.uber.org/atomic\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/gstreamer\"\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/server-sdk-go/v2/pkg/synchronizer\"\n)\n\n// testSDKSource creates a minimal SDKSource for testing state transitions\nfunc testSDKSource(t *testing.T) *SDKSource {\n\tt.Helper()\n\n\tbuildReady := make(chan struct{})\n\tclose(buildReady) // already ready\n\n\tcallbacks := &gstreamer.Callbacks{\n\t\tBuildReady: buildReady,\n\t}\n\tcallbacks.AddOnTrackRemoved(func(_ string) {})\n\n\tpipelineConfig := &config.PipelineConfig{\n\t\tRequestType: types.RequestTypeRoomComposite,\n\t}\n\n\treturn &SDKSource{\n\t\tPipelineConfig:       pipelineConfig,\n\t\tcallbacks:            callbacks,\n\t\tsync:                 synchronizer.NewSynchronizer(nil),\n\t\tworkers:              make(map[string]*trackWorker),\n\t\tfilenameReplacements: make(map[string]string),\n\t\tactive:               atomic.Int32{},\n\t\tclosing:              atomic.Bool{},\n\t}\n}\n\nfunc TestGetOrCreateWorker_ReturnsExistingWorker(t *testing.T) {\n\ts := testSDKSource(t)\n\n\tw1 := s.getOrCreateWorker(\"track-1\")\n\tw2 := s.getOrCreateWorker(\"track-1\")\n\n\tassert.Equal(t, w1, w2, \"should return same worker for same 
trackID\")\n}\n\nfunc TestGetOrCreateWorker_ReturnsNilWhenClosing(t *testing.T) {\n\ts := testSDKSource(t)\n\ts.closing.Store(true)\n\n\tw := s.getOrCreateWorker(\"track-1\")\n\n\tassert.Nil(t, w, \"should return nil when closing\")\n}\n\nfunc TestSubmitOp_DropsOpWhenClosing(t *testing.T) {\n\ts := testSDKSource(t)\n\ts.closing.Store(true)\n\n\t// This should not panic or block\n\ts.submitOp(\"track-1\", Operation{Type: OpPlaying})\n\n\ts.workersMu.RLock()\n\t_, exists := s.workers[\"track-1\"]\n\ts.workersMu.RUnlock()\n\n\tassert.False(t, exists)\n}\n\nfunc TestStateTransitions_IdleState(t *testing.T) {\n\ttests := []struct {\n\t\tname      string\n\t\top        OpType\n\t\twantState TrackState\n\t\twantExit  bool\n\t}{\n\t\t// Valid ops in IDLE\n\t\t{\"OpClose exits\", OpClose, TrackStateIdle, true},\n\t\t{\"OpPlaying stays IDLE\", OpPlaying, TrackStateIdle, false},\n\t\t{\"OpSetTimeProvider stays IDLE\", OpSetTimeProvider, TrackStateIdle, false},\n\t\t// Invalid ops in IDLE (should log warning but not crash)\n\t\t{\"OpUnsubscribe stays IDLE\", OpUnsubscribe, TrackStateIdle, false},\n\t\t{\"OpFinished stays IDLE\", OpFinished, TrackStateIdle, false},\n\t\t// Note: OpSubscribe requires GStreamer, tested in integration tests\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\ts := testSDKSource(t)\n\t\t\tw := &trackWorker{\n\t\t\t\ttrackID:    \"test-track\",\n\t\t\t\tgeneration: atomic.Uint64{},\n\t\t\t}\n\t\t\tstate := &workerState{state: TrackStateIdle}\n\n\t\t\texit := s.processOp(w, \"test-track\", state, Operation{Type: tt.op})\n\n\t\t\tassert.Equal(t, tt.wantExit, exit, \"exit mismatch\")\n\t\t\tassert.Equal(t, tt.wantState, state.state, \"state mismatch\")\n\t\t})\n\t}\n}\n\n// Note: Operations that need a real writer (Unsubscribe, Finished) require integration tests\nfunc TestStateTransitions_ActiveState(t *testing.T) {\n\ttests := []struct {\n\t\tname      string\n\t\top        OpType\n\t\twantState 
TrackState\n\t\twantExit  bool\n\t}{\n\t\t{\"OpClose drains and exits\", OpClose, TrackStateIdle, true},\n\t\t{\"OpPlaying stays ACTIVE\", OpPlaying, TrackStateActive, false},\n\t\t{\"OpSetTimeProvider stays ACTIVE\", OpSetTimeProvider, TrackStateActive, false},\n\t\t{\"OpSubscribe stays ACTIVE (invalid)\", OpSubscribe, TrackStateActive, false},\n\t\t// Note: OpUnsubscribe and OpFinished trigger cleanup which needs a real writer\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\ts := testSDKSource(t)\n\t\t\tw := &trackWorker{\n\t\t\t\ttrackID:    \"test-track\",\n\t\t\t\tgeneration: atomic.Uint64{},\n\t\t\t}\n\t\t\t// Start in ACTIVE state with nil writer (ok for ops that don't use it)\n\t\t\tstate := &workerState{\n\t\t\t\tstate:      TrackStateActive,\n\t\t\t\twriter:     nil,\n\t\t\t\tgeneration: 1,\n\t\t\t}\n\t\t\ts.active.Store(1) // simulate one active track\n\n\t\t\texit := s.processOp(w, \"test-track\", state, Operation{Type: tt.op, Generation: 1})\n\n\t\t\tassert.Equal(t, tt.wantExit, exit, \"exit mismatch\")\n\t\t\tassert.Equal(t, tt.wantState, state.state, \"state mismatch\")\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "pkg/pipeline/source/web.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage source\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"math/rand\"\n\t\"net/url\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"gopkg.in/natefinch/lumberjack.v2\"\n\n\t\"github.com/chromedp/cdproto/inspector\"\n\t\"github.com/chromedp/cdproto/runtime\"\n\t\"github.com/chromedp/cdproto/target\"\n\t\"github.com/chromedp/chromedp\"\n\t\"github.com/frostbyte73/core\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/protocol/livekit\"\n\t\"github.com/livekit/protocol/logger\"\n\t\"github.com/livekit/protocol/logger/medialogutils\"\n)\n\nconst (\n\tstartRecordingLog = \"START_RECORDING\"\n\tendRecordingLog   = \"END_RECORDING\"\n\n\tchromeFailedToStart       = \"chrome failed to start:\"\n\tchromeCertVerifierChanged = \"net::ERR_CERT_VERIFIER_CHANGED\"\n\n\tchromeTimeout = time.Second * 30\n\tchromeRetries = 3\n)\n\ntype WebSource struct {\n\tpulseSink    string\n\txvfb         *exec.Cmd\n\tcloseChrome  context.CancelFunc\n\tchromeLogger *lumberjack.Logger\n\n\tstartRecording core.Fuse\n\tendRecording   core.Fuse\n\tclosed         core.Fuse\n\n\tinfo *livekit.EgressInfo\n}\n\nfunc NewWebSource(ctx context.Context, p *config.PipelineConfig) (*WebSource, error) {\n\tctx, span := tracer.Start(ctx, \"WebInput.New\")\n\tdefer 
span.End()\n\n\tp.Display = fmt.Sprintf(\":%d\", 10+rand.Intn(2147483637))\n\n\ts := &WebSource{\n\t\tinfo: p.Info,\n\t}\n\tif !p.AwaitStartSignal {\n\t\ts.startRecording.Break()\n\t}\n\n\tif err := s.createPulseSink(ctx, p); err != nil {\n\t\tlogger.Errorw(\"failed to create pulse sink\", err)\n\t\ts.Close()\n\t\treturn nil, err\n\t}\n\n\tif err := s.launchXvfb(ctx, p); err != nil {\n\t\tlogger.Errorw(\"failed to launch xvfb\", err, \"display\", p.Display)\n\t\ts.Close()\n\t\treturn nil, err\n\t}\n\n\tif err := s.launchChrome(ctx, p); err != nil {\n\t\tlogger.Warnw(\"failed to launch chrome\", err)\n\t\ts.Close()\n\t\treturn nil, err\n\t}\n\n\treturn s, nil\n}\n\nfunc (s *WebSource) StartRecording() <-chan struct{} {\n\treturn s.startRecording.Watch()\n}\n\nfunc (s *WebSource) EndRecording() <-chan struct{} {\n\treturn s.endRecording.Watch()\n}\n\nfunc (s *WebSource) GetStartedAt() int64 {\n\treturn time.Now().UnixNano()\n}\n\nfunc (s *WebSource) GetEndedAt() int64 {\n\treturn time.Now().UnixNano()\n}\n\nfunc (s *WebSource) Close() {\n\ts.closed.Once(func() {\n\t\tif s.closeChrome != nil {\n\t\t\tlogger.Debugw(\"closing chrome\")\n\t\t\ts.closeChrome()\n\t\t}\n\n\t\tif s.xvfb != nil {\n\t\t\tlogger.Debugw(\"closing X display\")\n\t\t\t_ = s.xvfb.Process.Kill()\n\t\t\t_ = s.xvfb.Wait()\n\t\t}\n\n\t\tif s.pulseSink != \"\" {\n\t\t\tlogger.Debugw(\"unloading pulse module\")\n\t\t\tif err := exec.Command(\"pactl\", \"unload-module\", s.pulseSink).Run(); err != nil {\n\t\t\t\tlogger.Errorw(\"failed to unload pulse sink\", err)\n\t\t\t}\n\t\t}\n\t\tif s.chromeLogger != nil {\n\t\t\t_ = s.chromeLogger.Close()\n\t\t\ts.chromeLogger = nil\n\t\t}\n\t})\n}\n\n// creates a new pulse audio sink\nfunc (s *WebSource) createPulseSink(ctx context.Context, p *config.PipelineConfig) error {\n\t_, span := tracer.Start(ctx, \"WebInput.createPulseSink\")\n\tdefer span.End()\n\n\tlogger.Debugw(\"creating pulse sink\")\n\tcmd := exec.Command(\"pactl\",\n\t\t\"load-module\", 
\"module-null-sink\",\n\t\tfmt.Sprintf(\"sink_name=\\\"%s\\\"\", p.Info.EgressId),\n\t\tfmt.Sprintf(\"sink_properties=device.description=\\\"%s\\\"\", p.Info.EgressId),\n\t)\n\tvar b bytes.Buffer\n\tl := medialogutils.NewCmdLogger(func(s string) {\n\t\tlogger.Infow(fmt.Sprintf(\"pactl: %s\", s))\n\t})\n\tcmd.Stdout = &b\n\tcmd.Stderr = l\n\terr := cmd.Run()\n\tif err != nil {\n\t\tif out := b.Bytes(); out != nil {\n\t\t\t_, _ = l.Write(out)\n\t\t}\n\t\treturn errors.ErrProcessFailed(\"pulse\", err)\n\t}\n\n\ts.pulseSink = strings.TrimRight(b.String(), \"\\n\")\n\treturn nil\n}\n\n// creates a new xvfb display\nfunc (s *WebSource) launchXvfb(ctx context.Context, p *config.PipelineConfig) error {\n\t_, span := tracer.Start(ctx, \"WebInput.launchXvfb\")\n\tdefer span.End()\n\n\tdims := fmt.Sprintf(\"%dx%dx%d\", p.Width, p.Height, p.Depth)\n\tlogger.Debugw(\"creating X display\", \"display\", p.Display, \"dims\", dims)\n\txvfb := exec.Command(\"Xvfb\", p.Display, \"-screen\", \"0\", dims, \"-ac\", \"-nolisten\", \"tcp\", \"-nolisten\", \"unix\")\n\tif err := xvfb.Start(); err != nil {\n\t\treturn errors.ErrProcessFailed(\"xvfb\", err)\n\t}\n\n\ts.xvfb = xvfb\n\treturn nil\n}\n\nfunc newChromeLogger(tmpDir string) *lumberjack.Logger {\n\twriter := &lumberjack.Logger{\n\t\tFilename:   filepath.Join(tmpDir, \"chrome.log\"),\n\t\tMaxSize:    100, // MB per file (smallest unit)\n\t\tMaxBackups: 1,   // current + 1 backup = 2 files total\n\t\tMaxAge:     7,   // days\n\t\tCompress:   false,\n\t}\n\treturn writer\n}\n\n// launches chrome and navigates to the url\nfunc (s *WebSource) launchChrome(ctx context.Context, p *config.PipelineConfig) error {\n\t_, span := tracer.Start(ctx, \"WebInput.launchChrome\")\n\tdefer span.End()\n\n\twebUrl := p.WebUrl\n\tif webUrl == \"\" {\n\t\t// build input url\n\t\tinputUrl, err := url.Parse(p.BaseUrl)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tvalues := inputUrl.Query()\n\t\tvalues.Set(\"layout\", 
p.Layout)\n\t\tvalues.Set(\"url\", p.WsUrl)\n\t\tvalues.Set(\"token\", p.Token)\n\t\tinputUrl.RawQuery = values.Encode()\n\t\twebUrl = inputUrl.String()\n\t}\n\n\tif p.Debug.EnableChromeLogging {\n\t\ts.chromeLogger = newChromeLogger(os.TempDir())\n\t}\n\n\tlogger.Debugw(\"launching chrome\", \"url\", webUrl, \"sandbox\", p.EnableChromeSandbox, \"insecure\", p.Insecure)\n\n\topts := []chromedp.ExecAllocatorOption{\n\t\tchromedp.NoFirstRun,\n\t\tchromedp.NoDefaultBrowserCheck,\n\t\tchromedp.DisableGPU,\n\n\t\t// puppeteer default behavior\n\t\tchromedp.Flag(\"disable-infobars\", true),\n\t\tchromedp.Flag(\"excludeSwitches\", \"enable-automation\"),\n\t\tchromedp.Flag(\"disable-background-networking\", true),\n\t\tchromedp.Flag(\"enable-features\", \"NetworkService,NetworkServiceInProcess\"),\n\t\tchromedp.Flag(\"disable-background-timer-throttling\", true),\n\t\tchromedp.Flag(\"disable-backgrounding-occluded-windows\", true),\n\t\tchromedp.Flag(\"disable-breakpad\", true),\n\t\tchromedp.Flag(\"disable-client-side-phishing-detection\", true),\n\t\tchromedp.Flag(\"disable-default-apps\", true),\n\t\tchromedp.Flag(\"disable-dev-shm-usage\", true),\n\t\tchromedp.Flag(\"disable-extensions\", true),\n\t\tchromedp.Flag(\"disable-features\", \"AudioServiceOutOfProcess,site-per-process,Translate,TranslateUI,BlinkGenPropertyTrees\"),\n\t\tchromedp.Flag(\"disable-hang-monitor\", true),\n\t\tchromedp.Flag(\"disable-ipc-flooding-protection\", true),\n\t\tchromedp.Flag(\"disable-popup-blocking\", true),\n\t\tchromedp.Flag(\"disable-prompt-on-repost\", true),\n\t\tchromedp.Flag(\"disable-renderer-backgrounding\", true),\n\t\tchromedp.Flag(\"disable-sync\", true),\n\t\tchromedp.Flag(\"force-color-profile\", \"srgb\"),\n\t\tchromedp.Flag(\"metrics-recording-only\", true),\n\t\tchromedp.Flag(\"safebrowsing-disable-auto-update\", true),\n\t\tchromedp.Flag(\"password-store\", \"basic\"),\n\t\tchromedp.Flag(\"use-mock-keychain\", true),\n\n\t\t// custom 
args\n\t\tchromedp.Flag(\"kiosk\", true),\n\t\tchromedp.Flag(\"disable-translate\", true),\n\t\tchromedp.Flag(\"enable-automation\", false),\n\t\tchromedp.Flag(\"autoplay-policy\", \"no-user-gesture-required\"),\n\t\tchromedp.Flag(\"window-position\", \"0,0\"),\n\n\t\t// config\n\t\tchromedp.Flag(\"window-size\", fmt.Sprintf(\"%d,%d\", p.Width, p.Height)),\n\t\tchromedp.Flag(\"disable-web-security\", p.Insecure),\n\t\tchromedp.Flag(\"allow-running-insecure-content\", p.Insecure),\n\t\tchromedp.Flag(\"no-sandbox\", !p.EnableChromeSandbox),\n\n\t\t// output\n\t\tchromedp.Env(fmt.Sprintf(\"PULSE_SINK=%s\", p.Info.EgressId)),\n\t\tchromedp.Flag(\"display\", p.Display),\n\t}\n\n\t// custom\n\tfor k, v := range p.ChromeFlags {\n\t\topts = append(opts, chromedp.Flag(k, v))\n\t}\n\n\tallocCtx, allocCancel := chromedp.NewExecAllocator(context.Background(), opts...)\n\n\tvar err error\n\tvar retryable bool\n\tfor i := range chromeRetries {\n\t\tif i > 0 {\n\t\t\tlogger.Debugw(\"navigation timed out, reloading\")\n\t\t}\n\n\t\tchromeCtx, chromeCancel := chromedp.NewContext(allocCtx)\n\t\ts.closeChrome = func() {\n\t\t\tchromeCancel()\n\t\t\tallocCancel()\n\t\t}\n\n\t\terr, retryable = s.navigate(chromeCtx, chromeCancel, webUrl)\n\t\tif !retryable {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn err\n}\n\nfunc (s *WebSource) navigate(chromeCtx context.Context, chromeCancel context.CancelFunc, webUrl string) (error, bool) {\n\tchromedp.ListenTarget(chromeCtx, func(ev interface{}) {\n\t\tswitch ev := ev.(type) {\n\t\tcase *runtime.EventConsoleAPICalled:\n\t\t\tif s.chromeLogger != nil {\n\t\t\t\tif b, err := json.Marshal(ev); err == nil {\n\t\t\t\t\t_, _ = s.chromeLogger.Write(append(b, '\\n'))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor _, arg := range ev.Args {\n\t\t\t\tvar val interface{}\n\t\t\t\terr := json.Unmarshal(arg.Value, &val)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tswitch fmt.Sprint(val) {\n\t\t\t\tcase startRecordingLog:\n\t\t\t\t\tlogger.Infow(\"chrome: 
START_RECORDING\")\n\t\t\t\t\ts.startRecording.Break()\n\n\t\t\t\tcase endRecordingLog:\n\t\t\t\t\tlogger.Infow(\"chrome: END_RECORDING\")\n\t\t\t\t\ts.endRecording.Break()\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase *runtime.EventExceptionThrown:\n\t\t\tif s.chromeLogger != nil {\n\t\t\t\tif b, err := json.Marshal(ev); err == nil {\n\t\t\t\t\t_, _ = s.chromeLogger.Write(append(b, '\\n'))\n\t\t\t\t}\n\t\t\t}\n\t\t\tlogger.Debugw(\"chrome exception\", \"err\", ev.ExceptionDetails.Error())\n\n\t\tcase *target.EventTargetCrashed:\n\t\t\tlogger.Errorw(\"chrome crashed\", nil, \"targetId\", ev.TargetID, \"status\", ev.Status, \"errorCode\", ev.ErrorCode)\n\n\t\tcase *inspector.EventTargetCrashed:\n\t\t\tlogger.Errorw(\"chrome crashed\", nil)\n\t\t}\n\t})\n\n\t// navigate\n\tvar timeout *time.Timer\n\tvar errString string\n\tif err := chromedp.Run(chromeCtx,\n\t\tchromedp.ActionFunc(func(_ context.Context) error {\n\t\t\tlogger.Debugw(\"chrome initialized\")\n\t\t\t// set page load timeout\n\t\t\ttimeout = time.AfterFunc(chromeTimeout, chromeCancel)\n\t\t\treturn nil\n\t\t}),\n\t\tchromedp.ActionFunc(func(ctx context.Context) error {\n\t\t\t// use RunResponse wrapped in ActionFunc to get the response details\n\t\t\tr, err := chromedp.RunResponse(ctx, chromedp.Navigate(webUrl))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif r.Status >= 400 {\n\t\t\t\treturn errors.PageLoadError(r.StatusText)\n\t\t\t}\n\t\t\treturn nil\n\t\t}),\n\t\tchromedp.ActionFunc(func(_ context.Context) error {\n\t\t\t// cancel timer\n\t\t\ttimeout.Stop()\n\t\t\treturn nil\n\t\t}),\n\t\tchromedp.Evaluate(`\n\t\t\tif (document.querySelector('div.error')) {\n\t\t\t\tdocument.querySelector('div.error').innerText;\n\t\t\t} else {\n\t\t\t\t''\n\t\t\t}`, &errString),\n\t); err != nil {\n\t\tif errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {\n\t\t\treturn errors.PageLoadError(\"timed out\"), true\n\t\t}\n\t\tif strings.HasPrefix(err.Error(), chromeFailedToStart) 
{\n\t\t\treturn errors.ChromeError(err), false\n\t\t}\n\t\tif strings.Contains(err.Error(), chromeCertVerifierChanged) {\n\t\t\tlogger.Warnw(\"chrome cert verifier changed, retrying\", nil)\n\t\t\treturn errors.PageLoadError(err.Error()), true\n\t\t}\n\t\treturn errors.PageLoadError(err.Error()), false\n\t} else if errString != \"\" {\n\t\treturn errors.TemplateError(errString), false\n\t}\n\n\treturn nil, false\n}\n"
  },
  {
    "path": "pkg/pipeline/tempo/controller.go",
    "content": "package tempo\n\nimport (\n\t\"time\"\n\n\t\"github.com/linkdata/deadlock\"\n)\n\nconst (\n\tDefaultThreshold = 10 * time.Millisecond // don’t start tiny corrections\n\tMaxDriftBudget   = 2 * time.Second       // cap on processed drift magnitude\n)\n\ntype Controller struct {\n\tmu deadlock.Mutex\n\n\tpending   time.Duration // accumulated, not yet started\n\tcurrent   time.Duration // currently being corrected\n\tprocessed time.Duration // signed sum of ALL corrections already applied\n\n\tcb func(time.Duration) // invoked with the next 'current' to apply\n}\n\nfunc NewController() *Controller { return &Controller{} }\n\n// EnqueueDrift adds signed drift. It may synchronously arm a new correction\n// if idle, above threshold, and starting it would not exceed the budget.\nfunc (tc *Controller) EnqueueDrift(drift time.Duration) {\n\tif drift == 0 {\n\t\treturn\n\t}\n\n\ttc.mu.Lock()\n\ttc.pending += drift\n\n\tvar toStart time.Duration\n\tif tc.current == 0 && tc.pending.Abs() >= DefaultThreshold {\n\t\t// Only start if applying 'pending' keeps processed within budget.\n\t\tif (tc.processed + tc.pending).Abs() < MaxDriftBudget {\n\t\t\ttoStart = tc.pending\n\t\t\ttc.current = toStart\n\t\t\ttc.pending = 0\n\t\t}\n\t}\n\tcb := tc.cb\n\ttc.mu.Unlock()\n\n\tif toStart != 0 && cb != nil {\n\t\tcb(toStart)\n\t}\n}\n\n// DriftProcessed marks the *current* correction as finished and may start the next\n// one if available and within budget.\nfunc (tc *Controller) DriftProcessed() {\n\ttc.mu.Lock()\n\ttc.processed += tc.current\n\ttc.current = 0\n\n\tvar toStart time.Duration\n\tif tc.pending.Abs() >= DefaultThreshold && (tc.processed+tc.pending).Abs() < MaxDriftBudget {\n\t\ttoStart = tc.pending\n\t\ttc.current = toStart\n\t\ttc.pending = 0\n\t}\n\tcb := tc.cb\n\ttc.mu.Unlock()\n\n\tif toStart != 0 && cb != nil {\n\t\tcb(toStart)\n\t}\n}\n\n// OnDriftDetectedCallback sets the callback. 
If a correction is already armed,\n// it’s invoked immediately with that value.\nfunc (tc *Controller) OnDriftDetectedCallback(cb func(time.Duration)) {\n\ttc.mu.Lock()\n\ttc.cb = cb\n\tcur := tc.current\n\ttc.mu.Unlock()\n\n\tif cb != nil && cur != 0 {\n\t\tcb(cur)\n\t}\n}\n\n// Processed returns the total of already-applied corrections.\nfunc (tc *Controller) Processed() time.Duration {\n\ttc.mu.Lock()\n\tdefer tc.mu.Unlock()\n\treturn tc.processed\n}\n"
  },
  {
    "path": "pkg/pipeline/tempo/controller_test.go",
    "content": "package tempo\n\nimport (\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestEnqueueStartsWithinBudget(t *testing.T) {\n\ttc := NewController()\n\n\tvar calls []time.Duration\n\ttc.OnDriftDetectedCallback(func(d time.Duration) { calls = append(calls, d) })\n\n\ttc.EnqueueDrift(30 * time.Millisecond) // > threshold, under budget\n\tif len(calls) != 1 || calls[0] != 30*time.Millisecond {\n\t\tt.Fatalf(\"callback: got %v, want [30ms]\", calls)\n\t}\n\tif got := tc.Processed(); got != 0 {\n\t\tt.Fatalf(\"processed before DriftProcessed: got %v, want 0\", got)\n\t}\n}\n\nfunc TestThresholdAccumulation(t *testing.T) {\n\ttc := NewController()\n\n\tvar calls []time.Duration\n\ttc.OnDriftDetectedCallback(func(d time.Duration) { calls = append(calls, d) })\n\n\ttc.EnqueueDrift(5 * time.Millisecond) // below threshold → no start\n\ttc.EnqueueDrift(6 * time.Millisecond) // total 11ms → start now\n\n\tif len(calls) != 1 || calls[0] != 11*time.Millisecond {\n\t\tt.Fatalf(\"callback: got %v, want [11ms]\", calls)\n\t}\n}\n\nfunc TestDriftProcessedStartsNext(t *testing.T) {\n\ttc := NewController()\n\n\tvar calls []time.Duration\n\ttc.OnDriftDetectedCallback(func(d time.Duration) { calls = append(calls, d) })\n\n\ttc.EnqueueDrift(30 * time.Millisecond) // starts immediately\n\tif len(calls) != 1 || calls[0] != 30*time.Millisecond {\n\t\tt.Fatalf(\"first start: got %v\", calls)\n\t}\n\n\ttc.EnqueueDrift(40 * time.Millisecond) // pending, not started yet\n\tif len(calls) != 1 {\n\t\tt.Fatalf(\"should not start second yet: got %v\", calls)\n\t}\n\n\ttc.DriftProcessed() // finish first → second starts\n\tif len(calls) != 2 || calls[1] != 40*time.Millisecond {\n\t\tt.Fatalf(\"second start: got %v\", calls)\n\t}\n\n\tif got := tc.Processed(); got != 30*time.Millisecond {\n\t\tt.Fatalf(\"processed after first completion: got %v, want 30ms\", got)\n\t}\n}\n\nfunc TestBudgetBlocksAndResumes(t *testing.T) {\n\ttc := NewController()\n\n\tvar calls 
[]time.Duration\n\ttc.OnDriftDetectedCallback(func(d time.Duration) { calls = append(calls, d) })\n\n\t// Spend most of the budget (1.9s)\n\ttc.EnqueueDrift(1900 * time.Millisecond)\n\tif len(calls) != 1 || calls[0] != 1900*time.Millisecond {\n\t\tt.Fatalf(\"start 1.9s: got %v\", calls)\n\t}\n\ttc.DriftProcessed()\n\tif got := tc.Processed(); got != 1900*time.Millisecond {\n\t\tt.Fatalf(\"processed after 1.9s: got %v\", got)\n\t}\n\n\t// +300ms would exceed 2s budget → must NOT start\n\ttc.EnqueueDrift(300 * time.Millisecond)\n\tif len(calls) != 1 {\n\t\tt.Fatalf(\"over-budget should not start: got %v\", calls)\n\t}\n\n\t// Add -650ms → pending becomes -350ms → net processed+pending = 1.55s → start\n\ttc.EnqueueDrift(-650 * time.Millisecond)\n\tif len(calls) != 2 || calls[1] != -350*time.Millisecond {\n\t\tt.Fatalf(\"start -350ms: got %v\", calls)\n\t}\n\ttc.DriftProcessed()\n\tif got := tc.Processed(); got != (1900*time.Millisecond - 350*time.Millisecond) {\n\t\tt.Fatalf(\"processed signed total: got %v, want 1.55s\", got)\n\t}\n}\n\nfunc TestImmediateCallbackOnRegister(t *testing.T) {\n\ttc := NewController()\n\n\t// Arm a correction before registering callback\n\ttc.EnqueueDrift(20 * time.Millisecond)\n\n\tvar calls []time.Duration\n\ttc.OnDriftDetectedCallback(func(d time.Duration) { calls = append(calls, d) })\n\n\t// Should fire immediately with current\n\tif len(calls) != 1 || calls[0] != 20*time.Millisecond {\n\t\tt.Fatalf(\"immediate callback: got %v, want [20ms]\", calls)\n\t}\n}\n\nfunc TestZeroDriftNoop(t *testing.T) {\n\ttc := NewController()\n\n\tvar calls []time.Duration\n\ttc.OnDriftDetectedCallback(func(d time.Duration) { calls = append(calls, d) })\n\n\ttc.EnqueueDrift(0)\n\tif len(calls) != 0 {\n\t\tt.Fatalf(\"zero drift should do nothing, got %v\", calls)\n\t}\n}\n\nfunc TestSignedProcessedAccumulation(t *testing.T) {\n\ttc := NewController()\n\n\tvar calls []time.Duration\n\ttc.OnDriftDetectedCallback(func(d time.Duration) { calls = 
append(calls, d) })\n\n\ttc.EnqueueDrift(30 * time.Millisecond)\n\ttc.DriftProcessed()\n\tif got := tc.Processed(); got != 30*time.Millisecond {\n\t\tt.Fatalf(\"after +30ms processed: got %v\", got)\n\t}\n\n\ttc.EnqueueDrift(-10 * time.Millisecond)\n\ttc.DriftProcessed()\n\tif got := tc.Processed(); got != 20*time.Millisecond {\n\t\tt.Fatalf(\"after +30-10 processed: got %v, want 20ms\", got)\n\t}\n}\n"
  },
  {
    "path": "pkg/pipeline/watch.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage pipeline\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/go-gst/go-gst/gst\"\n\n\t\"github.com/livekit/protocol/logger\"\n\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/gstreamer\"\n\t\"github.com/livekit/egress/pkg/pipeline/builder\"\n\t\"github.com/livekit/egress/pkg/pipeline/source\"\n)\n\nconst (\n\t// noisy gst errors\n\tmsgWrongThread = \"Called from wrong thread\"\n\n\t// noisy gst warnings\n\tmsgKeyframe                    = \"Could not request a keyframe. Files may not split at the exact location they should\"\n\tmsgLatencyQuery                = \"Latency query failed\"\n\tmsgTaps                        = \"can't find exact taps\"\n\tmsgInputDisappeared            = \"Can't copy metadata because input buffer disappeared\"\n\tmsgSkippingSegment             = \"error reading data -1 (reason: Success), skipping segment\"\n\tfnGstAudioResampleCheckDiscont = \"gst_audio_resample_check_discont\"\n\n\t// noisy colorimetry warnings from decoders that omit VUI color info\n\tmsgColorMatrix        = \"Need to specify a color matrix when using YUV format (I420)\"\n\tmsgInvalidColorimetry = \"invalid colorimetry, using default\"\n\n\t// noisy gst fixmes\n\tmsgStreamStart       = \"stream-start event without group-id. 
Consider implementing group-id handling in the upstream elements\"\n\tmsgCreatingStream    = \"Creating random stream-id, consider implementing a deterministic way of creating a stream-id\"\n\tmsgAggregateSubclass = \"Subclass should call gst_aggregator_selected_samples() from its aggregate implementation.\"\n\n\t// rtmp client\n\tcatRtmpClient      = \"rtmpclient\"\n\tfnSendCreateStream = \"send_create_stream\"\n)\n\nvar (\n\tlogLevels = map[gst.DebugLevel]string{\n\t\tgst.LevelError:   \"error\",\n\t\tgst.LevelWarning: \"warning\",\n\t\tgst.LevelFixMe:   \"fixme\",\n\t\tgst.LevelInfo:    \"info\",\n\t\tgst.LevelDebug:   \"debug\",\n\t\tgst.LevelLog:     \"log\",\n\t\tgst.LevelTrace:   \"trace\",\n\t\tgst.LevelMemDump: \"memdump\",\n\t}\n\n\tignore = map[string]bool{\n\t\tmsgWrongThread:                 true,\n\t\tmsgKeyframe:                    true,\n\t\tmsgLatencyQuery:                true,\n\t\tmsgTaps:                        true,\n\t\tmsgInputDisappeared:            true,\n\t\tmsgSkippingSegment:             true,\n\t\tfnGstAudioResampleCheckDiscont: true,\n\t\tmsgColorMatrix:                 true,\n\t\tmsgInvalidColorimetry:          true,\n\t\tmsgStreamStart:                 true,\n\t\tmsgCreatingStream:              true,\n\t\tmsgAggregateSubclass:           true,\n\t}\n)\n\nfunc (c *Controller) gstLog(\n\tcat *gst.DebugCategory,\n\tlevel gst.DebugLevel,\n\tfile, function string, line int,\n\t_ *gst.LoggedObject,\n\tdebugMsg *gst.DebugMessage,\n) {\n\tcategory := cat.GetName()\n\tmessage := debugMsg.Get()\n\tlvl, ok := logLevels[level]\n\tif !ok || ignore[message] || ignore[function] {\n\t\treturn\n\t}\n\n\tif category == catRtmpClient {\n\t\tif function == fnSendCreateStream {\n\t\t\tstreamID := strings.Split(message, \"'\")[1]\n\t\t\tc.updateStreamStartTime(streamID)\n\t\t}\n\t\treturn\n\t}\n\n\tvar msg string\n\tif function != \"\" {\n\t\tmsg = fmt.Sprintf(\"[%s %s] %s: %s\", category, lvl, function, message)\n\t} else {\n\t\tmsg = fmt.Sprintf(\"[%s 
%s] %s\", category, lvl, message)\n\t}\n\tcaller := fmt.Sprintf(\"%s:%d\", file, line)\n\tc.gstLogger.Infow(msg, \"caller\", caller)\n}\n\nfunc (c *Controller) messageWatch(msg *gst.Message) bool {\n\tvar err error\n\tswitch msg.Type() {\n\tcase gst.MessageEOS:\n\t\tlogger.Infow(\"pipeline received EOS\")\n\t\tif c.eosTimer != nil {\n\t\t\tc.eosTimer.Stop()\n\t\t}\n\t\t// Capture pipeline running time at EOS — all content has been flushed\n\t\t// to sinks at this point, so this reflects the actual file duration.\n\t\t// Used as a floor for endedAt to account for pipeline-generated content\n\t\t// beyond the last RTP packet (e.g. mixer silence after all tracks leave).\n\t\tif rt, ok := c.p.RunningTime(); ok {\n\t\t\tc.pipelineEndedAt = c.src.GetStartedAt() + rt.Nanoseconds()\n\t\t}\n\t\tc.eosReceived.Break()\n\t\tc.p.Stop()\n\t\treturn false\n\tcase gst.MessageWarning:\n\t\terr = c.handleMessageWarning(msg.ParseWarning())\n\tcase gst.MessageError:\n\t\terr = c.handleMessageError(msg.ParseError())\n\tcase gst.MessageStateChanged:\n\t\tc.handleMessageStateChanged(msg)\n\tcase gst.MessageElement:\n\t\terr = c.handleMessageElement(msg)\n\tcase gst.MessageQoS:\n\t\tc.handleMessageQoS(msg)\n\t}\n\tif err != nil {\n\t\tc.OnError(err)\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nconst (\n\tmsgClockProblem = \"GStreamer error: clock problem.\"\n)\n\nfunc (c *Controller) handleMessageWarning(gErr *gst.GError) error {\n\telement, name, message := parseDebugInfo(gErr)\n\n\tif gErr.Message() == msgClockProblem {\n\t\terr := errors.ErrGstPipelineError(gErr)\n\t\tlogger.Errorw(gErr.Error(), errors.New(message), \"element\", element)\n\t\treturn err\n\t}\n\n\tif element == elementGstSrtSink {\n\t\tstreamName := strings.Split(name, \"_\")[1]\n\t\tstream, err := c.getStreamSink().GetStream(streamName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn c.streamFailed(context.Background(), stream, gErr)\n\t}\n\n\tlogger.Warnw(gErr.Message(), errors.New(message), \"element\", 
element)\n\treturn nil\n}\n\nconst (\n\telementGstAppSrc       = \"GstAppSrc\"\n\telementGstRtmp2Sink    = \"GstRtmp2Sink\"\n\telementGstSplitMuxSink = \"GstSplitMuxSink\"\n\telementGstSrtSink      = \"GstSRTSink\"\n\n\tmsgStreamingNotNegotiated = \"streaming stopped, reason not-negotiated (-4)\"\n\tmsgMuxer                  = \":muxer\"\n)\n\n// handleMessageError returns true if the error has been handled, false if the pipeline should quit\nfunc (c *Controller) handleMessageError(gErr *gst.GError) error {\n\telement, name, message := parseDebugInfo(gErr)\n\n\tswitch element {\n\tcase elementGstRtmp2Sink:\n\t\tstreamSink := c.getStreamSink()\n\n\t\tstreamName := strings.Split(name, \"_\")[1]\n\t\tstream, err := streamSink.GetStream(streamName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif !c.eosSent.IsBroken() {\n\t\t\t// try reconnecting\n\t\t\tok, err := streamSink.ResetStream(stream, gErr)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorw(\"failed to reset stream\", err)\n\t\t\t} else if ok {\n\t\t\t\tc.trackStreamRetry(context.Background(), stream)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\t// remove sink\n\t\treturn c.streamFailed(context.Background(), stream, gErr)\n\n\tcase elementGstSrtSink:\n\t\tstreamName := strings.Split(name, \"_\")[1]\n\t\tstream, err := c.getStreamSink().GetStream(streamName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn c.streamFailed(context.Background(), stream, gErr)\n\n\tcase elementGstAppSrc:\n\t\tif message == msgStreamingNotNegotiated {\n\t\t\t// send eosSent to app src\n\t\t\tlogger.Debugw(\"streaming stopped\", \"name\", name)\n\t\t\tif sdkSrc, ok := c.src.(*source.SDKSource); ok {\n\t\t\t\tsdkSrc.StreamStopped(name)\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\n\tcase elementGstSplitMuxSink:\n\t\t// We sometimes get GstSplitMuxSink errors if EOS was received before any data\n\t\tif message == msgMuxer {\n\t\t\tif c.eosSent.IsBroken() {\n\t\t\t\tlogger.Debugw(\"GstSplitMuxSink failure after sending 
EOS\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n\n\t// input failure or file write failure. Fatal\n\terr := errors.ErrGstPipelineError(gErr)\n\tlogger.Errorw(gErr.Error(), errors.New(message), \"element\", element, \"name\", name)\n\treturn err\n}\n\nfunc (c *Controller) handleMessageStateChanged(msg *gst.Message) {\n\toldState, newState := msg.ParseStateChanged()\n\ts := msg.Source()\n\tif s == pipelineName {\n\t\tif newState == gst.StatePaused {\n\t\t\tc.paused.Once(func() {\n\t\t\t\tlogger.Infow(\"pipeline paused\")\n\t\t\t\tc.callbacks.OnPipelinePaused()\n\t\t\t})\n\t\t}\n\t\tif newState == gst.StatePlaying {\n\t\t\tc.playing.Once(func() {\n\t\t\t\tvar timeToPlaying time.Duration\n\n\t\t\t\tif !c.pipelineCreatedAt.IsZero() {\n\t\t\t\t\ttimeToPlaying = time.Since(c.pipelineCreatedAt)\n\t\t\t\t}\n\n\t\t\t\tlogger.Infow(\"pipeline playing\", \"timeToPlaying\", timeToPlaying)\n\t\t\t\tc.updateStartTime(c.src.GetStartedAt())\n\t\t\t})\n\t\t}\n\t\treturn\n\t}\n\n\tif strings.HasPrefix(s, \"app_\") {\n\t\ttrackID := s[4:]\n\t\tlogger.Debugw(\"appsrc state change\", \"trackID\", trackID, \"oldState\", oldState.String(), \"newState\", newState.String())\n\t\tif newState == gst.StatePlaying {\n\t\t\tif sdkSrc, ok := c.src.(*source.SDKSource); ok {\n\t\t\t\tsdkSrc.Playing(trackID)\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n}\n\nconst (\n\tmsgFirstSampleMetadata = \"FirstSampleMetadata\"\n\tmsgFragmentOpened      = \"splitmuxsink-fragment-opened\"\n\tmsgFragmentClosed      = \"splitmuxsink-fragment-closed\"\n\tmsgGstMultiFileSink    = \"GstMultiFileSink\"\n)\n\nfunc (c *Controller) handleMessageElement(msg *gst.Message) error {\n\ts := msg.GetStructure()\n\tif s != nil {\n\t\tswitch s.Name() {\n\t\tcase gstreamer.LeakyQueueStatsMessage:\n\t\t\tqueueName, dropped, err := parseLeakyQueueStats(s)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Debugw(\"failed to parse leaky queue stats message\", err)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif strings.HasPrefix(queueName, \"video\") 
{\n\t\t\t\tc.stats.droppedVideoBuffers.Add(dropped)\n\t\t\t\tc.stats.droppedVideoBuffersByQueue[queueName] = dropped\n\t\t\t}\n\t\t\tif strings.HasPrefix(queueName, \"audio\") {\n\t\t\t\tc.stats.queuesDroppedAudioBuffers.Add(dropped)\n\t\t\t\tc.stats.droppedAudioBuffersByQueue[queueName] = dropped\n\t\t\t}\n\n\t\tcase msgFirstSampleMetadata:\n\t\t\tstartDate, err := getFirstSampleMetadataFromGstStructure(s)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tlogger.Debugw(\"received FirstSampleMetadata message\", \"startDate\", startDate)\n\n\t\t\tc.getSegmentSink().UpdateStartDate(startDate)\n\n\t\tcase msgFragmentOpened:\n\t\t\tfilepath, t, err := getSegmentParamsFromGstStructure(s)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorw(\"failed to retrieve segment parameters from event\", err)\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err = c.getSegmentSink().FragmentOpened(filepath, t); err != nil {\n\t\t\t\tlogger.Errorw(\"failed to register new segment with playlist writer\", err, \"location\", filepath, \"runningTime\", t)\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tcase msgFragmentClosed:\n\t\t\tfilepath, t, err := getSegmentParamsFromGstStructure(s)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorw(\"failed to retrieve segment parameters from event\", err, \"location\", filepath, \"runningTime\", t)\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// We need to dispatch to a queue to:\n\t\t\t// 1. Avoid concurrent access to the SegmentsInfo structure\n\t\t\t// 2. 
Ensure that playlists are uploaded in the same order they are enqueued to avoid an older playlist overwriting a newer one\n\t\t\tif err = c.getSegmentSink().FragmentClosed(filepath, t); err != nil {\n\t\t\t\tlogger.Errorw(\"failed to end segment with playlist writer\", err, \"runningTime\", t)\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tcase msgGstMultiFileSink:\n\t\t\tlocation, ts, err := getImageInformationFromGstStructure(s)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\timageSink := c.getImageSink(msg.Source())\n\t\t\tif imageSink == nil {\n\t\t\t\treturn errors.ErrSinkNotFound\n\t\t\t}\n\n\t\t\terr = imageSink.NewImage(location, ts)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc parseLeakyQueueStats(s *gst.Structure) (queue string, dropped uint64, err error) {\n\tqueueValue, err := s.GetValue(\"queue\")\n\tif err != nil {\n\t\treturn \"\", 0, err\n\t}\n\tqueue, _ = queueValue.(string)\n\n\tdroppedValue, err := s.GetValue(\"dropped\")\n\tif err != nil {\n\t\treturn queue, 0, err\n\t}\n\tdropped = normalizeUint64(droppedValue)\n\treturn queue, dropped, nil\n}\n\nfunc normalizeUint64(value interface{}) uint64 {\n\tswitch v := value.(type) {\n\tcase uint64:\n\t\treturn v\n\tcase uint:\n\t\treturn uint64(v)\n\tcase uint32:\n\t\treturn uint64(v)\n\tcase int:\n\t\tif v > 0 {\n\t\t\treturn uint64(v)\n\t\t}\n\tcase int64:\n\t\tif v > 0 {\n\t\t\treturn uint64(v)\n\t\t}\n\tcase int32:\n\t\tif v > 0 {\n\t\t\treturn uint64(v)\n\t\t}\n\t}\n\treturn 0\n}\n\nfunc (c *Controller) handleMessageQoS(msg *gst.Message) {\n\tif isQosForAudioMixer(msg) {\n\t\tqos := msg.ParseQoS()\n\t\tif qos == nil {\n\t\t\tlogger.Debugw(\"failed to parse audio mixer QoS message\")\n\t\t\treturn\n\t\t}\n\t\tc.handleAudioMixerQoS(qos)\n\t\treturn\n\t}\n}\n\nfunc (c *Controller) handleAudioMixerQoS(qosValues *gst.QoSValues) {\n\tc.stats.mixerDroppedAudioBuffers.Inc()\n\tc.stats.mixerDroppedAudioDuration.Add(qosValues.Duration)\n}\n\n// Debug info comes 
in the following format:\n// file.c(line): method_name (): /GstPipeline:pipeline/GstBin:bin_name/GstElement:element_name:\\nError message\nvar gstDebug = regexp.MustCompile(\"(?s)(.*?)GstPipeline:pipeline/GstBin:(.*?)/(.*?):([^:]*)(:\\n)?(.*)\")\n\nfunc parseDebugInfo(gErr *gst.GError) (element, name, message string) {\n\tmatch := gstDebug.FindStringSubmatch(gErr.DebugString())\n\n\tif len(match) == 0 {\n\t\treturn\n\t}\n\n\telement = match[3]\n\tname = match[4]\n\tmessage = match[6]\n\treturn\n}\n\nconst (\n\tfragmentLocation    = \"location\"\n\tfragmentRunningTime = \"running-time\"\n)\n\nfunc getSegmentParamsFromGstStructure(s *gst.Structure) (filepath string, time uint64, err error) {\n\tloc, err := s.GetValue(fragmentLocation)\n\tif err != nil {\n\t\treturn \"\", 0, err\n\t}\n\tfilepath, ok := loc.(string)\n\tif !ok {\n\t\treturn \"\", 0, errors.ErrGstPipelineError(errors.New(\"invalid type for location\"))\n\t}\n\n\tt, err := s.GetValue(fragmentRunningTime)\n\tif err != nil {\n\t\treturn \"\", 0, err\n\t}\n\tti, ok := t.(uint64)\n\tif !ok {\n\t\treturn \"\", 0, errors.ErrGstPipelineError(errors.New(\"invalid type for time\"))\n\t}\n\n\treturn filepath, ti, nil\n}\n\nfunc getFirstSampleMetadataFromGstStructure(s *gst.Structure) (startDate time.Time, err error) {\n\tfirstSampleMetadata := builder.FirstSampleMetadata{}\n\terr = s.UnmarshalInto(&firstSampleMetadata)\n\tif err != nil {\n\t\treturn time.Time{}, err\n\t}\n\n\treturn time.Unix(0, firstSampleMetadata.StartDate), nil\n}\n\nconst (\n\tgstMultiFileSinkFilename  = \"filename\"\n\tgstMultiFileSinkTimestamp = \"timestamp\"\n)\n\nfunc getImageInformationFromGstStructure(s *gst.Structure) (string, uint64, error) {\n\tloc, err := s.GetValue(gstMultiFileSinkFilename)\n\tif err != nil {\n\t\treturn \"\", 0, err\n\t}\n\tfilepath, ok := loc.(string)\n\tif !ok {\n\t\treturn \"\", 0, errors.ErrGstPipelineError(errors.New(\"invalid type for location\"))\n\t}\n\n\tt, err := s.GetValue(gstMultiFileSinkTimestamp)\n\tif 
err != nil {\n\t\treturn \"\", 0, err\n\t}\n\tti, ok := t.(uint64)\n\tif !ok {\n\t\treturn \"\", 0, errors.ErrGstPipelineError(errors.New(\"invalid type for time\"))\n\t}\n\n\treturn filepath, ti, nil\n\n}\n\nfunc isQosForAudioMixer(msg *gst.Message) bool {\n\tsrc := msg.SourceObject()\n\tif src == nil {\n\t\treturn false\n\t}\n\n\tsrcName := src.GetName()\n\tparent := src.GetParent()\n\n\tvar parentName string\n\tif parent != nil {\n\t\tparentName = parent.GetName()\n\t}\n\n\t// a bit brittle as it relies on mixer name not being changed\n\treturn strings.HasPrefix(srcName, \"sink_\") && strings.HasPrefix(parentName, \"audiomixer\")\n}\n"
  },
  {
    "path": "pkg/server/integration.go",
    "content": "// Copyright 2026 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n//go:build integration\n\npackage server\n\nimport (\n\t\"context\"\n\t\"time\"\n\n\t\"github.com/livekit/protocol/rpc\"\n)\n\nfunc (s *Server) ReplayReady(context.Context, *rpc.EgressReadyRequest) (*rpc.EgressReadyResponse, error) {\n\treturn &rpc.EgressReadyResponse{\n\t\tStartAt:    time.Now().UnixNano(),\n\t\tDurationMs: 0,\n\t}, nil\n}\n"
  },
  {
    "path": "pkg/server/server.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage server\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io/fs\"\n\t\"net\"\n\t\"net/http\"\n\t\"os\"\n\t\"path\"\n\t\"time\"\n\n\t\"github.com/frostbyte73/core\"\n\t\"go.uber.org/atomic\"\n\t\"google.golang.org/grpc\"\n\n\t\"github.com/livekit/protocol/logger\"\n\t\"github.com/livekit/protocol/rpc\"\n\t\"github.com/livekit/psrpc\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/info\"\n\t\"github.com/livekit/egress/pkg/ipc\"\n\t\"github.com/livekit/egress/pkg/service\"\n\t\"github.com/livekit/egress/pkg/stats\"\n\t\"github.com/livekit/egress/version\"\n)\n\ntype Server struct {\n\tipc.UnimplementedEgressServiceServer\n\n\tconf *config.ServiceConfig\n\n\tservice.ProcessManager\n\t*service.MetricsService\n\t*service.DebugService\n\tmonitor *stats.Monitor\n\n\tpsrpcServer      rpc.EgressInternalServer\n\tipcServiceServer *grpc.Server\n\tpromServer       *http.Server\n\tioClient         info.SessionReporter\n\n\tactiveRequests atomic.Int32\n\tterminating    core.Fuse\n\tshutdown       core.Fuse\n}\n\nfunc NewServer(conf *config.ServiceConfig, bus psrpc.MessageBus, ioClient info.SessionReporter) (*Server, error) {\n\tpm := service.NewProcessManager()\n\n\ts := &Server{\n\t\tconf:             conf,\n\t\tProcessManager:   pm,\n\t\tMetricsService:   
service.NewMetricsService(pm),\n\t\tDebugService:     service.NewDebugService(pm),\n\t\tipcServiceServer: grpc.NewServer(),\n\t\tioClient:         ioClient,\n\t}\n\n\tioClient.SetWatchdogHandler(func() {\n\t\tlogger.Errorw(\"shutting down server on io client watchdog trigger\", errors.New(\"io client failure\"))\n\t\ts.Shutdown(false, false)\n\t})\n\n\tmonitor, err := stats.NewMonitor(conf, s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ts.monitor = monitor\n\n\tif conf.DebugHandlerPort > 0 {\n\t\ts.StartDebugHandlers(conf.DebugHandlerPort)\n\t}\n\n\tif conf.PrometheusPort > 0 {\n\t\ts.promServer = &http.Server{\n\t\t\tAddr:    fmt.Sprintf(\":%d\", conf.PrometheusPort),\n\t\t\tHandler: s.PromHandler(),\n\t\t}\n\n\t\tpromListener, err := net.Listen(\"tcp\", s.promServer.Addr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tgo func() {\n\t\t\t_ = s.promServer.Serve(promListener)\n\t\t}()\n\t}\n\n\tipcSvcDir := path.Join(config.TmpDir, s.conf.NodeID)\n\tif err = os.MkdirAll(ipcSvcDir, 0755); err != nil {\n\t\treturn nil, err\n\t}\n\n\tipc.RegisterEgressServiceServer(s.ipcServiceServer, s)\n\tif err := ipc.StartServiceListener(s.ipcServiceServer, ipcSvcDir); err != nil {\n\t\treturn nil, err\n\t}\n\n\tpsrpcServer, err := rpc.NewEgressInternalServer(s, bus)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err = psrpcServer.RegisterListActiveEgressTopic(\"\"); err != nil {\n\t\treturn nil, err\n\t}\n\ts.psrpcServer = psrpcServer\n\n\treturn s, nil\n}\n\nfunc (s *Server) StartTemplatesServer(fs fs.FS) error {\n\tif s.conf.TemplatePort == 0 {\n\t\tlogger.Debugw(\"templates server disabled\")\n\t\treturn nil\n\t}\n\n\th := http.FileServer(http.FS(fs))\n\n\tmux := http.NewServeMux()\n\tmux.Handle(\"/\", h)\n\n\tgo func() {\n\t\taddr := fmt.Sprintf(\"localhost:%d\", s.conf.TemplatePort)\n\t\tlogger.Debugw(fmt.Sprintf(\"starting template server on address %s\", addr))\n\t\t_ = http.ListenAndServe(addr, mux)\n\t}()\n\n\treturn nil\n}\n\nfunc (s *Server) Run() error 
{\n\tlogger.Debugw(\"starting service\", \"version\", version.Version)\n\n\tif err := s.psrpcServer.RegisterStartEgressTopic(s.conf.ClusterID); err != nil {\n\t\treturn err\n\t}\n\n\tlogger.Infow(\"service ready\")\n\t<-s.shutdown.Watch()\n\tlogger.Infow(\"draining\")\n\ts.Drain()\n\tlogger.Infow(\"service stopped\")\n\treturn nil\n}\n\nfunc (s *Server) Status() ([]byte, error) {\n\tstatus := map[string]interface{}{\n\t\t\"CpuLoad\": s.monitor.GetAvailableCPU(),\n\t}\n\ts.GetStatus(status)\n\treturn json.Marshal(status)\n}\n\nfunc (s *Server) IsIdle() bool {\n\treturn s.activeRequests.Load() == 0\n}\n\nfunc (s *Server) IsDisabled() bool {\n\treturn s.shutdown.IsBroken() || !s.ioClient.IsHealthy()\n}\n\nfunc (s *Server) IsTerminating() bool {\n\treturn s.terminating.IsBroken()\n}\n\nfunc (s *Server) Shutdown(terminating, kill bool) {\n\tif terminating {\n\t\ts.terminating.Break()\n\t}\n\ts.shutdown.Once(func() {\n\t\ts.psrpcServer.DeregisterStartEgressTopic(s.conf.ClusterID)\n\t})\n\tif kill {\n\t\ts.KillAll()\n\t}\n}\n\nfunc (s *Server) Drain() {\n\tfor !s.IsIdle() {\n\t\ttime.Sleep(time.Second)\n\t}\n\n\ts.psrpcServer.Shutdown()\n\tlogger.Infow(\"draining io client\")\n\ts.ioClient.Drain()\n}\n"
  },
  {
    "path": "pkg/server/server_ipc.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage server\n\nimport (\n\t\"context\"\n\t\"net/http\"\n\n\t\"google.golang.org/protobuf/types/known/emptypb\"\n\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/ipc\"\n\t\"github.com/livekit/protocol/livekit\"\n\t\"github.com/livekit/protocol/logger\"\n)\n\nfunc (s *Server) HandlerReady(_ context.Context, req *ipc.HandlerReadyRequest) (*emptypb.Empty, error) {\n\tlogger.Debugw(\"handler ready\", \"egressID\", req.EgressId)\n\tif err := s.HandlerStarted(req.EgressId); err != nil {\n\t\treturn nil, err\n\t}\n\n\tlogger.Debugw(\"handler ready completed\", \"egressID\", req.EgressId)\n\treturn &emptypb.Empty{}, nil\n}\n\nfunc (s *Server) HandlerUpdate(_ context.Context, info *livekit.EgressInfo) (*emptypb.Empty, error) {\n\tlogger.Debugw(\"handler update\", \"egressID\", info.EgressId)\n\tif err := s.ioClient.UpdateEgress(context.Background(), info); err != nil {\n\t\tlogger.Errorw(\"failed to update egress\", err, \"egressID\", info.EgressId)\n\t}\n\n\tif info.ErrorCode == int32(http.StatusInternalServerError) {\n\t\tlogger.Errorw(\"internal error, shutting down\", errors.New(info.Error))\n\t\ts.Shutdown(false, false)\n\t}\n\tlogger.Debugw(\"handler update completed\", \"egressID\", info.EgressId)\n\n\treturn &emptypb.Empty{}, nil\n}\n\nfunc (s *Server) HandlerFinished(_ context.Context, req *ipc.HandlerFinishedRequest) 
(*emptypb.Empty, error) {\n\tlogger.Debugw(\"handler finished\", \"egressID\", req.EgressId)\n\tif err := s.ioClient.UpdateEgress(context.Background(), req.Info); err != nil {\n\t\tlogger.Errorw(\"failed to update egress\", err, \"egressID\", req.EgressId)\n\t}\n\n\tif err := s.StoreProcessEndedMetrics(req.EgressId, req.Metrics); err != nil {\n\t\tlogger.Errorw(\"failed to store metrics\", err, \"egressID\", req.EgressId)\n\t}\n\n\tlogger.Debugw(\"handler finished completed\", \"egressID\", req.EgressId)\n\treturn &emptypb.Empty{}, nil\n}\n\nfunc (s *Server) StorageEvent(_ context.Context, _ *ipc.StorageEventRequest) (*emptypb.Empty, error) {\n\treturn &emptypb.Empty{}, nil\n}\n"
  },
  {
    "path": "pkg/server/server_rpc.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage server\n\nimport (\n\t\"context\"\n\t\"net/http\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"google.golang.org/protobuf/encoding/protojson\"\n\t\"gopkg.in/yaml.v3\"\n\n\t\"go.opentelemetry.io/otel\"\n\n\t\"github.com/livekit/protocol/egress\"\n\t\"github.com/livekit/protocol/livekit\"\n\t\"github.com/livekit/protocol/logger\"\n\t\"github.com/livekit/protocol/rpc\"\n\t\"github.com/livekit/protocol/utils\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/logging\"\n)\n\nvar (\n\ttracer = otel.Tracer(\"github.com/livekit/egress/pkg/server\")\n)\n\nfunc (s *Server) StartEgress(ctx context.Context, req *rpc.StartEgressRequest) (*livekit.EgressInfo, error) {\n\ts.activeRequests.Inc()\n\n\tctx, span := tracer.Start(ctx, \"Service.StartEgress\")\n\tdefer span.End()\n\n\tif s.IsDisabled() {\n\t\ts.activeRequests.Dec()\n\t\treturn nil, errors.ErrShuttingDown\n\t}\n\tif s.AlreadyExists(req.EgressId) {\n\t\ts.activeRequests.Dec()\n\t\treturn nil, errors.ErrEgressAlreadyExists\n\t}\n\tif err := s.monitor.AcceptRequest(req); err != nil {\n\t\ts.activeRequests.Dec()\n\t\treturn nil, err\n\t}\n\n\tlogger.Infow(\"request received\", \"egressID\", req.EgressId)\n\n\tp, err := config.GetValidatedPipelineConfig(s.conf, req)\n\tif err != nil 
{\n\t\ts.monitor.EgressAborted(req)\n\t\ts.activeRequests.Dec()\n\t\treturn nil, err\n\t}\n\n\tvar typesInput any = p.Info.Request\n\tif e, ok := p.Info.Request.(*livekit.EgressInfo_Replay); ok {\n\t\ttypesInput = e.Replay\n\t}\n\trequestType, outputType := egress.GetTypes(typesInput)\n\tlogger.Infow(\"request validated\",\n\t\t\"egressID\", req.EgressId,\n\t\t\"requestType\", requestType,\n\t\t\"sourceType\", p.Info.SourceType,\n\t\t\"outputType\", outputType,\n\t\t\"room\", p.Info.RoomName,\n\t\t\"request\", p.Info.Request,\n\t)\n\n\terrChan := s.ioClient.CreateEgress(ctx, p.Info)\n\tlaunchErr := s.launchProcess(req, p.Info)\n\tcreateErr := <-errChan\n\n\tswitch {\n\tcase launchErr != nil && createErr != nil:\n\t\ts.processEnded(req, p.Info, nil)\n\t\treturn nil, launchErr\n\n\tcase launchErr != nil:\n\t\ts.processEnded(req, p.Info, launchErr)\n\t\treturn nil, launchErr\n\n\tcase createErr != nil:\n\t\t// launched but failed to save - abort and return error\n\t\tp.Info.Error = createErr.Error()\n\t\tp.Info.ErrorCode = int32(http.StatusInternalServerError)\n\t\ts.AbortProcess(req.EgressId, createErr)\n\t\treturn nil, createErr\n\n\tdefault:\n\t\treturn p.Info, nil\n\t}\n}\n\nfunc (s *Server) launchProcess(req *rpc.StartEgressRequest, info *livekit.EgressInfo) error {\n\t_, span := tracer.Start(context.Background(), \"Service.launchProcess\")\n\tdefer span.End()\n\n\ts.monitor.EgressStarted(req)\n\n\thandlerID := utils.NewGuid(\"EGH_\")\n\tp := &config.PipelineConfig{\n\t\tBaseConfig: s.conf.BaseConfig,\n\t\tHandlerID:  handlerID,\n\t\tTmpDir:     path.Join(config.TmpDir, req.EgressId),\n\t}\n\n\tconfString, err := yaml.Marshal(p)\n\tif err != nil {\n\t\tspan.RecordError(err)\n\t\tlogger.Errorw(\"could not marshal config\", err)\n\t\treturn err\n\t}\n\n\treqString, err := protojson.Marshal(req)\n\tif err != nil {\n\t\tspan.RecordError(err)\n\t\tlogger.Errorw(\"could not marshal request\", err)\n\t\treturn err\n\t}\n\n\tcmd := 
exec.Command(\"egress\",\n\t\t\"run-handler\",\n\t\t\"--config\", string(confString),\n\t\t\"--request\", string(reqString),\n\t)\n\tcmd.Dir = \"/\"\n\n\tl := logging.NewHandlerLogger(handlerID, req.EgressId)\n\tcmd.Stdout = l\n\tcmd.Stderr = l\n\tcmd.SysProcAttr = &syscall.SysProcAttr{Setsid: true}\n\n\tif err = s.Launch(context.Background(), handlerID, req, info, cmd); err != nil {\n\t\treturn err\n\t}\n\n\ts.monitor.UpdatePID(info.EgressId, cmd.Process.Pid)\n\tgo func() {\n\t\terr = cmd.Wait()\n\t\t_ = l.Close()\n\t\ts.processEnded(req, info, err)\n\t}()\n\treturn nil\n}\n\nfunc (s *Server) processEnded(req *rpc.StartEgressRequest, info *livekit.EgressInfo, err error) {\n\tif err != nil {\n\t\t// should only happen if process failed catastrophically\n\t\tnow := time.Now().UnixNano()\n\t\tinfo.UpdatedAt = now\n\t\tinfo.EndedAt = now\n\t\tinfo.Status = livekit.EgressStatus_EGRESS_FAILED\n\t\tif info.Error == \"\" {\n\t\t\tinfo.Error = err.Error()\n\t\t\tinfo.ErrorCode = int32(http.StatusInternalServerError)\n\t\t}\n\t\t_ = s.ioClient.UpdateEgress(context.Background(), info)\n\n\t\tlogger.Errorw(\"process failed\", err, \"egressID\", info.EgressId)\n\t}\n\n\tavgCPU, maxCPU, maxMemory := s.monitor.EgressEnded(req)\n\tif maxCPU > 0 {\n\t\tlogger.Debugw(\"egress metrics\",\n\t\t\t\"egressID\", info.EgressId,\n\t\t\t\"avgCPU\", avgCPU,\n\t\t\t\"maxCPU\", maxCPU,\n\t\t\t\"maxMemory\", maxMemory,\n\t\t)\n\t}\n\n\t// Make sure we delete all the handler context regardless of the handler termination status\n\ttmpDir := path.Join(config.TmpDir, req.EgressId)\n\tos.RemoveAll(tmpDir)\n\n\ts.ProcessFinished(info.EgressId)\n\ts.activeRequests.Dec()\n}\n\nfunc (s *Server) StartEgressAffinity(_ context.Context, req *rpc.StartEgressRequest) float32 {\n\tif s.IsDisabled() || !s.monitor.CanAcceptRequest(req) {\n\t\t// cannot accept\n\t\treturn -1\n\t}\n\n\tif s.activeRequests.Load() == 0 {\n\t\t// group multiple track and track composite requests.\n\t\t// if this instance is idle and 
another is already handling some, the request will go to that server.\n\t\t// this avoids having many instances with one track request each, taking availability from room composite.\n\t\treturn 0.5\n\t}\n\t// already handling a request and has available cpu\n\treturn 1\n}\n\nfunc (s *Server) ListActiveEgress(ctx context.Context, _ *rpc.ListActiveEgressRequest) (*rpc.ListActiveEgressResponse, error) {\n\t_, span := tracer.Start(ctx, \"Service.ListActiveEgress\")\n\tdefer span.End()\n\n\treturn &rpc.ListActiveEgressResponse{\n\t\tEgressIds: s.GetActiveEgressIDs(),\n\t}, nil\n}\n"
  },
  {
    "path": "pkg/service/debug.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage service\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/livekit/protocol/logger\"\n\t\"github.com/livekit/protocol/pprof\"\n\t\"github.com/livekit/psrpc\"\n\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/ipc\"\n)\n\nconst (\n\tgstPipelineDotFileApp = \"gst_pipeline\"\n\tpprofApp              = \"pprof\"\n)\n\ntype DebugService struct {\n\tpm ProcessManager\n}\n\nfunc NewDebugService(pm ProcessManager) *DebugService {\n\treturn &DebugService{\n\t\tpm: pm,\n\t}\n}\n\nfunc (s *DebugService) StartDebugHandlers(port int) {\n\tif port == 0 {\n\t\tlogger.Debugw(\"debug handler disabled\")\n\t\treturn\n\t}\n\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(fmt.Sprintf(\"/%s/\", gstPipelineDotFileApp), s.handleGstPipelineDotFile)\n\tmux.HandleFunc(fmt.Sprintf(\"/%s/\", pprofApp), s.handlePProf)\n\n\tgo func() {\n\t\taddr := fmt.Sprintf(\":%d\", port)\n\t\tlogger.Debugw(fmt.Sprintf(\"starting debug handler on address %s\", addr))\n\t\t_ = http.ListenAndServe(addr, mux)\n\t}()\n}\n\n// URL path format is \"/<application>/<egress_id>/<optional_other_params>\"\nfunc (s *DebugService) handleGstPipelineDotFile(w http.ResponseWriter, r *http.Request) {\n\tpathElements := strings.Split(r.URL.Path, \"/\")\n\tif len(pathElements) < 3 {\n\t\thttp.Error(w, \"malformed url\", 
http.StatusNotFound)\n\t\treturn\n\t}\n\n\tegressID := pathElements[2]\n\tdotFile, err := s.GetGstPipelineDotFile(egressID)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), getErrorCode(err))\n\t\treturn\n\t}\n\t_, _ = w.Write([]byte(dotFile))\n}\n\nfunc (s *DebugService) GetGstPipelineDotFile(egressID string) (string, error) {\n\tc, err := s.pm.GetGRPCClient(egressID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tres, err := c.GetPipelineDot(context.Background(), &ipc.GstPipelineDebugDotRequest{})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn res.DotFile, nil\n}\n\n// URL path format is \"/<application>/<egress_id>/<profile_name>\" or \"/<application>/<profile_name>\" to profile the service\nfunc (s *DebugService) handlePProf(w http.ResponseWriter, r *http.Request) {\n\tvar err error\n\tvar b []byte\n\n\ttimeout, err := strconv.ParseInt(r.URL.Query().Get(\"timeout\"), 10, 32)\n\tif err != nil {\n\t\thttp.Error(w, \"bad timeout parameter\", http.StatusBadRequest)\n\t\treturn\n\t}\n\tdebug, err := strconv.ParseInt(r.URL.Query().Get(\"debug\"), 10, 32)\n\tif err != nil {\n\t\thttp.Error(w, \"bad debug parameter\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tpathElements := strings.Split(r.URL.Path, \"/\")\n\tswitch len(pathElements) {\n\tcase 3:\n\t\t// profile main service\n\t\tb, err = pprof.GetProfileData(context.Background(), pathElements[2], int(timeout), int(debug))\n\n\tcase 4:\n\t\tegressID := pathElements[2]\n\t\tc, err := s.pm.GetGRPCClient(egressID)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"handler not found\", http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\n\t\tres, err := c.GetPProf(context.Background(), &ipc.PProfRequest{\n\t\t\tProfileName: pathElements[3],\n\t\t\tTimeout:     int32(timeout),\n\t\t\tDebug:       int32(debug),\n\t\t})\n\t\tif err == nil {\n\t\t\tb = res.PprofFile\n\t\t}\n\n\tdefault:\n\t\thttp.Error(w, \"malformed url\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\tif err == nil {\n\t\tw.Header().Add(\"Content-Type\", 
\"application/octet-stream\")\n\t\t_, err = w.Write(b)\n\t}\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), getErrorCode(err))\n\t\treturn\n\t}\n}\n\nfunc getErrorCode(err error) int {\n\tvar e psrpc.Error\n\n\tswitch {\n\tcase errors.As(err, &e):\n\t\treturn e.ToHttp()\n\tcase err == nil:\n\t\treturn http.StatusOK\n\tdefault:\n\t\treturn http.StatusInternalServerError\n\t}\n}\n"
  },
  {
    "path": "pkg/service/metrics.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage service\n\nimport (\n\t\"context\"\n\t\"net/http\"\n\t\"strings\"\n\n\t\"github.com/linkdata/deadlock\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/client_golang/prometheus/collectors\"\n\t\"github.com/prometheus/client_golang/prometheus/promhttp\"\n\tdto \"github.com/prometheus/client_model/go\"\n\t\"github.com/prometheus/common/expfmt\"\n\t\"github.com/prometheus/common/model\"\n\t\"golang.org/x/exp/maps\"\n\n\t\"github.com/livekit/protocol/logger\"\n\t\"go.opentelemetry.io/otel\"\n)\n\ntype MetricsService struct {\n\tpm ProcessManager\n\n\tmu             deadlock.Mutex\n\tpendingMetrics []*dto.MetricFamily\n}\n\nvar (\n\ttracer = otel.Tracer(\"github.com/livekit/egress/pkg/service\")\n)\n\nfunc NewMetricsService(pm ProcessManager) *MetricsService {\n\tprometheus.Unregister(collectors.NewGoCollector())\n\tprometheus.MustRegister(collectors.NewGoCollector(collectors.WithGoCollectorRuntimeMetrics(collectors.MetricsAll)))\n\n\treturn &MetricsService{\n\t\tpm: pm,\n\t}\n}\n\nfunc (s *MetricsService) PromHandler() http.Handler {\n\treturn promhttp.InstrumentMetricHandler(\n\t\tprometheus.DefaultRegisterer, promhttp.HandlerFor(s.CreateGatherer(), promhttp.HandlerOpts{}),\n\t)\n}\n\nfunc (s *MetricsService) CreateGatherer() prometheus.Gatherer {\n\treturn prometheus.GathererFunc(func() ([]*dto.MetricFamily, 
error) {\n\t\t_, span := tracer.Start(context.Background(), \"Service.GathererOfHandlerMetrics\")\n\t\tdefer span.End()\n\n\t\tgatherers := prometheus.Gatherers{}\n\t\t// Include the default repo\n\t\tgatherers = append(gatherers, prometheus.DefaultGatherer)\n\t\t// Include process-ended metrics\n\t\tgatherers = append(gatherers, prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) {\n\t\t\ts.mu.Lock()\n\t\t\tm := s.pendingMetrics\n\t\t\ts.pendingMetrics = nil\n\t\t\ts.mu.Unlock()\n\t\t\treturn m, nil\n\t\t}))\n\n\t\tgatherers = append(gatherers, s.pm.GetGatherers()...)\n\n\t\treturn gatherers.Gather()\n\t})\n}\n\nfunc (s *MetricsService) StoreProcessEndedMetrics(egressID string, metrics string) error {\n\tm, err := deserializeMetrics(egressID, metrics)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.mu.Lock()\n\ts.pendingMetrics = append(s.pendingMetrics, m...)\n\ts.mu.Unlock()\n\n\treturn nil\n}\n\nfunc deserializeMetrics(egressID string, s string) ([]*dto.MetricFamily, error) {\n\tparser := expfmt.NewTextParser(model.LegacyValidation)\n\tfamilies, err := parser.TextToMetricFamilies(strings.NewReader(s))\n\tif err != nil {\n\t\tlogger.Warnw(\"failed to parse ms from handler\", err, \"egress_id\", egressID)\n\t\treturn make([]*dto.MetricFamily, 0), nil // don't return an error, just skip this handler\n\t}\n\n\t// Add an egress_id label to every metric in all the families, if it doesn't already have one\n\tapplyDefaultLabel(families, egressID)\n\n\treturn maps.Values(families), nil\n}\n\nfunc applyDefaultLabel(families map[string]*dto.MetricFamily, egressID string) {\n\tegressIDLabel := \"egress_id\"\n\tegressLabelPair := &dto.LabelPair{\n\t\tName:  &egressIDLabel,\n\t\tValue: &egressID,\n\t}\n\tfor _, family := range families {\n\t\tfor _, metric := range family.Metric {\n\t\t\tif metric.Label == nil {\n\t\t\t\tmetric.Label = make([]*dto.LabelPair, 0)\n\t\t\t}\n\t\t\tfound := false\n\t\t\tfor _, label := range metric.Label {\n\t\t\t\tif label.GetName() == 
\"egress_id\" {\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\tmetric.Label = append(metric.Label, egressLabelPair)\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "pkg/service/process.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage service\n\nimport (\n\t\"context\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com/frostbyte73/core\"\n\t\"github.com/linkdata/deadlock\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\tdto \"github.com/prometheus/client_model/go\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/ipc\"\n\t\"github.com/livekit/protocol/livekit\"\n\t\"github.com/livekit/protocol/logger\"\n\t\"github.com/livekit/protocol/rpc\"\n)\n\nconst launchTimeout = 10 * time.Second\n\n//go:generate go tool github.com/maxbrunsfeld/counterfeiter/v6  . 
ProcessManager\n\ntype ProcessManager interface {\n\tLaunch(ctx context.Context, handlerID string, req *rpc.StartEgressRequest, info *livekit.EgressInfo, cmd *exec.Cmd) error\n\tGetContext(egressID string) context.Context\n\tAlreadyExists(egressID string) bool\n\tHandlerStarted(egressID string) error\n\tGetActiveEgressIDs() []string\n\tGetStatus(info map[string]interface{})\n\tGetGatherers() []prometheus.Gatherer\n\tGetGRPCClient(egressID string) (ipc.EgressHandlerClient, error)\n\tKillAll()\n\tAbortProcess(egressID string, err error)\n\tKillProcess(egressID string, err error)\n\tProcessFinished(egressID string)\n}\n\ntype processManager struct {\n\tmu             deadlock.RWMutex\n\tactiveHandlers map[string]*Process\n}\n\nfunc NewProcessManager() ProcessManager {\n\treturn &processManager{\n\t\tactiveHandlers: make(map[string]*Process),\n\t}\n}\n\nfunc (pm *processManager) Launch(\n\tctx context.Context,\n\thandlerID string,\n\treq *rpc.StartEgressRequest,\n\tinfo *livekit.EgressInfo,\n\tcmd *exec.Cmd,\n) error {\n\tipcHandlerDir := path.Join(config.TmpDir, handlerID)\n\tif err := os.MkdirAll(ipcHandlerDir, 0755); err != nil {\n\t\treturn err\n\t}\n\n\tipcClient, err := ipc.NewHandlerClient(ipcHandlerDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp := &Process{\n\t\tctx:              ctx,\n\t\thandlerID:        handlerID,\n\t\treq:              req,\n\t\tinfo:             info,\n\t\tcmd:              cmd,\n\t\tipcHandlerClient: ipcClient,\n\t\tready:            make(chan struct{}),\n\t}\n\n\tpm.mu.Lock()\n\tpm.activeHandlers[info.EgressId] = p\n\tpm.mu.Unlock()\n\n\tif err = cmd.Start(); err != nil {\n\t\tlogger.Errorw(\"could not launch process\", err)\n\t\treturn err\n\t}\n\n\tselect {\n\tcase <-p.ready:\n\t\treturn nil\n\n\tcase <-time.After(launchTimeout):\n\t\tlogger.Warnw(\"no response from handler\", nil, \"egressID\", info.EgressId)\n\t\t_ = cmd.Process.Kill()\n\t\t_ = cmd.Wait()\n\t\treturn errors.ErrHandlerFailedToStart\n\t}\n}\n\nfunc (pm 
*processManager) GetContext(egressID string) context.Context {\n\tpm.mu.RLock()\n\tdefer pm.mu.RUnlock()\n\n\tif p, ok := pm.activeHandlers[egressID]; ok {\n\t\treturn p.ctx\n\t}\n\n\treturn context.Background()\n}\n\nfunc (pm *processManager) AlreadyExists(egressID string) bool {\n\tpm.mu.RLock()\n\tdefer pm.mu.RUnlock()\n\n\t_, ok := pm.activeHandlers[egressID]\n\treturn ok\n}\n\nfunc (pm *processManager) HandlerStarted(egressID string) error {\n\tpm.mu.RLock()\n\tdefer pm.mu.RUnlock()\n\n\tif p, ok := pm.activeHandlers[egressID]; ok {\n\t\tclose(p.ready)\n\t\treturn nil\n\t}\n\n\treturn errors.ErrEgressNotFound\n}\n\nfunc (pm *processManager) GetActiveEgressIDs() []string {\n\tpm.mu.RLock()\n\tdefer pm.mu.RUnlock()\n\n\tegressIDs := make([]string, 0, len(pm.activeHandlers))\n\tfor egressID := range pm.activeHandlers {\n\t\tegressIDs = append(egressIDs, egressID)\n\t}\n\n\treturn egressIDs\n}\n\nfunc (pm *processManager) GetStatus(info map[string]interface{}) {\n\tpm.mu.RLock()\n\tdefer pm.mu.RUnlock()\n\n\tfor _, h := range pm.activeHandlers {\n\t\tinfo[h.req.EgressId] = h.req.Request\n\t}\n}\n\nfunc (pm *processManager) GetGatherers() []prometheus.Gatherer {\n\tpm.mu.RLock()\n\tdefer pm.mu.RUnlock()\n\n\thandlers := make([]prometheus.Gatherer, 0, len(pm.activeHandlers))\n\tfor _, p := range pm.activeHandlers {\n\t\thandlers = append(handlers, p)\n\t}\n\n\treturn handlers\n}\n\nfunc (pm *processManager) GetGRPCClient(egressID string) (ipc.EgressHandlerClient, error) {\n\tpm.mu.RLock()\n\tdefer pm.mu.RUnlock()\n\n\th, ok := pm.activeHandlers[egressID]\n\tif !ok {\n\t\treturn nil, errors.ErrEgressNotFound\n\t}\n\treturn h.ipcHandlerClient, nil\n}\n\nfunc (pm *processManager) KillAll() {\n\tpm.mu.RLock()\n\tdefer pm.mu.RUnlock()\n\n\tfor _, h := range pm.activeHandlers {\n\t\th.kill(errors.ErrShuttingDown)\n\t}\n}\n\nfunc (pm *processManager) AbortProcess(egressID string, err error) {\n\tlogger.Infow(\"aborting egress\", err, \"egressID\", 
egressID)\n\tpm.mu.RLock()\n\tdefer pm.mu.RUnlock()\n\n\tif h, ok := pm.activeHandlers[egressID]; ok {\n\t\tlogger.Warnw(\"aborting handler\", err, \"egressID\", egressID)\n\t\th.kill(err)\n\t\th.ipcHandlerClient.Close()\n\t\tdelete(pm.activeHandlers, egressID)\n\t}\n\tlogger.Infow(\"aborting egress completed\", \"egressID\", egressID)\n}\n\nfunc (pm *processManager) KillProcess(egressID string, err error) {\n\tlogger.Infow(\"killing egress\", err, \"egressID\", egressID)\n\tpm.mu.RLock()\n\tdefer pm.mu.RUnlock()\n\n\tif h, ok := pm.activeHandlers[egressID]; ok {\n\t\tlogger.Errorw(\"killing handler\", err, \"egressID\", egressID)\n\t\th.kill(err)\n\t}\n\tlogger.Infow(\"killing egress completed\", \"egressID\", egressID)\n}\n\nfunc (pm *processManager) ProcessFinished(egressID string) {\n\tlogger.Debugw(\"process finished\", \"egressID\", egressID)\n\tpm.mu.Lock()\n\tdefer pm.mu.Unlock()\n\n\tp, ok := pm.activeHandlers[egressID]\n\tif ok {\n\t\tlogger.Debugw(\"process finished, closing handler client\", \"egressID\", egressID)\n\t\tp.ipcHandlerClient.Close()\n\t\tp.closed.Break()\n\t}\n\n\tdelete(pm.activeHandlers, egressID)\n\tlogger.Debugw(\"process finished, deleted from active handlers\", \"egressID\", egressID)\n}\n\ntype Process struct {\n\tctx              context.Context\n\thandlerID        string\n\treq              *rpc.StartEgressRequest\n\tinfo             *livekit.EgressInfo\n\tcmd              *exec.Cmd\n\tipcHandlerClient *ipc.EgressHandlerClientWrapper\n\tready            chan struct{}\n\tclosed           core.Fuse\n}\n\n// Gather implements the prometheus.Gatherer interface on server-side to allow aggregation of handler ms\nfunc (p *Process) Gather() ([]*dto.MetricFamily, error) {\n\t// Get the ms from the handler via IPC\n\tmetricsResponse, err := p.ipcHandlerClient.GetMetrics(context.Background(), &ipc.MetricsRequest{})\n\tif err != nil {\n\t\tif !p.closed.IsBroken() {\n\t\t\tlogger.Warnw(\"failed to obtain ms from handler\", err, \"egressID\", 
p.req.EgressId)\n\t\t}\n\t\treturn make([]*dto.MetricFamily, 0), nil // don't return an error, just skip this handler\n\t}\n\n\t// Parse the result to match the Gatherer interface\n\treturn deserializeMetrics(p.info.EgressId, metricsResponse.Metrics)\n}\n\nfunc (p *Process) kill(e error) {\n\tp.closed.Once(func() {\n\t\tif _, err := p.ipcHandlerClient.KillEgress(p.ctx, &ipc.KillEgressRequest{\n\t\t\tError: e.Error(),\n\t\t}); err != nil {\n\t\t\tif err = p.cmd.Process.Signal(syscall.SIGINT); err != nil {\n\t\t\t\tlogger.Errorw(\"failed to kill Process\", err, \"egressID\", p.req.EgressId)\n\t\t\t}\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "pkg/service/servicefakes/fake_process_manager.go",
    "content": "// Code generated by counterfeiter. DO NOT EDIT.\npackage servicefakes\n\nimport (\n\t\"context\"\n\t\"os/exec\"\n\t\"sync\"\n\n\t\"github.com/livekit/egress/pkg/ipc\"\n\t\"github.com/livekit/egress/pkg/service\"\n\t\"github.com/livekit/protocol/livekit\"\n\t\"github.com/livekit/protocol/rpc\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\ntype FakeProcessManager struct {\n\tAbortProcessStub        func(string, error)\n\tabortProcessMutex       sync.RWMutex\n\tabortProcessArgsForCall []struct {\n\t\targ1 string\n\t\targ2 error\n\t}\n\tAlreadyExistsStub        func(string) bool\n\talreadyExistsMutex       sync.RWMutex\n\talreadyExistsArgsForCall []struct {\n\t\targ1 string\n\t}\n\talreadyExistsReturns struct {\n\t\tresult1 bool\n\t}\n\talreadyExistsReturnsOnCall map[int]struct {\n\t\tresult1 bool\n\t}\n\tGetActiveEgressIDsStub        func() []string\n\tgetActiveEgressIDsMutex       sync.RWMutex\n\tgetActiveEgressIDsArgsForCall []struct {\n\t}\n\tgetActiveEgressIDsReturns struct {\n\t\tresult1 []string\n\t}\n\tgetActiveEgressIDsReturnsOnCall map[int]struct {\n\t\tresult1 []string\n\t}\n\tGetContextStub        func(string) context.Context\n\tgetContextMutex       sync.RWMutex\n\tgetContextArgsForCall []struct {\n\t\targ1 string\n\t}\n\tgetContextReturns struct {\n\t\tresult1 context.Context\n\t}\n\tgetContextReturnsOnCall map[int]struct {\n\t\tresult1 context.Context\n\t}\n\tGetGRPCClientStub        func(string) (ipc.EgressHandlerClient, error)\n\tgetGRPCClientMutex       sync.RWMutex\n\tgetGRPCClientArgsForCall []struct {\n\t\targ1 string\n\t}\n\tgetGRPCClientReturns struct {\n\t\tresult1 ipc.EgressHandlerClient\n\t\tresult2 error\n\t}\n\tgetGRPCClientReturnsOnCall map[int]struct {\n\t\tresult1 ipc.EgressHandlerClient\n\t\tresult2 error\n\t}\n\tGetGatherersStub        func() []prometheus.Gatherer\n\tgetGatherersMutex       sync.RWMutex\n\tgetGatherersArgsForCall []struct {\n\t}\n\tgetGatherersReturns struct {\n\t\tresult1 
[]prometheus.Gatherer\n\t}\n\tgetGatherersReturnsOnCall map[int]struct {\n\t\tresult1 []prometheus.Gatherer\n\t}\n\tGetStatusStub        func(map[string]interface{})\n\tgetStatusMutex       sync.RWMutex\n\tgetStatusArgsForCall []struct {\n\t\targ1 map[string]interface{}\n\t}\n\tHandlerStartedStub        func(string) error\n\thandlerStartedMutex       sync.RWMutex\n\thandlerStartedArgsForCall []struct {\n\t\targ1 string\n\t}\n\thandlerStartedReturns struct {\n\t\tresult1 error\n\t}\n\thandlerStartedReturnsOnCall map[int]struct {\n\t\tresult1 error\n\t}\n\tKillAllStub        func()\n\tkillAllMutex       sync.RWMutex\n\tkillAllArgsForCall []struct {\n\t}\n\tKillProcessStub        func(string, error)\n\tkillProcessMutex       sync.RWMutex\n\tkillProcessArgsForCall []struct {\n\t\targ1 string\n\t\targ2 error\n\t}\n\tLaunchStub        func(context.Context, string, *rpc.StartEgressRequest, *livekit.EgressInfo, *exec.Cmd) error\n\tlaunchMutex       sync.RWMutex\n\tlaunchArgsForCall []struct {\n\t\targ1 context.Context\n\t\targ2 string\n\t\targ3 *rpc.StartEgressRequest\n\t\targ4 *livekit.EgressInfo\n\t\targ5 *exec.Cmd\n\t}\n\tlaunchReturns struct {\n\t\tresult1 error\n\t}\n\tlaunchReturnsOnCall map[int]struct {\n\t\tresult1 error\n\t}\n\tProcessFinishedStub        func(string)\n\tprocessFinishedMutex       sync.RWMutex\n\tprocessFinishedArgsForCall []struct {\n\t\targ1 string\n\t}\n\tinvocations      map[string][][]interface{}\n\tinvocationsMutex sync.RWMutex\n}\n\nfunc (fake *FakeProcessManager) AbortProcess(arg1 string, arg2 error) {\n\tfake.abortProcessMutex.Lock()\n\tfake.abortProcessArgsForCall = append(fake.abortProcessArgsForCall, struct {\n\t\targ1 string\n\t\targ2 error\n\t}{arg1, arg2})\n\tstub := fake.AbortProcessStub\n\tfake.recordInvocation(\"AbortProcess\", []interface{}{arg1, arg2})\n\tfake.abortProcessMutex.Unlock()\n\tif stub != nil {\n\t\tfake.AbortProcessStub(arg1, arg2)\n\t}\n}\n\nfunc (fake *FakeProcessManager) AbortProcessCallCount() int 
{\n\tfake.abortProcessMutex.RLock()\n\tdefer fake.abortProcessMutex.RUnlock()\n\treturn len(fake.abortProcessArgsForCall)\n}\n\nfunc (fake *FakeProcessManager) AbortProcessCalls(stub func(string, error)) {\n\tfake.abortProcessMutex.Lock()\n\tdefer fake.abortProcessMutex.Unlock()\n\tfake.AbortProcessStub = stub\n}\n\nfunc (fake *FakeProcessManager) AbortProcessArgsForCall(i int) (string, error) {\n\tfake.abortProcessMutex.RLock()\n\tdefer fake.abortProcessMutex.RUnlock()\n\targsForCall := fake.abortProcessArgsForCall[i]\n\treturn argsForCall.arg1, argsForCall.arg2\n}\n\nfunc (fake *FakeProcessManager) AlreadyExists(arg1 string) bool {\n\tfake.alreadyExistsMutex.Lock()\n\tret, specificReturn := fake.alreadyExistsReturnsOnCall[len(fake.alreadyExistsArgsForCall)]\n\tfake.alreadyExistsArgsForCall = append(fake.alreadyExistsArgsForCall, struct {\n\t\targ1 string\n\t}{arg1})\n\tstub := fake.AlreadyExistsStub\n\tfakeReturns := fake.alreadyExistsReturns\n\tfake.recordInvocation(\"AlreadyExists\", []interface{}{arg1})\n\tfake.alreadyExistsMutex.Unlock()\n\tif stub != nil {\n\t\treturn stub(arg1)\n\t}\n\tif specificReturn {\n\t\treturn ret.result1\n\t}\n\treturn fakeReturns.result1\n}\n\nfunc (fake *FakeProcessManager) AlreadyExistsCallCount() int {\n\tfake.alreadyExistsMutex.RLock()\n\tdefer fake.alreadyExistsMutex.RUnlock()\n\treturn len(fake.alreadyExistsArgsForCall)\n}\n\nfunc (fake *FakeProcessManager) AlreadyExistsCalls(stub func(string) bool) {\n\tfake.alreadyExistsMutex.Lock()\n\tdefer fake.alreadyExistsMutex.Unlock()\n\tfake.AlreadyExistsStub = stub\n}\n\nfunc (fake *FakeProcessManager) AlreadyExistsArgsForCall(i int) string {\n\tfake.alreadyExistsMutex.RLock()\n\tdefer fake.alreadyExistsMutex.RUnlock()\n\targsForCall := fake.alreadyExistsArgsForCall[i]\n\treturn argsForCall.arg1\n}\n\nfunc (fake *FakeProcessManager) AlreadyExistsReturns(result1 bool) {\n\tfake.alreadyExistsMutex.Lock()\n\tdefer fake.alreadyExistsMutex.Unlock()\n\tfake.AlreadyExistsStub = 
nil\n\tfake.alreadyExistsReturns = struct {\n\t\tresult1 bool\n\t}{result1}\n}\n\nfunc (fake *FakeProcessManager) AlreadyExistsReturnsOnCall(i int, result1 bool) {\n\tfake.alreadyExistsMutex.Lock()\n\tdefer fake.alreadyExistsMutex.Unlock()\n\tfake.AlreadyExistsStub = nil\n\tif fake.alreadyExistsReturnsOnCall == nil {\n\t\tfake.alreadyExistsReturnsOnCall = make(map[int]struct {\n\t\t\tresult1 bool\n\t\t})\n\t}\n\tfake.alreadyExistsReturnsOnCall[i] = struct {\n\t\tresult1 bool\n\t}{result1}\n}\n\nfunc (fake *FakeProcessManager) GetActiveEgressIDs() []string {\n\tfake.getActiveEgressIDsMutex.Lock()\n\tret, specificReturn := fake.getActiveEgressIDsReturnsOnCall[len(fake.getActiveEgressIDsArgsForCall)]\n\tfake.getActiveEgressIDsArgsForCall = append(fake.getActiveEgressIDsArgsForCall, struct {\n\t}{})\n\tstub := fake.GetActiveEgressIDsStub\n\tfakeReturns := fake.getActiveEgressIDsReturns\n\tfake.recordInvocation(\"GetActiveEgressIDs\", []interface{}{})\n\tfake.getActiveEgressIDsMutex.Unlock()\n\tif stub != nil {\n\t\treturn stub()\n\t}\n\tif specificReturn {\n\t\treturn ret.result1\n\t}\n\treturn fakeReturns.result1\n}\n\nfunc (fake *FakeProcessManager) GetActiveEgressIDsCallCount() int {\n\tfake.getActiveEgressIDsMutex.RLock()\n\tdefer fake.getActiveEgressIDsMutex.RUnlock()\n\treturn len(fake.getActiveEgressIDsArgsForCall)\n}\n\nfunc (fake *FakeProcessManager) GetActiveEgressIDsCalls(stub func() []string) {\n\tfake.getActiveEgressIDsMutex.Lock()\n\tdefer fake.getActiveEgressIDsMutex.Unlock()\n\tfake.GetActiveEgressIDsStub = stub\n}\n\nfunc (fake *FakeProcessManager) GetActiveEgressIDsReturns(result1 []string) {\n\tfake.getActiveEgressIDsMutex.Lock()\n\tdefer fake.getActiveEgressIDsMutex.Unlock()\n\tfake.GetActiveEgressIDsStub = nil\n\tfake.getActiveEgressIDsReturns = struct {\n\t\tresult1 []string\n\t}{result1}\n}\n\nfunc (fake *FakeProcessManager) GetActiveEgressIDsReturnsOnCall(i int, result1 []string) {\n\tfake.getActiveEgressIDsMutex.Lock()\n\tdefer 
fake.getActiveEgressIDsMutex.Unlock()\n\tfake.GetActiveEgressIDsStub = nil\n\tif fake.getActiveEgressIDsReturnsOnCall == nil {\n\t\tfake.getActiveEgressIDsReturnsOnCall = make(map[int]struct {\n\t\t\tresult1 []string\n\t\t})\n\t}\n\tfake.getActiveEgressIDsReturnsOnCall[i] = struct {\n\t\tresult1 []string\n\t}{result1}\n}\n\nfunc (fake *FakeProcessManager) GetContext(arg1 string) context.Context {\n\tfake.getContextMutex.Lock()\n\tret, specificReturn := fake.getContextReturnsOnCall[len(fake.getContextArgsForCall)]\n\tfake.getContextArgsForCall = append(fake.getContextArgsForCall, struct {\n\t\targ1 string\n\t}{arg1})\n\tstub := fake.GetContextStub\n\tfakeReturns := fake.getContextReturns\n\tfake.recordInvocation(\"GetContext\", []interface{}{arg1})\n\tfake.getContextMutex.Unlock()\n\tif stub != nil {\n\t\treturn stub(arg1)\n\t}\n\tif specificReturn {\n\t\treturn ret.result1\n\t}\n\treturn fakeReturns.result1\n}\n\nfunc (fake *FakeProcessManager) GetContextCallCount() int {\n\tfake.getContextMutex.RLock()\n\tdefer fake.getContextMutex.RUnlock()\n\treturn len(fake.getContextArgsForCall)\n}\n\nfunc (fake *FakeProcessManager) GetContextCalls(stub func(string) context.Context) {\n\tfake.getContextMutex.Lock()\n\tdefer fake.getContextMutex.Unlock()\n\tfake.GetContextStub = stub\n}\n\nfunc (fake *FakeProcessManager) GetContextArgsForCall(i int) string {\n\tfake.getContextMutex.RLock()\n\tdefer fake.getContextMutex.RUnlock()\n\targsForCall := fake.getContextArgsForCall[i]\n\treturn argsForCall.arg1\n}\n\nfunc (fake *FakeProcessManager) GetContextReturns(result1 context.Context) {\n\tfake.getContextMutex.Lock()\n\tdefer fake.getContextMutex.Unlock()\n\tfake.GetContextStub = nil\n\tfake.getContextReturns = struct {\n\t\tresult1 context.Context\n\t}{result1}\n}\n\nfunc (fake *FakeProcessManager) GetContextReturnsOnCall(i int, result1 context.Context) {\n\tfake.getContextMutex.Lock()\n\tdefer fake.getContextMutex.Unlock()\n\tfake.GetContextStub = nil\n\tif 
fake.getContextReturnsOnCall == nil {\n\t\tfake.getContextReturnsOnCall = make(map[int]struct {\n\t\t\tresult1 context.Context\n\t\t})\n\t}\n\tfake.getContextReturnsOnCall[i] = struct {\n\t\tresult1 context.Context\n\t}{result1}\n}\n\nfunc (fake *FakeProcessManager) GetGRPCClient(arg1 string) (ipc.EgressHandlerClient, error) {\n\tfake.getGRPCClientMutex.Lock()\n\tret, specificReturn := fake.getGRPCClientReturnsOnCall[len(fake.getGRPCClientArgsForCall)]\n\tfake.getGRPCClientArgsForCall = append(fake.getGRPCClientArgsForCall, struct {\n\t\targ1 string\n\t}{arg1})\n\tstub := fake.GetGRPCClientStub\n\tfakeReturns := fake.getGRPCClientReturns\n\tfake.recordInvocation(\"GetGRPCClient\", []interface{}{arg1})\n\tfake.getGRPCClientMutex.Unlock()\n\tif stub != nil {\n\t\treturn stub(arg1)\n\t}\n\tif specificReturn {\n\t\treturn ret.result1, ret.result2\n\t}\n\treturn fakeReturns.result1, fakeReturns.result2\n}\n\nfunc (fake *FakeProcessManager) GetGRPCClientCallCount() int {\n\tfake.getGRPCClientMutex.RLock()\n\tdefer fake.getGRPCClientMutex.RUnlock()\n\treturn len(fake.getGRPCClientArgsForCall)\n}\n\nfunc (fake *FakeProcessManager) GetGRPCClientCalls(stub func(string) (ipc.EgressHandlerClient, error)) {\n\tfake.getGRPCClientMutex.Lock()\n\tdefer fake.getGRPCClientMutex.Unlock()\n\tfake.GetGRPCClientStub = stub\n}\n\nfunc (fake *FakeProcessManager) GetGRPCClientArgsForCall(i int) string {\n\tfake.getGRPCClientMutex.RLock()\n\tdefer fake.getGRPCClientMutex.RUnlock()\n\targsForCall := fake.getGRPCClientArgsForCall[i]\n\treturn argsForCall.arg1\n}\n\nfunc (fake *FakeProcessManager) GetGRPCClientReturns(result1 ipc.EgressHandlerClient, result2 error) {\n\tfake.getGRPCClientMutex.Lock()\n\tdefer fake.getGRPCClientMutex.Unlock()\n\tfake.GetGRPCClientStub = nil\n\tfake.getGRPCClientReturns = struct {\n\t\tresult1 ipc.EgressHandlerClient\n\t\tresult2 error\n\t}{result1, result2}\n}\n\nfunc (fake *FakeProcessManager) GetGRPCClientReturnsOnCall(i int, result1 ipc.EgressHandlerClient, 
result2 error) {\n\tfake.getGRPCClientMutex.Lock()\n\tdefer fake.getGRPCClientMutex.Unlock()\n\tfake.GetGRPCClientStub = nil\n\tif fake.getGRPCClientReturnsOnCall == nil {\n\t\tfake.getGRPCClientReturnsOnCall = make(map[int]struct {\n\t\t\tresult1 ipc.EgressHandlerClient\n\t\t\tresult2 error\n\t\t})\n\t}\n\tfake.getGRPCClientReturnsOnCall[i] = struct {\n\t\tresult1 ipc.EgressHandlerClient\n\t\tresult2 error\n\t}{result1, result2}\n}\n\nfunc (fake *FakeProcessManager) GetGatherers() []prometheus.Gatherer {\n\tfake.getGatherersMutex.Lock()\n\tret, specificReturn := fake.getGatherersReturnsOnCall[len(fake.getGatherersArgsForCall)]\n\tfake.getGatherersArgsForCall = append(fake.getGatherersArgsForCall, struct {\n\t}{})\n\tstub := fake.GetGatherersStub\n\tfakeReturns := fake.getGatherersReturns\n\tfake.recordInvocation(\"GetGatherers\", []interface{}{})\n\tfake.getGatherersMutex.Unlock()\n\tif stub != nil {\n\t\treturn stub()\n\t}\n\tif specificReturn {\n\t\treturn ret.result1\n\t}\n\treturn fakeReturns.result1\n}\n\nfunc (fake *FakeProcessManager) GetGatherersCallCount() int {\n\tfake.getGatherersMutex.RLock()\n\tdefer fake.getGatherersMutex.RUnlock()\n\treturn len(fake.getGatherersArgsForCall)\n}\n\nfunc (fake *FakeProcessManager) GetGatherersCalls(stub func() []prometheus.Gatherer) {\n\tfake.getGatherersMutex.Lock()\n\tdefer fake.getGatherersMutex.Unlock()\n\tfake.GetGatherersStub = stub\n}\n\nfunc (fake *FakeProcessManager) GetGatherersReturns(result1 []prometheus.Gatherer) {\n\tfake.getGatherersMutex.Lock()\n\tdefer fake.getGatherersMutex.Unlock()\n\tfake.GetGatherersStub = nil\n\tfake.getGatherersReturns = struct {\n\t\tresult1 []prometheus.Gatherer\n\t}{result1}\n}\n\nfunc (fake *FakeProcessManager) GetGatherersReturnsOnCall(i int, result1 []prometheus.Gatherer) {\n\tfake.getGatherersMutex.Lock()\n\tdefer fake.getGatherersMutex.Unlock()\n\tfake.GetGatherersStub = nil\n\tif fake.getGatherersReturnsOnCall == nil {\n\t\tfake.getGatherersReturnsOnCall = 
make(map[int]struct {\n\t\t\tresult1 []prometheus.Gatherer\n\t\t})\n\t}\n\tfake.getGatherersReturnsOnCall[i] = struct {\n\t\tresult1 []prometheus.Gatherer\n\t}{result1}\n}\n\nfunc (fake *FakeProcessManager) GetStatus(arg1 map[string]interface{}) {\n\tfake.getStatusMutex.Lock()\n\tfake.getStatusArgsForCall = append(fake.getStatusArgsForCall, struct {\n\t\targ1 map[string]interface{}\n\t}{arg1})\n\tstub := fake.GetStatusStub\n\tfake.recordInvocation(\"GetStatus\", []interface{}{arg1})\n\tfake.getStatusMutex.Unlock()\n\tif stub != nil {\n\t\tfake.GetStatusStub(arg1)\n\t}\n}\n\nfunc (fake *FakeProcessManager) GetStatusCallCount() int {\n\tfake.getStatusMutex.RLock()\n\tdefer fake.getStatusMutex.RUnlock()\n\treturn len(fake.getStatusArgsForCall)\n}\n\nfunc (fake *FakeProcessManager) GetStatusCalls(stub func(map[string]interface{})) {\n\tfake.getStatusMutex.Lock()\n\tdefer fake.getStatusMutex.Unlock()\n\tfake.GetStatusStub = stub\n}\n\nfunc (fake *FakeProcessManager) GetStatusArgsForCall(i int) map[string]interface{} {\n\tfake.getStatusMutex.RLock()\n\tdefer fake.getStatusMutex.RUnlock()\n\targsForCall := fake.getStatusArgsForCall[i]\n\treturn argsForCall.arg1\n}\n\nfunc (fake *FakeProcessManager) HandlerStarted(arg1 string) error {\n\tfake.handlerStartedMutex.Lock()\n\tret, specificReturn := fake.handlerStartedReturnsOnCall[len(fake.handlerStartedArgsForCall)]\n\tfake.handlerStartedArgsForCall = append(fake.handlerStartedArgsForCall, struct {\n\t\targ1 string\n\t}{arg1})\n\tstub := fake.HandlerStartedStub\n\tfakeReturns := fake.handlerStartedReturns\n\tfake.recordInvocation(\"HandlerStarted\", []interface{}{arg1})\n\tfake.handlerStartedMutex.Unlock()\n\tif stub != nil {\n\t\treturn stub(arg1)\n\t}\n\tif specificReturn {\n\t\treturn ret.result1\n\t}\n\treturn fakeReturns.result1\n}\n\nfunc (fake *FakeProcessManager) HandlerStartedCallCount() int {\n\tfake.handlerStartedMutex.RLock()\n\tdefer fake.handlerStartedMutex.RUnlock()\n\treturn 
len(fake.handlerStartedArgsForCall)\n}\n\nfunc (fake *FakeProcessManager) HandlerStartedCalls(stub func(string) error) {\n\tfake.handlerStartedMutex.Lock()\n\tdefer fake.handlerStartedMutex.Unlock()\n\tfake.HandlerStartedStub = stub\n}\n\nfunc (fake *FakeProcessManager) HandlerStartedArgsForCall(i int) string {\n\tfake.handlerStartedMutex.RLock()\n\tdefer fake.handlerStartedMutex.RUnlock()\n\targsForCall := fake.handlerStartedArgsForCall[i]\n\treturn argsForCall.arg1\n}\n\nfunc (fake *FakeProcessManager) HandlerStartedReturns(result1 error) {\n\tfake.handlerStartedMutex.Lock()\n\tdefer fake.handlerStartedMutex.Unlock()\n\tfake.HandlerStartedStub = nil\n\tfake.handlerStartedReturns = struct {\n\t\tresult1 error\n\t}{result1}\n}\n\nfunc (fake *FakeProcessManager) HandlerStartedReturnsOnCall(i int, result1 error) {\n\tfake.handlerStartedMutex.Lock()\n\tdefer fake.handlerStartedMutex.Unlock()\n\tfake.HandlerStartedStub = nil\n\tif fake.handlerStartedReturnsOnCall == nil {\n\t\tfake.handlerStartedReturnsOnCall = make(map[int]struct {\n\t\t\tresult1 error\n\t\t})\n\t}\n\tfake.handlerStartedReturnsOnCall[i] = struct {\n\t\tresult1 error\n\t}{result1}\n}\n\nfunc (fake *FakeProcessManager) KillAll() {\n\tfake.killAllMutex.Lock()\n\tfake.killAllArgsForCall = append(fake.killAllArgsForCall, struct {\n\t}{})\n\tstub := fake.KillAllStub\n\tfake.recordInvocation(\"KillAll\", []interface{}{})\n\tfake.killAllMutex.Unlock()\n\tif stub != nil {\n\t\tfake.KillAllStub()\n\t}\n}\n\nfunc (fake *FakeProcessManager) KillAllCallCount() int {\n\tfake.killAllMutex.RLock()\n\tdefer fake.killAllMutex.RUnlock()\n\treturn len(fake.killAllArgsForCall)\n}\n\nfunc (fake *FakeProcessManager) KillAllCalls(stub func()) {\n\tfake.killAllMutex.Lock()\n\tdefer fake.killAllMutex.Unlock()\n\tfake.KillAllStub = stub\n}\n\nfunc (fake *FakeProcessManager) KillProcess(arg1 string, arg2 error) {\n\tfake.killProcessMutex.Lock()\n\tfake.killProcessArgsForCall = append(fake.killProcessArgsForCall, struct 
{\n\t\targ1 string\n\t\targ2 error\n\t}{arg1, arg2})\n\tstub := fake.KillProcessStub\n\tfake.recordInvocation(\"KillProcess\", []interface{}{arg1, arg2})\n\tfake.killProcessMutex.Unlock()\n\tif stub != nil {\n\t\tfake.KillProcessStub(arg1, arg2)\n\t}\n}\n\nfunc (fake *FakeProcessManager) KillProcessCallCount() int {\n\tfake.killProcessMutex.RLock()\n\tdefer fake.killProcessMutex.RUnlock()\n\treturn len(fake.killProcessArgsForCall)\n}\n\nfunc (fake *FakeProcessManager) KillProcessCalls(stub func(string, error)) {\n\tfake.killProcessMutex.Lock()\n\tdefer fake.killProcessMutex.Unlock()\n\tfake.KillProcessStub = stub\n}\n\nfunc (fake *FakeProcessManager) KillProcessArgsForCall(i int) (string, error) {\n\tfake.killProcessMutex.RLock()\n\tdefer fake.killProcessMutex.RUnlock()\n\targsForCall := fake.killProcessArgsForCall[i]\n\treturn argsForCall.arg1, argsForCall.arg2\n}\n\nfunc (fake *FakeProcessManager) Launch(arg1 context.Context, arg2 string, arg3 *rpc.StartEgressRequest, arg4 *livekit.EgressInfo, arg5 *exec.Cmd) error {\n\tfake.launchMutex.Lock()\n\tret, specificReturn := fake.launchReturnsOnCall[len(fake.launchArgsForCall)]\n\tfake.launchArgsForCall = append(fake.launchArgsForCall, struct {\n\t\targ1 context.Context\n\t\targ2 string\n\t\targ3 *rpc.StartEgressRequest\n\t\targ4 *livekit.EgressInfo\n\t\targ5 *exec.Cmd\n\t}{arg1, arg2, arg3, arg4, arg5})\n\tstub := fake.LaunchStub\n\tfakeReturns := fake.launchReturns\n\tfake.recordInvocation(\"Launch\", []interface{}{arg1, arg2, arg3, arg4, arg5})\n\tfake.launchMutex.Unlock()\n\tif stub != nil {\n\t\treturn stub(arg1, arg2, arg3, arg4, arg5)\n\t}\n\tif specificReturn {\n\t\treturn ret.result1\n\t}\n\treturn fakeReturns.result1\n}\n\nfunc (fake *FakeProcessManager) LaunchCallCount() int {\n\tfake.launchMutex.RLock()\n\tdefer fake.launchMutex.RUnlock()\n\treturn len(fake.launchArgsForCall)\n}\n\nfunc (fake *FakeProcessManager) LaunchCalls(stub func(context.Context, string, *rpc.StartEgressRequest, *livekit.EgressInfo, 
*exec.Cmd) error) {\n\tfake.launchMutex.Lock()\n\tdefer fake.launchMutex.Unlock()\n\tfake.LaunchStub = stub\n}\n\nfunc (fake *FakeProcessManager) LaunchArgsForCall(i int) (context.Context, string, *rpc.StartEgressRequest, *livekit.EgressInfo, *exec.Cmd) {\n\tfake.launchMutex.RLock()\n\tdefer fake.launchMutex.RUnlock()\n\targsForCall := fake.launchArgsForCall[i]\n\treturn argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4, argsForCall.arg5\n}\n\nfunc (fake *FakeProcessManager) LaunchReturns(result1 error) {\n\tfake.launchMutex.Lock()\n\tdefer fake.launchMutex.Unlock()\n\tfake.LaunchStub = nil\n\tfake.launchReturns = struct {\n\t\tresult1 error\n\t}{result1}\n}\n\nfunc (fake *FakeProcessManager) LaunchReturnsOnCall(i int, result1 error) {\n\tfake.launchMutex.Lock()\n\tdefer fake.launchMutex.Unlock()\n\tfake.LaunchStub = nil\n\tif fake.launchReturnsOnCall == nil {\n\t\tfake.launchReturnsOnCall = make(map[int]struct {\n\t\t\tresult1 error\n\t\t})\n\t}\n\tfake.launchReturnsOnCall[i] = struct {\n\t\tresult1 error\n\t}{result1}\n}\n\nfunc (fake *FakeProcessManager) ProcessFinished(arg1 string) {\n\tfake.processFinishedMutex.Lock()\n\tfake.processFinishedArgsForCall = append(fake.processFinishedArgsForCall, struct {\n\t\targ1 string\n\t}{arg1})\n\tstub := fake.ProcessFinishedStub\n\tfake.recordInvocation(\"ProcessFinished\", []interface{}{arg1})\n\tfake.processFinishedMutex.Unlock()\n\tif stub != nil {\n\t\tfake.ProcessFinishedStub(arg1)\n\t}\n}\n\nfunc (fake *FakeProcessManager) ProcessFinishedCallCount() int {\n\tfake.processFinishedMutex.RLock()\n\tdefer fake.processFinishedMutex.RUnlock()\n\treturn len(fake.processFinishedArgsForCall)\n}\n\nfunc (fake *FakeProcessManager) ProcessFinishedCalls(stub func(string)) {\n\tfake.processFinishedMutex.Lock()\n\tdefer fake.processFinishedMutex.Unlock()\n\tfake.ProcessFinishedStub = stub\n}\n\nfunc (fake *FakeProcessManager) ProcessFinishedArgsForCall(i int) string 
{\n\tfake.processFinishedMutex.RLock()\n\tdefer fake.processFinishedMutex.RUnlock()\n\targsForCall := fake.processFinishedArgsForCall[i]\n\treturn argsForCall.arg1\n}\n\nfunc (fake *FakeProcessManager) Invocations() map[string][][]interface{} {\n\tfake.invocationsMutex.RLock()\n\tdefer fake.invocationsMutex.RUnlock()\n\tcopiedInvocations := map[string][][]interface{}{}\n\tfor key, value := range fake.invocations {\n\t\tcopiedInvocations[key] = value\n\t}\n\treturn copiedInvocations\n}\n\nfunc (fake *FakeProcessManager) recordInvocation(key string, args []interface{}) {\n\tfake.invocationsMutex.Lock()\n\tdefer fake.invocationsMutex.Unlock()\n\tif fake.invocations == nil {\n\t\tfake.invocations = map[string][][]interface{}{}\n\t}\n\tif fake.invocations[key] == nil {\n\t\tfake.invocations[key] = [][]interface{}{}\n\t}\n\tfake.invocations[key] = append(fake.invocations[key], args)\n}\n\nvar _ service.ProcessManager = new(FakeProcessManager)\n"
  },
  {
    "path": "pkg/stats/handler.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage stats\n\nimport (\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\ntype HandlerMonitor struct {\n\tuploadsCounter      *prometheus.CounterVec\n\tuploadsResponseTime *prometheus.HistogramVec\n\tbackupCounter       *prometheus.CounterVec\n}\n\nfunc NewHandlerMonitor(nodeID, clusterID, egressID string) *HandlerMonitor {\n\tm := &HandlerMonitor{}\n\n\tconstantLabels := prometheus.Labels{\"node_id\": nodeID, \"cluster_id\": clusterID, \"egress_id\": egressID}\n\n\tm.uploadsCounter = prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\tNamespace:   \"livekit\",\n\t\tSubsystem:   \"egress\",\n\t\tName:        \"pipeline_uploads\",\n\t\tHelp:        \"Number of uploads per pipeline with type and status labels\",\n\t\tConstLabels: constantLabels,\n\t}, []string{\"type\", \"status\"}) // type: file, manifest, segment, liveplaylist, playlist; status: success,failure\n\n\tm.uploadsResponseTime = prometheus.NewHistogramVec(prometheus.HistogramOpts{\n\t\tNamespace:   \"livekit\",\n\t\tSubsystem:   \"egress\",\n\t\tName:        \"pipline_upload_response_time_ms\",\n\t\tHelp:        \"A histogram of latencies for upload requests in milliseconds.\",\n\t\tBuckets:     []float64{10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000, 15000, 20000, 30000},\n\t\tConstLabels: constantLabels,\n\t}, []string{\"type\", \"status\"})\n\n\tm.backupCounter = 
prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\tNamespace:   \"livekit\",\n\t\tSubsystem:   \"egress\",\n\t\tName:        \"backup_storage_writes\",\n\t\tHelp:        \"number of writes to backup storage location by output type\",\n\t\tConstLabels: constantLabels,\n\t}, []string{\"output_type\"})\n\n\tprometheus.MustRegister(m.uploadsCounter, m.uploadsResponseTime, m.backupCounter)\n\n\treturn m\n}\n\nfunc (m *HandlerMonitor) IncUploadCountSuccess(uploadType string, elapsed float64) {\n\tlabels := prometheus.Labels{\"type\": uploadType, \"status\": \"success\"}\n\tm.uploadsCounter.With(labels).Add(1)\n\tm.uploadsResponseTime.With(labels).Observe(elapsed)\n}\n\nfunc (m *HandlerMonitor) IncUploadCountFailure(uploadType string, elapsed float64) {\n\tlabels := prometheus.Labels{\"type\": uploadType, \"status\": \"failure\"}\n\tm.uploadsCounter.With(labels).Add(1)\n\tm.uploadsResponseTime.With(labels).Observe(elapsed)\n}\n\nfunc (m *HandlerMonitor) IncBackupStorageWrites(outputType string) {\n\tm.backupCounter.With(prometheus.Labels{\"output_type\": outputType}).Add(1)\n}\n\nfunc (m *HandlerMonitor) RegisterSegmentsChannelSizeGauge(nodeID, clusterID, egressID string, channelSizeFunction func() float64) {\n\tsegmentsUploadsGauge := prometheus.NewGaugeFunc(\n\t\tprometheus.GaugeOpts{\n\t\t\tNamespace:   \"livekit\",\n\t\t\tSubsystem:   \"egress\",\n\t\t\tName:        \"segments_uploads_channel_size\",\n\t\t\tHelp:        \"number of segment uploads pending in channel\",\n\t\t\tConstLabels: prometheus.Labels{\"node_id\": nodeID, \"cluster_id\": clusterID, \"egress_id\": egressID},\n\t\t}, channelSizeFunction)\n\tprometheus.MustRegister(segmentsUploadsGauge)\n}\n\nfunc (m *HandlerMonitor) RegisterPlaylistChannelSizeGauge(nodeID, clusterID, egressID string, channelSizeFunction func() float64) {\n\tplaylistUploadsGauge := prometheus.NewGaugeFunc(\n\t\tprometheus.GaugeOpts{\n\t\t\tNamespace:   \"livekit\",\n\t\t\tSubsystem:   \"egress\",\n\t\t\tName:        
\"playlist_uploads_channel_size\",\n\t\t\tHelp:        \"number of playlist updates pending in channel\",\n\t\t\tConstLabels: prometheus.Labels{\"node_id\": nodeID, \"cluster_id\": clusterID, \"egress_id\": egressID},\n\t\t}, channelSizeFunction)\n\tprometheus.MustRegister(playlistUploadsGauge)\n}\n"
  },
  {
    "path": "pkg/stats/monitor.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage stats\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\t\"time\"\n\n\t\"github.com/linkdata/deadlock\"\n\t\"github.com/pbnjay/memory\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"go.uber.org/atomic\"\n\n\t\"github.com/livekit/protocol/livekit\"\n\t\"github.com/livekit/protocol/logger\"\n\t\"github.com/livekit/protocol/rpc\"\n\t\"github.com/livekit/protocol/utils/hwstats\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/errors\"\n\t\"github.com/livekit/egress/pkg/pipeline/source/pulse\"\n\t\"github.com/livekit/egress/pkg/types\"\n)\n\nconst (\n\tcpuHoldDuration         = time.Second * 15\n\tdefaultKillThreshold    = 0.95\n\tminKillDuration         = 10\n\tgb                      = 1024.0 * 1024.0 * 1024.0\n\tpulseClientHold         = 4\n\tmemoryHeadroomGB        = 1.0\n\tmemoryUsageDumpInterval = 10 * time.Minute\n)\n\ntype Service interface {\n\tIsIdle() bool\n\tIsDisabled() bool\n\tIsTerminating() bool\n\tKillProcess(string, error)\n}\n\ntype Monitor struct {\n\tnodeID        string\n\tclusterID     string\n\tcpuCostConfig *config.CPUCostConfig\n\n\tpromCPULoad           prometheus.Gauge\n\tpromCgroupMemory      prometheus.Gauge\n\tpromCgroupReadSuccess prometheus.Gauge\n\tpromProcRSS           prometheus.Gauge\n\tpromWouldRejectCgroup prometheus.Gauge\n\trequestGauge          *prometheus.GaugeVec\n\n\tsvc   
              Service\n\tcpuStats            *hwstats.CPUStats\n\tcgroupMemStats      *hwstats.MemoryStats\n\trequests            atomic.Int32\n\twebRequests         atomic.Int32\n\tpendingPulseClients atomic.Int32\n\tpendingMemoryUsage  atomic.Float64\n\n\tmu                deadlock.Mutex\n\thighCPUDuration   int\n\thighMemoryStart   time.Time\n\tlastMemoryDump    time.Time\n\tpending           map[string]*processStats\n\tprocStats         map[int]*processStats\n\tmemoryUsage       float64\n\tcgroupUsageBytes  uint64\n\tcgroupOK          bool\n\tcgroupErrorLogged atomic.Bool\n}\n\ntype processStats struct {\n\tegressID string\n\n\tpendingCPU float64\n\tlastCPU    float64\n\tallowedCPU float64\n\n\ttotalCPU     float64\n\tcpuCounter   int\n\tmaxCPU       float64\n\tmaxMemory    int\n\tcountedAsWeb bool\n}\n\nfunc NewMonitor(conf *config.ServiceConfig, svc Service) (*Monitor, error) {\n\tm := &Monitor{\n\t\tnodeID:         conf.NodeID,\n\t\tclusterID:      conf.ClusterID,\n\t\tcpuCostConfig:  conf.CPUCostConfig,\n\t\tsvc:            svc,\n\t\tpending:        make(map[string]*processStats),\n\t\tprocStats:      make(map[int]*processStats),\n\t\tlastMemoryDump: time.Now(),\n\t}\n\n\tm.initPrometheus()\n\n\tprocStats, err := hwstats.NewProcMonitor(m.updateEgressStats)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm.cpuStats = procStats\n\n\tif err = m.validateCPUConfig(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tmemStats, err := hwstats.NewMemoryStats()\n\tif err != nil {\n\t\tlogger.Warnw(\"failed to initialize cgroup memory stats\", err)\n\t} else {\n\t\tm.cgroupMemStats = memStats\n\t}\n\n\treturn m, nil\n}\n\nfunc (m *Monitor) validateCPUConfig() error {\n\trequirements := 
[]float64{\n\t\tm.cpuCostConfig.RoomCompositeCpuCost,\n\t\tm.cpuCostConfig.AudioRoomCompositeCpuCost,\n\t\tm.cpuCostConfig.WebCpuCost,\n\t\tm.cpuCostConfig.AudioWebCpuCost,\n\t\tm.cpuCostConfig.ParticipantCpuCost,\n\t\tm.cpuCostConfig.TrackCompositeCpuCost,\n\t\tm.cpuCostConfig.TrackCpuCost,\n\t}\n\tsort.Float64s(requirements)\n\n\trecommendedMinimum := requirements[len(requirements)-1]\n\tif recommendedMinimum < 3 {\n\t\trecommendedMinimum = 3\n\t}\n\n\tif m.cpuStats.NumCPU() < requirements[0] {\n\t\tlogger.Errorw(\"not enough cpu\", nil,\n\t\t\t\"minimumCpu\", requirements[0],\n\t\t\t\"recommended\", recommendedMinimum,\n\t\t\t\"available\", m.cpuStats.NumCPU(),\n\t\t)\n\t\treturn errors.New(\"not enough cpu\")\n\t}\n\n\tif m.cpuStats.NumCPU() < requirements[len(requirements)-1] {\n\t\tlogger.Errorw(\"not enough cpu for some egress types\", nil,\n\t\t\t\"minimumCpu\", requirements[len(requirements)-1],\n\t\t\t\"recommended\", recommendedMinimum,\n\t\t\t\"available\", m.cpuStats.NumCPU(),\n\t\t)\n\t}\n\n\tlogger.Infow(fmt.Sprintf(\"cpu available: %f max cost: %f\", m.cpuStats.NumCPU(), requirements[len(requirements)-1]))\n\n\treturn nil\n}\n\nfunc (m *Monitor) CanAcceptRequest(req *rpc.StartEgressRequest) bool {\n\tm.mu.Lock()\n\tfields, canAccept := m.canAcceptRequestLocked(req)\n\tm.mu.Unlock()\n\n\tlogger.Debugw(\"cpu check\", fields...)\n\treturn canAccept\n}\n\nfunc (m *Monitor) CanAcceptWebRequest() bool {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\treturn m.canAcceptWebLocked()\n}\n\nfunc (m *Monitor) canAcceptRequestLocked(req *rpc.StartEgressRequest) ([]interface{}, bool) {\n\ttotal, available, pending, used := m.getCPUUsageLocked()\n\tfields := []interface{}{\n\t\t\"total\", total,\n\t\t\"available\", available,\n\t\t\"pending\", pending,\n\t\t\"used\", used,\n\t\t\"activeRequests\", m.requests.Load(),\n\t\t\"activeWeb\", m.webRequests.Load(),\n\t\t\"memory\", m.memoryUsage,\n\t\t\"memorySource\", m.cpuCostConfig.MemorySource,\n\t}\n\n\t// Memory 
admission check based on configured source\n\tif reject, reason := m.checkMemoryAdmissionLocked(); reject {\n\t\tfields = append(fields, \"canAccept\", false, \"reason\", reason)\n\t\treturn fields, false\n\t}\n\n\trequired := req.EstimatedCpu\n\tswitch r := req.Request.(type) {\n\tcase *rpc.StartEgressRequest_RoomComposite:\n\t\tuseSDK := config.ShouldUseSDKSource(r.RoomComposite)\n\t\tif !useSDK && !m.canAcceptWebLocked() {\n\t\t\tfields = append(fields, \"canAccept\", false, \"reason\", \"pulse clients\")\n\t\t\treturn fields, false\n\t\t}\n\t\tif required == 0 {\n\t\t\tif r.RoomComposite.AudioOnly {\n\t\t\t\trequired = m.cpuCostConfig.AudioRoomCompositeCpuCost\n\t\t\t} else {\n\t\t\t\trequired = m.cpuCostConfig.RoomCompositeCpuCost\n\t\t\t}\n\t\t}\n\tcase *rpc.StartEgressRequest_Web:\n\t\tif !m.canAcceptWebLocked() {\n\t\t\tfields = append(fields, \"canAccept\", false, \"reason\", \"pulse clients\")\n\t\t\treturn fields, false\n\t\t}\n\t\tif required == 0 {\n\t\t\tif r.Web.AudioOnly {\n\t\t\t\trequired = m.cpuCostConfig.AudioWebCpuCost\n\t\t\t} else {\n\t\t\t\trequired = m.cpuCostConfig.WebCpuCost\n\t\t\t}\n\t\t}\n\tcase *rpc.StartEgressRequest_Participant:\n\t\tif required == 0 {\n\t\t\trequired = m.cpuCostConfig.ParticipantCpuCost\n\t\t}\n\tcase *rpc.StartEgressRequest_TrackComposite:\n\t\tif required == 0 {\n\t\t\trequired = m.cpuCostConfig.TrackCompositeCpuCost\n\t\t}\n\tcase *rpc.StartEgressRequest_Track:\n\t\tif required == 0 {\n\t\t\trequired = m.cpuCostConfig.TrackCpuCost\n\t\t}\n\tcase *rpc.StartEgressRequest_Replay:\n\t\treplayReq := r.Replay\n\t\tswitch source := replayReq.Source.(type) {\n\t\tcase *livekit.ExportReplayRequest_Template:\n\t\t\tuseSDK := config.ShouldUseSDKSource(source.Template)\n\t\t\tif !useSDK && !m.canAcceptWebLocked() {\n\t\t\t\tfields = append(fields, \"canAccept\", false, \"reason\", \"pulse clients\")\n\t\t\t\treturn fields, false\n\t\t\t}\n\t\t\tif required == 0 {\n\t\t\t\tif source.Template.AudioOnly {\n\t\t\t\t\trequired = 
m.cpuCostConfig.AudioRoomCompositeCpuCost\n\t\t\t\t} else {\n\t\t\t\t\trequired = m.cpuCostConfig.RoomCompositeCpuCost\n\t\t\t\t}\n\t\t\t}\n\t\tcase *livekit.ExportReplayRequest_Web:\n\t\t\tif !m.canAcceptWebLocked() {\n\t\t\t\tfields = append(fields, \"canAccept\", false, \"reason\", \"pulse clients\")\n\t\t\t\treturn fields, false\n\t\t\t}\n\t\t\tif required == 0 {\n\t\t\t\tif source.Web.AudioOnly {\n\t\t\t\t\trequired = m.cpuCostConfig.AudioWebCpuCost\n\t\t\t\t} else {\n\t\t\t\t\trequired = m.cpuCostConfig.WebCpuCost\n\t\t\t\t}\n\t\t\t}\n\t\tcase *livekit.ExportReplayRequest_Media:\n\t\t\tif required == 0 {\n\t\t\t\trequired = m.cpuCostConfig.ParticipantCpuCost\n\t\t\t}\n\t\t}\n\t}\n\n\taccept := available >= required\n\tfields = append(fields,\n\t\t\"required\", required,\n\t\t\"canAccept\", accept,\n\t)\n\tif !accept {\n\t\tfields = append(fields, \"reason\", \"cpu\")\n\t}\n\n\treturn fields, accept\n}\n\nfunc (m *Monitor) canAcceptWebLocked() bool {\n\tclients, err := pulse.Clients()\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn clients+int(m.pendingPulseClients.Load())+pulseClientHold <= m.cpuCostConfig.MaxPulseClients\n}\n\n// checkMemoryAdmissionLocked checks if a request should be rejected due to memory constraints.\n// Returns (reject, reason) where reject=true means the request should be rejected.\nfunc (m *Monitor) checkMemoryAdmissionLocked() (bool, string) {\n\tif m.cpuCostConfig.MaxMemory == 0 {\n\t\treturn false, \"\"\n\t}\n\n\tpendingMem := m.pendingMemoryUsage.Load()\n\tmemoryCost := m.cpuCostConfig.MemoryCost\n\theadroom := memoryHeadroomGB\n\tmaxMem := m.cpuCostConfig.MaxMemory\n\n\tswitch m.cpuCostConfig.MemorySource {\n\tcase config.MemorySourceCgroup:\n\t\tif !m.cgroupOK {\n\t\t\t// Fallback to proc_rss\n\t\t\treturn m.checkProcRSSMemoryAdmission(pendingMem, memoryCost, headroom, maxMem)\n\t\t}\n\t\tcgroupGB := float64(m.cgroupUsageBytes) / gb\n\t\tif cgroupGB+pendingMem+memoryCost+headroom >= maxMem {\n\t\t\treturn true, 
\"memory_cgroup\"\n\t\t}\n\n\tdefault: // proc_rss\n\t\treturn m.checkProcRSSMemoryAdmission(pendingMem, memoryCost, headroom, maxMem)\n\t}\n\n\treturn false, \"\"\n}\n\n// checkProcRSSMemoryAdmission implements the original per-process RSS based admission.\nfunc (m *Monitor) checkProcRSSMemoryAdmission(pendingMem, memoryCost, headroom, maxMem float64) (bool, string) {\n\tmemoryUsage := m.memoryUsage + pendingMem\n\tif memoryUsage+memoryCost+headroom >= maxMem {\n\t\treturn true, \"memory\"\n\t}\n\treturn false, \"\"\n}\n\nfunc (m *Monitor) AcceptRequest(req *rpc.StartEgressRequest) error {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\tif m.pending[req.EgressId] != nil {\n\t\treturn errors.ErrEgressAlreadyExists\n\t}\n\tif _, ok := m.canAcceptRequestLocked(req); !ok {\n\t\tlogger.Debugw(\"can not accept request\", nil)\n\t\treturn errors.ErrNotEnoughCPU\n\t}\n\n\tm.requests.Inc()\n\tvar cpuHold float64\n\tvar pulseClients int32\n\tvar countedAsWeb bool\n\n\tswitch r := req.Request.(type) {\n\tcase *rpc.StartEgressRequest_RoomComposite:\n\t\tuseSDK := config.ShouldUseSDKSource(r.RoomComposite)\n\t\tif !useSDK {\n\t\t\tm.webRequests.Inc()\n\t\t\tcountedAsWeb = true\n\t\t\tpulseClients = pulseClientHold\n\t\t}\n\t\tif r.RoomComposite.AudioOnly {\n\t\t\tcpuHold = m.cpuCostConfig.AudioRoomCompositeCpuCost\n\t\t} else {\n\t\t\tcpuHold = m.cpuCostConfig.RoomCompositeCpuCost\n\t\t}\n\tcase *rpc.StartEgressRequest_Web:\n\t\tpulseClients = pulseClientHold\n\t\tm.webRequests.Inc()\n\t\tcountedAsWeb = true\n\t\tif r.Web.AudioOnly {\n\t\t\tcpuHold = m.cpuCostConfig.AudioWebCpuCost\n\t\t} else {\n\t\t\tcpuHold = m.cpuCostConfig.WebCpuCost\n\t\t}\n\tcase *rpc.StartEgressRequest_Participant:\n\t\tcpuHold = m.cpuCostConfig.ParticipantCpuCost\n\tcase *rpc.StartEgressRequest_TrackComposite:\n\t\tcpuHold = m.cpuCostConfig.TrackCompositeCpuCost\n\tcase *rpc.StartEgressRequest_Track:\n\t\tcpuHold = m.cpuCostConfig.TrackCpuCost\n\tcase *rpc.StartEgressRequest_Replay:\n\t\treplayReq := 
r.Replay\n\t\tswitch source := replayReq.Source.(type) {\n\t\tcase *livekit.ExportReplayRequest_Template:\n\t\t\tuseSDK := config.ShouldUseSDKSource(source.Template)\n\t\t\tif !useSDK {\n\t\t\t\tm.webRequests.Inc()\n\t\t\t\tcountedAsWeb = true\n\t\t\t\tpulseClients = pulseClientHold\n\t\t\t}\n\t\t\tif source.Template.AudioOnly {\n\t\t\t\tcpuHold = m.cpuCostConfig.AudioRoomCompositeCpuCost\n\t\t\t} else {\n\t\t\t\tcpuHold = m.cpuCostConfig.RoomCompositeCpuCost\n\t\t\t}\n\t\tcase *livekit.ExportReplayRequest_Web:\n\t\t\tpulseClients = pulseClientHold\n\t\t\tm.webRequests.Inc()\n\t\t\tcountedAsWeb = true\n\t\t\tif source.Web.AudioOnly {\n\t\t\t\tcpuHold = m.cpuCostConfig.AudioWebCpuCost\n\t\t\t} else {\n\t\t\t\tcpuHold = m.cpuCostConfig.WebCpuCost\n\t\t\t}\n\t\tcase *livekit.ExportReplayRequest_Media:\n\t\t\tcpuHold = m.cpuCostConfig.ParticipantCpuCost\n\t\t}\n\t}\n\n\tps := &processStats{\n\t\tegressID:     req.EgressId,\n\t\tpendingCPU:   cpuHold,\n\t\tallowedCPU:   cpuHold,\n\t\tcountedAsWeb: countedAsWeb,\n\t}\n\n\tm.pendingMemoryUsage.Add(m.cpuCostConfig.MemoryCost)\n\tm.pendingPulseClients.Add(pulseClients)\n\n\ttime.AfterFunc(cpuHoldDuration, func() {\n\t\tps.pendingCPU = 0\n\t\tm.pendingMemoryUsage.Add(-m.cpuCostConfig.MemoryCost)\n\t\tm.pendingPulseClients.Add(-pulseClients)\n\t})\n\tm.pending[req.EgressId] = ps\n\n\treturn nil\n}\n\nfunc (m *Monitor) UpdatePID(egressID string, pid int) {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\tps := m.pending[egressID]\n\tdelete(m.pending, egressID)\n\n\tif ps == nil {\n\t\tlogger.Warnw(\"missing pending procStats\", nil, \"egressID\", egressID)\n\t\tps = &processStats{\n\t\t\tegressID:   egressID,\n\t\t\tallowedCPU: m.cpuCostConfig.WebCpuCost,\n\t\t}\n\t}\n\n\tif existing := m.procStats[pid]; existing != nil {\n\t\tps.maxCPU = existing.maxCPU\n\t\tps.totalCPU = existing.totalCPU\n\t\tps.cpuCounter = existing.cpuCounter\n\t\tps.countedAsWeb = existing.countedAsWeb\n\t}\n\tm.procStats[pid] = ps\n}\n\nfunc (m *Monitor) 
EgressStarted(req *rpc.StartEgressRequest) {\n\tswitch req.Request.(type) {\n\tcase *rpc.StartEgressRequest_RoomComposite:\n\t\tm.requestGauge.With(prometheus.Labels{\"type\": types.RequestTypeRoomComposite}).Add(1)\n\tcase *rpc.StartEgressRequest_Web:\n\t\tm.requestGauge.With(prometheus.Labels{\"type\": types.RequestTypeWeb}).Add(1)\n\tcase *rpc.StartEgressRequest_Participant:\n\t\tm.requestGauge.With(prometheus.Labels{\"type\": types.RequestTypeParticipant}).Add(1)\n\tcase *rpc.StartEgressRequest_TrackComposite:\n\t\tm.requestGauge.With(prometheus.Labels{\"type\": types.RequestTypeTrackComposite}).Add(1)\n\tcase *rpc.StartEgressRequest_Track:\n\t\tm.requestGauge.With(prometheus.Labels{\"type\": types.RequestTypeTrack}).Add(1)\n\tcase *rpc.StartEgressRequest_Replay:\n\t\treplayReq := req.Request.(*rpc.StartEgressRequest_Replay).Replay\n\t\tswitch replayReq.Source.(type) {\n\t\tcase *livekit.ExportReplayRequest_Template:\n\t\t\tm.requestGauge.With(prometheus.Labels{\"type\": types.RequestTypeTemplate}).Add(1)\n\t\tcase *livekit.ExportReplayRequest_Web:\n\t\t\tm.requestGauge.With(prometheus.Labels{\"type\": types.RequestTypeWeb}).Add(1)\n\t\tcase *livekit.ExportReplayRequest_Media:\n\t\t\tm.requestGauge.With(prometheus.Labels{\"type\": types.RequestTypeMedia}).Add(1)\n\t\t}\n\t}\n}\n\nfunc (m *Monitor) EgressAborted(req *rpc.StartEgressRequest) {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\tps := m.pending[req.EgressId]\n\tdelete(m.pending, req.EgressId)\n\tm.requests.Dec()\n\tswitch req.Request.(type) {\n\tcase *rpc.StartEgressRequest_RoomComposite, *rpc.StartEgressRequest_Web, *rpc.StartEgressRequest_Replay:\n\t\tif ps != nil && ps.countedAsWeb {\n\t\t\tm.webRequests.Dec()\n\t\t}\n\t}\n}\n\nfunc (m *Monitor) EgressEnded(req *rpc.StartEgressRequest) (float64, float64, int) {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\tvar countedAsWeb bool\n\tif ps := m.pending[req.EgressId]; ps != nil {\n\t\tcountedAsWeb = ps.countedAsWeb\n\t} else {\n\t\tfor _, s := range 
m.procStats {\n\t\t\tif s.egressID == req.EgressId {\n\t\t\t\tcountedAsWeb = s.countedAsWeb\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tswitch req.Request.(type) {\n\tcase *rpc.StartEgressRequest_RoomComposite:\n\t\tm.requestGauge.With(prometheus.Labels{\"type\": types.RequestTypeRoomComposite}).Sub(1)\n\t\tif countedAsWeb {\n\t\t\tm.webRequests.Dec()\n\t\t}\n\tcase *rpc.StartEgressRequest_Web:\n\t\tm.requestGauge.With(prometheus.Labels{\"type\": types.RequestTypeWeb}).Sub(1)\n\t\tm.webRequests.Dec()\n\tcase *rpc.StartEgressRequest_Participant:\n\t\tm.requestGauge.With(prometheus.Labels{\"type\": types.RequestTypeParticipant}).Sub(1)\n\tcase *rpc.StartEgressRequest_TrackComposite:\n\t\tm.requestGauge.With(prometheus.Labels{\"type\": types.RequestTypeTrackComposite}).Sub(1)\n\tcase *rpc.StartEgressRequest_Track:\n\t\tm.requestGauge.With(prometheus.Labels{\"type\": types.RequestTypeTrack}).Sub(1)\n\tcase *rpc.StartEgressRequest_Replay:\n\t\treplayReq := req.Request.(*rpc.StartEgressRequest_Replay).Replay\n\t\tswitch replayReq.Source.(type) {\n\t\tcase *livekit.ExportReplayRequest_Template:\n\t\t\tm.requestGauge.With(prometheus.Labels{\"type\": types.RequestTypeTemplate}).Sub(1)\n\t\t\tif countedAsWeb {\n\t\t\t\tm.webRequests.Dec()\n\t\t\t}\n\t\tcase *livekit.ExportReplayRequest_Web:\n\t\t\tm.requestGauge.With(prometheus.Labels{\"type\": types.RequestTypeWeb}).Sub(1)\n\t\t\tm.webRequests.Dec()\n\t\tcase *livekit.ExportReplayRequest_Media:\n\t\t\tm.requestGauge.With(prometheus.Labels{\"type\": types.RequestTypeMedia}).Sub(1)\n\t\t}\n\t}\n\n\tdelete(m.pending, req.EgressId)\n\tm.requests.Dec()\n\n\tfor pid, ps := range m.procStats {\n\t\tif ps.egressID == req.EgressId {\n\t\t\tdelete(m.procStats, pid)\n\t\t\treturn ps.totalCPU / float64(ps.cpuCounter), ps.maxCPU, ps.maxMemory\n\t\t}\n\t}\n\n\treturn 0, 0, 0\n}\n\nfunc (m *Monitor) GetAvailableCPU() float64 {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\t_, available, _, _ := m.getCPUUsageLocked()\n\treturn 
available\n}\n\nfunc (m *Monitor) getCPUUsageLocked() (total, available, pending, used float64) {\n\ttotal = m.cpuStats.NumCPU()\n\tif m.requests.Load() == 0 {\n\t\t// if no requests, use total\n\t\tavailable = total\n\t\treturn\n\t}\n\n\tfor _, ps := range m.pending {\n\t\tif ps.pendingCPU > ps.lastCPU {\n\t\t\tpending += ps.pendingCPU\n\t\t} else {\n\t\t\tpending += ps.lastCPU\n\t\t}\n\t}\n\tfor _, ps := range m.procStats {\n\t\tif ps.pendingCPU > ps.lastCPU {\n\t\t\tused += ps.pendingCPU\n\t\t} else {\n\t\t\tused += ps.lastCPU\n\t\t}\n\t}\n\n\t// if already running requests, cap usage at MaxCpuUtilization\n\tavailable = total*m.cpuCostConfig.MaxCpuUtilization - pending - used\n\treturn\n}\n\nfunc (m *Monitor) GetAvailableMemory() float64 {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\tif m.cpuCostConfig.MaxMemory == 0 {\n\t\treturn float64(memory.FreeMemory()) / gb\n\t}\n\n\treturn m.cpuCostConfig.MaxMemory - m.memoryUsage\n}\n\nfunc (m *Monitor) updateEgressStats(stats *hwstats.ProcStats) {\n\tload := 1 - stats.CpuIdle/m.cpuStats.NumCPU()\n\tm.promCPULoad.Set(load)\n\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\tmaxCPU := 0.0\n\tvar maxCPUEgress string\n\tfor pid, cpuUsage := range stats.Cpu {\n\t\tprocStats := m.procStats[pid]\n\t\tif procStats == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tprocStats.lastCPU = cpuUsage\n\t\tprocStats.totalCPU += cpuUsage\n\t\tprocStats.cpuCounter++\n\t\tif cpuUsage > procStats.maxCPU {\n\t\t\tprocStats.maxCPU = cpuUsage\n\t\t}\n\n\t\tif cpuUsage > procStats.allowedCPU && cpuUsage > maxCPU {\n\t\t\tmaxCPU = cpuUsage\n\t\t\tmaxCPUEgress = procStats.egressID\n\t\t}\n\t}\n\n\tcpuKillThreshold := defaultKillThreshold\n\tif cpuKillThreshold <= m.cpuCostConfig.MaxCpuUtilization {\n\t\tcpuKillThreshold = (1 + m.cpuCostConfig.MaxCpuUtilization) / 2\n\t}\n\n\tif load > cpuKillThreshold {\n\t\tlogger.Warnw(\"high cpu usage\", nil,\n\t\t\t\"cpu\", load,\n\t\t\t\"requests\", m.requests.Load(),\n\t\t)\n\n\t\tif m.requests.Load() > 1 
{\n\t\t\tm.highCPUDuration++\n\t\t\tif m.highCPUDuration >= minKillDuration {\n\t\t\t\tm.svc.KillProcess(maxCPUEgress, errors.ErrCPUExhausted(maxCPU))\n\t\t\t\tm.highCPUDuration = 0\n\t\t\t}\n\t\t}\n\t}\n\n\ttotalMemory := 0\n\tmaxMemory := 0\n\tvar maxMemoryEgress string\n\tvar maxMemoryGroup *hwstats.GroupMemory\n\tfor pid, gm := range stats.Memory {\n\t\ttotalMemory += gm.Total\n\n\t\tprocStats := m.procStats[pid]\n\t\tif procStats == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif gm.Total > procStats.maxMemory {\n\t\t\tprocStats.maxMemory = gm.Total\n\t\t}\n\t\tif gm.Total > maxMemory {\n\t\t\tmaxMemory = gm.Total\n\t\t\tmaxMemoryEgress = procStats.egressID\n\t\t\tmaxMemoryGroup = gm\n\t\t}\n\t}\n\n\tm.memoryUsage = float64(totalMemory) / gb\n\tm.promProcRSS.Set(float64(totalMemory))\n\n\tm.maybeLogMemoryUsage(stats.Memory)\n\n\tm.updateCgroupStats()\n\n\tm.updateWouldRejectMetrics()\n\n\tm.checkMemoryKill(maxMemoryEgress, maxMemoryGroup)\n}\n\n// maybeLogMemoryUsage periodically logs per-group process RSS to aid memory leak diagnosis.\nfunc (m *Monitor) maybeLogMemoryUsage(memory map[int]*hwstats.GroupMemory) {\n\tnow := time.Now()\n\tif now.Sub(m.lastMemoryDump) < memoryUsageDumpInterval {\n\t\treturn\n\t}\n\tm.lastMemoryDump = now\n\n\tfor groupPID, gm := range memory {\n\t\tegressID := \"\"\n\t\tif ps := m.procStats[groupPID]; ps != nil {\n\t\t\tegressID = ps.egressID\n\t\t}\n\t\tlogger.Infow(\"current memory usage\",\n\t\t\t\"egressID\", egressID,\n\t\t\t\"groupPID\", groupPID,\n\t\t\t\"totalRSSBytes\", gm.Total,\n\t\t\t\"processes\", gm.Procs,\n\t\t)\n\t}\n}\n\n// updateCgroupStats reads cgroup memory statistics and updates metrics.\nfunc (m *Monitor) updateCgroupStats() {\n\tif m.cgroupMemStats == nil {\n\t\tm.cgroupOK = false\n\t\tm.promCgroupReadSuccess.Set(0)\n\t\treturn\n\t}\n\n\tusageBytes, _, err := m.cgroupMemStats.GetMemory()\n\tif err != nil {\n\t\tm.cgroupOK = false\n\t\tm.promCgroupReadSuccess.Set(0)\n\t\t// Throttle error logging (CompareAndSwap 
ensures we log only once)\n\t\tif m.cgroupErrorLogged.CompareAndSwap(false, true) {\n\t\t\tlogger.Warnw(\"failed to read cgroup memory stats, falling back to proc_rss\", err)\n\t\t}\n\t\treturn\n\t}\n\n\tm.cgroupOK = true\n\tm.cgroupErrorLogged.Store(false)\n\tm.cgroupUsageBytes = usageBytes\n\n\tm.promCgroupReadSuccess.Set(1)\n\tm.promCgroupMemory.Set(float64(usageBytes))\n}\n\n// updateWouldRejectMetrics computes what admission would do with alternative memory sources.\nfunc (m *Monitor) updateWouldRejectMetrics() {\n\tif !m.cgroupOK || m.cpuCostConfig.MaxMemory == 0 {\n\t\treturn\n\t}\n\n\tpendingMem := m.pendingMemoryUsage.Load()\n\theadroom := memoryHeadroomGB\n\tmaxMem := m.cpuCostConfig.MaxMemory\n\n\t// Would reject with cgroup?\n\tcgroupUsageGB := float64(m.cgroupUsageBytes) / gb\n\tif cgroupUsageGB+pendingMem+m.cpuCostConfig.MemoryCost+headroom >= maxMem {\n\t\tm.promWouldRejectCgroup.Set(1)\n\t} else {\n\t\tm.promWouldRejectCgroup.Set(0)\n\t}\n}\n\n// checkMemoryKill evaluates whether to kill a process based on memory usage.\nfunc (m *Monitor) checkMemoryKill(maxMemoryEgress string, maxMemoryGroup *hwstats.GroupMemory) {\n\tif m.cpuCostConfig.MaxMemory == 0 {\n\t\treturn\n\t}\n\n\tmaxMemoryBytes := uint64(m.cpuCostConfig.MaxMemory * gb)\n\tkillTriggerBytes := uint64(m.memoryUsage * gb)\n\n\tswitch m.cpuCostConfig.MemorySource {\n\tcase config.MemorySourceCgroup:\n\t\tif m.cgroupOK {\n\t\t\tkillTriggerBytes = m.cgroupUsageBytes\n\t\t}\n\tdefault: // proc_rss\n\t}\n\n\tif killTriggerBytes > maxMemoryBytes {\n\t\t// Apply grace period if configured.\n\t\tif m.highMemoryStart.IsZero() {\n\t\t\tm.highMemoryStart = time.Now()\n\t\t}\n\t\tif time.Since(m.highMemoryStart) >= time.Duration(m.cpuCostConfig.MemoryKillGraceSec)*time.Second {\n\t\t\tkillTriggerGB := float64(killTriggerBytes) / gb\n\t\t\tlogger.Warnw(\"high memory usage\", nil,\n\t\t\t\t\"source\", m.cpuCostConfig.MemorySource,\n\t\t\t\t\"memoryGB\", killTriggerGB,\n\t\t\t\t\"maxMemoryGB\", 
m.cpuCostConfig.MaxMemory,\n\t\t\t\t\"requests\", m.requests.Load(),\n\t\t\t)\n\t\t\tif maxMemoryGroup != nil {\n\t\t\t\tlogger.Infow(\"killing egress process memory\",\n\t\t\t\t\t\"egressID\", maxMemoryEgress, \"processes\", maxMemoryGroup.Procs)\n\t\t\t}\n\t\t\t// Report the actual memory that triggered the kill, not per-process max\n\t\t\tm.svc.KillProcess(maxMemoryEgress, errors.ErrOOM(killTriggerGB))\n\t\t\tm.highMemoryStart = time.Time{}\n\t\t}\n\t} else {\n\t\tm.highMemoryStart = time.Time{}\n\t}\n}\n"
  },
  {
    "path": "pkg/stats/monitor_memory_test.go",
    "content": "// Copyright 2026 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage stats\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n)\n\nfunc TestCheckMemoryAdmissionLocked_Legacy(t *testing.T) {\n\tm := &Monitor{\n\t\tcpuCostConfig: &config.CPUCostConfig{\n\t\t\tMaxMemory:    10, // 10 GB\n\t\t\tMemoryCost:   1,  // 1 GB per request\n\t\t\tMemorySource: config.MemorySourceProcRSS,\n\t\t},\n\t\tmemoryUsage: 5, // 5 GB current usage\n\t}\n\n\t// 5 + 0 (pending) + 1 (cost) + 1 (headroom) = 7 < 10, should accept\n\treject, _ := m.checkMemoryAdmissionLocked()\n\trequire.False(t, reject)\n\n\t// Increase usage to trigger rejection\n\tm.memoryUsage = 8 // 8 + 0 + 1 + 1 = 10 >= 10, should reject\n\treject, reason := m.checkMemoryAdmissionLocked()\n\trequire.True(t, reject)\n\trequire.Equal(t, \"memory\", reason)\n}\n\nfunc TestCheckMemoryAdmissionLocked_CgroupWorkingSet(t *testing.T) {\n\tm := &Monitor{\n\t\tcpuCostConfig: &config.CPUCostConfig{\n\t\t\tMaxMemory:    10,\n\t\t\tMemoryCost:   1,\n\t\t\tMemorySource: config.MemorySourceCgroup,\n\t\t},\n\t\tcgroupUsageBytes: 5 * gb,\n\t\tcgroupOK:         true,\n\t}\n\n\t// Working set is 5 GB, should accept\n\treject, _ := m.checkMemoryAdmissionLocked()\n\trequire.False(t, reject)\n\n\t// Increase working set to trigger rejection\n\tm.cgroupUsageBytes = 8 * gb\n\treject, reason := 
m.checkMemoryAdmissionLocked()\n\trequire.True(t, reject)\n\trequire.Equal(t, \"memory_cgroup\", reason)\n}\n\nfunc TestCheckMemoryAdmissionLocked_FallbackToProcRSS(t *testing.T) {\n\tm := &Monitor{\n\t\tcpuCostConfig: &config.CPUCostConfig{\n\t\t\tMaxMemory:    10,\n\t\t\tMemoryCost:   1,\n\t\t\tMemorySource: config.MemorySourceCgroup,\n\t\t},\n\t\tmemoryUsage: 5,\n\t\tcgroupOK:    false, // cgroup not available\n\t}\n\n\t// Should fall back to proc_rss\n\treject, _ := m.checkMemoryAdmissionLocked()\n\trequire.False(t, reject) // 5 + 0 + 1 + 1 = 7 < 10\n\n\tm.memoryUsage = 8\n\treject, reason := m.checkMemoryAdmissionLocked()\n\trequire.True(t, reject)\n\trequire.Equal(t, \"memory\", reason) // proc_rss reason\n}\n\nfunc TestCheckMemoryAdmissionLocked_NoMaxMemory(t *testing.T) {\n\tm := &Monitor{\n\t\tcpuCostConfig: &config.CPUCostConfig{\n\t\t\tMaxMemory:    0, // disabled\n\t\t\tMemorySource: config.MemorySourceCgroup,\n\t\t},\n\t\tmemoryUsage: 100,\n\t}\n\n\t// Should not reject when MaxMemory is 0\n\treject, _ := m.checkMemoryAdmissionLocked()\n\trequire.False(t, reject)\n}\n\nfunc TestCheckMemoryAdmissionLocked_WithPendingMemory(t *testing.T) {\n\tm := &Monitor{\n\t\tcpuCostConfig: &config.CPUCostConfig{\n\t\t\tMaxMemory:    10,\n\t\t\tMemoryCost:   1,\n\t\t\tMemorySource: config.MemorySourceProcRSS,\n\t\t},\n\t\tmemoryUsage: 5,\n\t}\n\n\t// Add pending memory\n\tm.pendingMemoryUsage.Store(2) // 5 + 2 + 1 + 1 = 9 < 10\n\treject, _ := m.checkMemoryAdmissionLocked()\n\trequire.False(t, reject)\n\n\tm.pendingMemoryUsage.Store(3) // 5 + 3 + 1 + 1 = 10 >= 10\n\treject, reason := m.checkMemoryAdmissionLocked()\n\trequire.True(t, reject)\n\trequire.Equal(t, \"memory\", reason)\n}\n\nfunc TestCheckProcRSSMemoryAdmission(t *testing.T) {\n\tm := &Monitor{\n\t\tmemoryUsage: 5,\n\t}\n\n\t// Various scenarios\n\treject, _ := m.checkProcRSSMemoryAdmission(0, 1, 1, 10)\n\trequire.False(t, reject) // 5 + 0 + 1 + 1 = 7 < 10\n\n\treject, _ = m.checkProcRSSMemoryAdmission(2, 1, 
1, 10)\n\trequire.False(t, reject) // 5 + 2 + 1 + 1 = 9 < 10\n\n\treject, reason := m.checkProcRSSMemoryAdmission(3, 1, 1, 10)\n\trequire.True(t, reject) // 5 + 3 + 1 + 1 = 10 >= 10\n\trequire.Equal(t, \"memory\", reason)\n}\n"
  },
  {
    "path": "pkg/stats/monitor_prom.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage stats\n\nimport (\n\t\"github.com/prometheus/client_golang/prometheus\"\n\n\t\"github.com/livekit/protocol/livekit\"\n\t\"github.com/livekit/protocol/rpc\"\n)\n\nfunc (m *Monitor) initPrometheus() {\n\tpromNodeAvailable := prometheus.NewGaugeFunc(prometheus.GaugeOpts{\n\t\tNamespace:   \"livekit\",\n\t\tSubsystem:   \"egress\",\n\t\tName:        \"available\",\n\t\tConstLabels: prometheus.Labels{\"node_id\": m.nodeID, \"cluster_id\": m.clusterID},\n\t}, m.promIsIdle)\n\n\tpromCanAcceptRequest := prometheus.NewGaugeFunc(prometheus.GaugeOpts{\n\t\tNamespace:   \"livekit\",\n\t\tSubsystem:   \"egress\",\n\t\tName:        \"can_accept_request\",\n\t\tConstLabels: prometheus.Labels{\"node_id\": m.nodeID, \"cluster_id\": m.clusterID},\n\t}, m.promCanAcceptRequest)\n\n\tpromIsDisabled := prometheus.NewGaugeFunc(prometheus.GaugeOpts{\n\t\tNamespace:   \"livekit\",\n\t\tSubsystem:   \"egress\",\n\t\tName:        \"is_disabled\",\n\t\tConstLabels: prometheus.Labels{\"node_id\": m.nodeID, \"cluster_id\": m.clusterID},\n\t}, m.promIsDisabled)\n\n\tpromIsTerminating := prometheus.NewGaugeFunc(prometheus.GaugeOpts{\n\t\tNamespace:   \"livekit\",\n\t\tSubsystem:   \"egress\",\n\t\tName:        \"is_terminating\",\n\t\tConstLabels: prometheus.Labels{\"node_id\": m.nodeID, \"cluster_id\": m.clusterID},\n\t}, m.promIsTerminating)\n\n\tm.promCPULoad = 
prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace:   \"livekit\",\n\t\tSubsystem:   \"node\",\n\t\tName:        \"cpu_load\",\n\t\tConstLabels: prometheus.Labels{\"node_id\": m.nodeID, \"node_type\": \"EGRESS\", \"cluster_id\": m.clusterID},\n\t})\n\n\tm.requestGauge = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\tNamespace:   \"livekit\",\n\t\tSubsystem:   \"egress\",\n\t\tName:        \"requests\",\n\t\tConstLabels: prometheus.Labels{\"node_id\": m.nodeID, \"cluster_id\": m.clusterID},\n\t}, []string{\"type\"})\n\n\t// Cgroup memory metrics\n\tm.promCgroupMemory = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace:   \"livekit\",\n\t\tSubsystem:   \"egress\",\n\t\tName:        \"cgroup_memory_bytes\",\n\t\tHelp:        \"Cgroup memory usage in bytes\",\n\t\tConstLabels: prometheus.Labels{\"node_id\": m.nodeID, \"cluster_id\": m.clusterID},\n\t})\n\n\tm.promCgroupReadSuccess = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace:   \"livekit\",\n\t\tSubsystem:   \"egress\",\n\t\tName:        \"cgroup_read_success\",\n\t\tHelp:        \"Whether cgroup memory read succeeded (1) or failed (0)\",\n\t\tConstLabels: prometheus.Labels{\"node_id\": m.nodeID, \"cluster_id\": m.clusterID},\n\t})\n\n\tm.promProcRSS = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace:   \"livekit\",\n\t\tSubsystem:   \"egress\",\n\t\tName:        \"proc_rss_bytes\",\n\t\tHelp:        \"Per-process RSS sum in bytes\",\n\t\tConstLabels: prometheus.Labels{\"node_id\": m.nodeID, \"cluster_id\": m.clusterID},\n\t})\n\n\tm.promWouldRejectCgroup = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace:   \"livekit\",\n\t\tSubsystem:   \"egress\",\n\t\tName:        \"would_reject_cgroup\",\n\t\tHelp:        \"Whether request would be rejected using cgroup mode (1) or not (0)\",\n\t\tConstLabels: prometheus.Labels{\"node_id\": m.nodeID, \"cluster_id\": m.clusterID},\n\t})\n\n\tprometheus.MustRegister(\n\t\tpromNodeAvailable, promCanAcceptRequest, promIsDisabled, 
promIsTerminating,\n\t\tm.promCPULoad, m.requestGauge,\n\t\tm.promCgroupMemory,\n\t\tm.promCgroupReadSuccess, m.promProcRSS,\n\t\tm.promWouldRejectCgroup,\n\t)\n}\n\nfunc (m *Monitor) promIsIdle() float64 {\n\tif m.svc.IsIdle() {\n\t\treturn 1\n\t}\n\treturn 0\n}\n\nfunc (m *Monitor) promCanAcceptRequest() float64 {\n\tm.mu.Lock()\n\t_, canAccept := m.canAcceptRequestLocked(&rpc.StartEgressRequest{\n\t\tRequest: &rpc.StartEgressRequest_Web{Web: &livekit.WebEgressRequest{}},\n\t})\n\tm.mu.Unlock()\n\n\tif !m.svc.IsDisabled() && canAccept {\n\t\treturn 1\n\t}\n\treturn 0\n}\n\nfunc (m *Monitor) promIsDisabled() float64 {\n\tif m.svc.IsDisabled() {\n\t\treturn 1\n\t}\n\treturn 0\n}\n\nfunc (m *Monitor) promIsTerminating() float64 {\n\tif m.svc.IsTerminating() {\n\t\treturn 1\n\t}\n\treturn 0\n}\n"
  },
  {
    "path": "pkg/types/types.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage types\n\ntype RequestType string\ntype SourceType string\ntype EgressType string\ntype MimeType string\ntype Profile string\ntype OutputType string\ntype FileExtension string\n\nconst (\n\t// request types\n\tRequestTypeTemplate = \"template\"\n\tRequestTypeWeb      = \"web\"\n\tRequestTypeMedia    = \"media\"\n\n\tRequestTypeRoomComposite  = \"room_composite\"\n\tRequestTypeParticipant    = \"participant\"\n\tRequestTypeTrackComposite = \"track_composite\"\n\tRequestTypeTrack          = \"track\"\n\n\t// source types\n\tSourceTypeWeb SourceType = \"web\"\n\tSourceTypeSDK SourceType = \"sdk\"\n\n\t// egress types\n\tEgressTypeStream    EgressType = \"stream\"\n\tEgressTypeWebsocket EgressType = \"websocket\"\n\tEgressTypeFile      EgressType = \"file\"\n\tEgressTypeSegments  EgressType = \"segments\"\n\tEgressTypeImages    EgressType = \"images\"\n\n\t// input types\n\tMimeTypeAAC      MimeType = \"audio/aac\"\n\tMimeTypeOpus     MimeType = \"audio/opus\"\n\tMimeTypeRawAudio MimeType = \"audio/x-raw\"\n\tMimeTypeH264     MimeType = \"video/h264\"\n\tMimeTypeVP8      MimeType = \"video/vp8\"\n\tMimeTypeVP9      MimeType = \"video/vp9\"\n\tMimeTypeJPEG     MimeType = \"image/jpeg\"\n\tMimeTypeRawVideo MimeType = \"video/x-raw\"\n\tMimeTypeMP3      MimeType = \"audio/mpeg\"\n\tMimeTypePCMU     MimeType = \"audio/pcmu\"\n\tMimeTypePCMA     MimeType 
= \"audio/pcma\"\n\n\t// video profiles\n\tProfileBaseline Profile = \"baseline\"\n\tProfileMain     Profile = \"main\"\n\tProfileHigh     Profile = \"high\"\n\n\t// output types\n\tOutputTypeUnknownFile OutputType = \"\"\n\tOutputTypeRaw         OutputType = \"audio/x-raw\"\n\tOutputTypeOGG         OutputType = \"audio/ogg\"\n\tOutputTypeMP3         OutputType = \"audio/mpeg\"\n\tOutputTypeIVF         OutputType = \"video/x-ivf\"\n\tOutputTypeMP4         OutputType = \"video/mp4\"\n\tOutputTypeTS          OutputType = \"video/mp2t\"\n\tOutputTypeWebM        OutputType = \"video/webm\"\n\tOutputTypeJPEG        OutputType = \"image/jpeg\"\n\tOutputTypeRTMP        OutputType = \"rtmp\"\n\tOutputTypeSRT         OutputType = \"srt\"\n\tOutputTypeHLS         OutputType = \"application/x-mpegurl\"\n\tOutputTypeJSON        OutputType = \"application/json\"\n\tOutputTypeBlob        OutputType = \"application/octet-stream\"\n\n\t// file extensions\n\tFileExtensionRaw  = \".raw\"\n\tFileExtensionOGG  = \".ogg\"\n\tFileExtensionMP3  = \".mp3\"\n\tFileExtensionIVF  = \".ivf\"\n\tFileExtensionMP4  = \".mp4\"\n\tFileExtensionTS   = \".ts\"\n\tFileExtensionWebM = \".webm\"\n\tFileExtensionM3U8 = \".m3u8\"\n\tFileExtensionJPEG = \".jpeg\"\n)\n\nvar (\n\tDefaultAudioCodecs = map[OutputType]MimeType{\n\t\tOutputTypeRaw:  MimeTypeRawAudio,\n\t\tOutputTypeOGG:  MimeTypeOpus,\n\t\tOutputTypeMP3:  MimeTypeMP3,\n\t\tOutputTypeMP4:  MimeTypeAAC,\n\t\tOutputTypeTS:   MimeTypeAAC,\n\t\tOutputTypeWebM: MimeTypeOpus,\n\t\tOutputTypeRTMP: MimeTypeAAC,\n\t\tOutputTypeSRT:  MimeTypeAAC,\n\t\tOutputTypeHLS:  MimeTypeAAC,\n\t}\n\n\tDefaultVideoCodecs = map[OutputType]MimeType{\n\t\tOutputTypeIVF:  MimeTypeVP8,\n\t\tOutputTypeMP4:  MimeTypeH264,\n\t\tOutputTypeTS:   MimeTypeH264,\n\t\tOutputTypeWebM: MimeTypeVP8,\n\t\tOutputTypeRTMP: MimeTypeH264,\n\t\tOutputTypeSRT:  MimeTypeH264,\n\t\tOutputTypeHLS:  MimeTypeH264,\n\t}\n\n\tFileExtensions = map[FileExtension]struct{}{\n\t\tFileExtensionRaw:  
{},\n\t\tFileExtensionOGG:  {},\n\t\tFileExtensionMP3:  {},\n\t\tFileExtensionIVF:  {},\n\t\tFileExtensionMP4:  {},\n\t\tFileExtensionTS:   {},\n\t\tFileExtensionWebM: {},\n\t\tFileExtensionM3U8: {},\n\t\tFileExtensionJPEG: {},\n\t}\n\n\tFileExtensionForOutputType = map[OutputType]FileExtension{\n\t\tOutputTypeRaw:  FileExtensionRaw,\n\t\tOutputTypeOGG:  FileExtensionOGG,\n\t\tOutputTypeMP3:  FileExtensionMP3,\n\t\tOutputTypeIVF:  FileExtensionIVF,\n\t\tOutputTypeMP4:  FileExtensionMP4,\n\t\tOutputTypeTS:   FileExtensionTS,\n\t\tOutputTypeWebM: FileExtensionWebM,\n\t\tOutputTypeHLS:  FileExtensionM3U8,\n\t\tOutputTypeJPEG: FileExtensionJPEG,\n\t}\n\n\tCodecCompatibility = map[OutputType]map[MimeType]bool{\n\t\tOutputTypeRaw: {\n\t\t\tMimeTypeRawAudio: true,\n\t\t},\n\t\tOutputTypeOGG: {\n\t\t\tMimeTypeOpus: true,\n\t\t},\n\t\tOutputTypeIVF: {\n\t\t\tMimeTypeVP8: true,\n\t\t\tMimeTypeVP9: true,\n\t\t},\n\t\tOutputTypeMP4: {\n\t\t\tMimeTypeAAC:  true,\n\t\t\tMimeTypeOpus: true,\n\t\t\tMimeTypeH264: true,\n\t\t},\n\t\tOutputTypeTS: {\n\t\t\tMimeTypeAAC:  true,\n\t\t\tMimeTypeOpus: true,\n\t\t\tMimeTypeH264: true,\n\t\t},\n\t\tOutputTypeWebM: {\n\t\t\tMimeTypeOpus: true,\n\t\t\tMimeTypeVP8:  true,\n\t\t\tMimeTypeVP9:  true,\n\t\t},\n\t\tOutputTypeRTMP: {\n\t\t\tMimeTypeAAC:  true,\n\t\t\tMimeTypeH264: true,\n\t\t},\n\t\tOutputTypeSRT: {\n\t\t\tMimeTypeAAC:  true,\n\t\t\tMimeTypeH264: true,\n\t\t},\n\t\tOutputTypeHLS: {\n\t\t\tMimeTypeAAC:  true,\n\t\t\tMimeTypeH264: true,\n\t\t},\n\t\tOutputTypeMP3: {\n\t\t\tMimeTypeMP3:      true,\n\t\t\tMimeTypeOpus:     true,\n\t\t\tMimeTypeAAC:      true,\n\t\t\tMimeTypeRawAudio: true,\n\t\t},\n\t\tOutputTypeUnknownFile: {\n\t\t\tMimeTypeAAC:  true,\n\t\t\tMimeTypeOpus: true,\n\t\t\tMimeTypeMP3:  true,\n\t\t\tMimeTypeH264: true,\n\t\t\tMimeTypeVP8:  true,\n\t\t\tMimeTypeVP9:  true,\n\t\t},\n\t}\n\n\tAllOutputAudioCodecs = map[MimeType]bool{\n\t\tMimeTypeAAC:      true,\n\t\tMimeTypeOpus:     true,\n\t\tMimeTypeRawAudio: 
true,\n\t\tMimeTypeMP3:      true,\n\t}\n\n\tAllOutputVideoCodecs = map[MimeType]bool{\n\t\tMimeTypeH264: true,\n\t}\n\n\tAudioOnlyFileOutputTypes = []OutputType{\n\t\tOutputTypeOGG,\n\t\tOutputTypeMP4,\n\t\tOutputTypeMP3,\n\t}\n\tVideoOnlyFileOutputTypes = []OutputType{\n\t\tOutputTypeMP4,\n\t}\n\tAudioVideoFileOutputTypes = []OutputType{\n\t\tOutputTypeMP4,\n\t}\n\n\tTrackOutputTypes = map[MimeType]OutputType{\n\t\tMimeTypeOpus: OutputTypeOGG,\n\t\tMimeTypePCMU: OutputTypeOGG,\n\t\tMimeTypePCMA: OutputTypeOGG,\n\t\tMimeTypeH264: OutputTypeMP4,\n\t\tMimeTypeVP8:  OutputTypeWebM,\n\t\tMimeTypeVP9:  OutputTypeWebM,\n\t}\n\n\tStreamOutputTypes = map[string]OutputType{\n\t\t\"rtmp\":   OutputTypeRTMP,\n\t\t\"rtmps\":  OutputTypeRTMP,\n\t\t\"mux\":    OutputTypeRTMP,\n\t\t\"twitch\": OutputTypeRTMP,\n\t\t\"srt\":    OutputTypeSRT,\n\t\t\"ws\":     OutputTypeRaw,\n\t\t\"wss\":    OutputTypeRaw,\n\t}\n)\n\nfunc GetOutputTypeCompatibleWithCodecs(types []OutputType, audioCodecs map[MimeType]bool, videoCodecs map[MimeType]bool) OutputType {\n\tfor _, t := range types {\n\t\tif audioCodecs != nil && !IsOutputTypeCompatibleWithCodecs(t, audioCodecs) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif videoCodecs != nil && !IsOutputTypeCompatibleWithCodecs(t, videoCodecs) {\n\t\t\tcontinue\n\t\t}\n\n\t\treturn t\n\t}\n\n\treturn OutputTypeUnknownFile\n}\n\nfunc IsOutputTypeCompatibleWithCodecs(ot OutputType, codecs map[MimeType]bool) bool {\n\tfor k := range codecs {\n\t\tif CodecCompatibility[ot][k] {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc GetMapIntersection[K comparable](mapA map[K]bool, mapB map[K]bool) map[K]bool {\n\tres := make(map[K]bool)\n\n\tfor k := range mapA {\n\t\tif mapB[k] {\n\t\t\tres[k] = true\n\t\t}\n\t}\n\n\treturn res\n}\n"
  },
  {
    "path": "pkg/types/types_test.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage types\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestGetMapIntersection(t *testing.T) {\n\tlist := make(map[MimeType]bool)\n\n\tres := GetMapIntersection(list, CodecCompatibility[OutputTypeUnknownFile])\n\trequire.Empty(t, res)\n\n\tlist[MimeTypeH264] = true\n\tres = GetMapIntersection(list, CodecCompatibility[OutputTypeOGG])\n\trequire.Empty(t, res)\n\n\tlist[MimeTypeVP8] = true\n\tres = GetMapIntersection(list, CodecCompatibility[OutputTypeMP4])\n\trequire.Equal(t, map[MimeType]bool{MimeTypeH264: true}, res)\n}\n\nfunc TestGetOutputTypesCompatibleWithCodecs(t *testing.T) {\n\toutputTypes := make([]OutputType, 0)\n\taudioCodecs := make(map[MimeType]bool)\n\tvideoCodecs := make(map[MimeType]bool)\n\n\tres := GetOutputTypeCompatibleWithCodecs(outputTypes, audioCodecs, videoCodecs)\n\trequire.Empty(t, res)\n\n\toutputTypes = append(outputTypes, OutputTypeOGG, OutputTypeMP4)\n\tres = GetOutputTypeCompatibleWithCodecs(outputTypes, audioCodecs, videoCodecs)\n\trequire.Empty(t, res)\n\n\taudioCodecs[MimeTypeAAC] = true\n\toutputTypes = append(outputTypes, OutputTypeMP4)\n\tres = GetOutputTypeCompatibleWithCodecs(outputTypes, audioCodecs, videoCodecs)\n\trequire.Empty(t, res)\n\n\tvideoCodecs[MimeTypeVP8] = true\n\toutputTypes = append(outputTypes, OutputTypeMP4)\n\tres = 
GetOutputTypeCompatibleWithCodecs(outputTypes, audioCodecs, videoCodecs)\n\trequire.Empty(t, res)\n\n\tvideoCodecs[MimeTypeH264] = true\n\toutputTypes = append(outputTypes, OutputTypeMP4)\n\tres = GetOutputTypeCompatibleWithCodecs(outputTypes, audioCodecs, videoCodecs)\n\trequire.Equal(t, OutputTypeMP4, res)\n}\n"
  },
  {
    "path": "renovate.json",
    "content": "{\n  \"$schema\": \"https://docs.renovatebot.com/renovate-schema.json\",\n  \"extends\": [\n    \"config:base\"\n  ],\n  \"commitBody\": \"Generated by renovateBot\",\n  \"packageRules\": [\n    {\n      \"matchManagers\": [\"github-actions\"],\n      \"groupName\": \"github workflows\"\n    },\n    {\n      \"matchManagers\": [\"dockerfile\"],\n      \"groupName\": \"docker deps\"\n    },\n    {\n      \"matchManagers\": [\"npm\"],\n      \"groupName\": \"npm deps\"\n    },\n    {\n      \"matchManagers\": [\"gomod\"],\n      \"groupName\": \"go deps\"\n    },\n    {\n      \"matchPackagePrefixes\": [\"github.com/grafov/m3u8\"],\n      \"enabled\": false\n    }\n  ],\n  \"postUpdateOptions\": [\n    \"gomodTidy\"\n  ],\n  \"schedule\": [\"on sunday\"],\n  \"updateNotScheduled\": false\n}\n"
  },
  {
    "path": "template-default/.gitignore",
    "content": "# Logs\nlogs\n*.log\nnpm-debug.log*\nyarn-debug.log*\nyarn-error.log*\npnpm-debug.log*\nlerna-debug.log*\n\nnode_modules\nbuild\ndist-ssr\n*.local\n\n# Editor directories and files\n.vscode/*\n!.vscode/extensions.json\n.idea\n.DS_Store\n*.suo\n*.ntvs*\n*.njsproj\n*.sln\n*.sw?\n"
  },
  {
    "path": "template-default/.prettierrc",
    "content": "{\n  \"singleQuote\": true,\n  \"trailingComma\": \"all\",\n  \"semi\": true,\n  \"tabWidth\": 2,\n  \"printWidth\": 100,\n  \"plugins\": [],\n  \"pluginSearchDirs\": [\".\"]\n}\n"
  },
  {
    "path": "template-default/README.md",
    "content": "# Default LiveKit Recording Templates\n\nThis repo contains the default recording template used with LiveKit Egress. The templates are deployed alongside and served by the egress service.\n\nSee docs [here](https://docs.livekit.io/guides/egress/room-composite/#default-layouts)\n"
  },
  {
    "path": "template-default/eslint.config.js",
    "content": "import js from '@eslint/js'\nimport globals from 'globals'\nimport reactHooks from 'eslint-plugin-react-hooks'\nimport reactRefresh from 'eslint-plugin-react-refresh'\nimport tseslint from 'typescript-eslint'\nimport { globalIgnores } from 'eslint/config'\n\nexport default tseslint.config([\n  globalIgnores(['build']),\n  {\n    files: ['**/*.{ts,tsx}'],\n    extends: [\n      js.configs.recommended,\n      tseslint.configs.recommended,\n      reactHooks.configs['recommended-latest'],\n      reactRefresh.configs.vite,\n    ],\n    languageOptions: {\n      ecmaVersion: 2020,\n      globals: globals.browser,\n    },\n  },\n])\n"
  },
  {
    "path": "template-default/index.html",
    "content": "<!DOCTYPE html>\n<html lang=\"en\">\n  <head>\n    <meta charset=\"utf-8\" />\n    <link rel=\"icon\" href=\"favicon.ico\" />\n    <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />\n    <meta name=\"theme-color\" content=\"#000000\" />\n    <meta name=\"description\" content=\"Recording interface for LiveKit\" />\n    <link rel=\"apple-touch-icon\" href=\"logo.png\" />\n    <!--\n      manifest.json provides metadata used when your web app is installed on a\n      user's mobile device or desktop. See https://developers.google.com/web/fundamentals/web-app-manifest/\n    -->\n    <link rel=\"manifest\" href=\"manifest.json\" />\n    <title>LiveKit Egress</title>\n  </head>\n  <body>\n    <noscript>You need to enable JavaScript to run this app.</noscript>\n    <div id=\"root\"></div>\n\t<script type=\"module\" src=\"/src/index.tsx\"></script>\n    <!--\n      This HTML file is a template.\n      If you open it directly in the browser, you will see an empty page.\n\n      You can add webfonts, meta tags, or analytics to this file.\n      The build step will place the bundled scripts into the <body> tag.\n\n      To begin the development, run `npm start` or `pnpm start`.\n      To create a production bundle, use `npm run build` or `pnpm build`.\n    -->\n  </body>\n</html>\n"
  },
  {
    "path": "template-default/package.json",
    "content": "{\n  \"name\": \"livekit-egress-web\",\n  \"homepage\": \"https://livekit.io\",\n  \"description\": \"Default templates for RoomComposite egress\",\n  \"version\": \"0.2.1\",\n  \"private\": true,\n  \"dependencies\": {\n    \"@livekit/components-core\": \"^0.11.11\",\n    \"@livekit/components-react\": \"^2.9.14\",\n    \"@livekit/components-styles\": \"^1.1.6\",\n    \"@livekit/egress-sdk\": \"^0.2.1\",\n    \"livekit-client\": \"^2.15.6\",\n    \"react\": \"^19.1.1\",\n    \"react-dom\": \"^19.1.1\"\n  },\n  \"scripts\": {\n    \"dev\": \"vite\",\n    \"build\": \"vite build\",\n    \"lint\": \"eslint .\",\n    \"preview\": \"vite preview\"\n  },\n  \"devDependencies\": {\n    \"@eslint/js\": \"^9.33.0\",\n    \"@types/react\": \"^19.1.10\",\n    \"@types/react-dom\": \"^19.1.7\",\n    \"@vitejs/plugin-react\": \"^5.0.0\",\n    \"eslint\": \"^9.39.1\",\n    \"eslint-plugin-react-hooks\": \"^5.2.0\",\n    \"eslint-plugin-react-refresh\": \"^0.4.20\",\n    \"globals\": \"^16.3.0\",\n    \"typescript\": \"~5.8.3\",\n    \"typescript-eslint\": \"^8.39.1\",\n    \"vite\": \"^7.2.2\"\n  }\n}\n"
  },
  {
    "path": "template-default/public/manifest.json",
    "content": "{\n  \"short_name\": \"livekit-egress-web\",\n  \"name\": \"Web template for LiveKit Egress\",\n  \"icons\": [\n    {\n      \"src\": \"favicon.ico\",\n      \"sizes\": \"64x64 32x32 24x24 16x16\",\n      \"type\": \"image/x-icon\"\n    },\n    {\n      \"src\": \"logo.png\",\n      \"type\": \"image/png\",\n      \"sizes\": \"150x150\"\n    }\n  ],\n  \"start_url\": \".\",\n  \"display\": \"standalone\",\n  \"theme_color\": \"#000000\",\n  \"background_color\": \"#ffffff\"\n}\n"
  },
  {
    "path": "template-default/public/robots.txt",
    "content": "# https://www.robotstxt.org/robotstxt.html\nUser-agent: *\nDisallow:\n"
  },
  {
    "path": "template-default/src/App.css",
    "content": "/**\n * Copyright 2023 LiveKit, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nbody {\n  padding: 0;\n  font-family: Avenir, -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', 'Ubuntu',\n    'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue', sans-serif;\n  -webkit-font-smoothing: antialiased;\n  -moz-osx-font-smoothing: grayscale;\n  background: black;\n  color: rgb(211, 210, 210);\n  box-sizing: border-box;\n  margin: 0;\n  height: 100vh;\n  font-size: 12px;\n  overflow: hidden;\n}\n\n.light {\n  background: white;\n}\n\n.roomContainer {\n  height: 100vh;\n}\n\n.error {\n  color: red;\n}\n\n.lk-grid-layout-wrapper {\n  height: 100%;\n}\n\n.lk-focus-layout {\n  height: 100%;\n}\n\n/* things like name, connection quality, etc make less sense in a recording, hide for now */\n.lk-participant-metadata {\n  display: none;\n}\n"
  },
  {
    "path": "template-default/src/App.tsx",
    "content": "/**\n * Copyright 2023 LiveKit, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport '@livekit/components-styles';\nimport '@livekit/components-styles/prefabs';\nimport EgressHelper from '@livekit/egress-sdk';\nimport './App.css';\nimport RoomPage from './Room';\n\nfunction App() {\n  return (\n    <div className=\"container\">\n      <RoomPage\n        // EgressHelper retrieves parameters passed to the page\n        url={EgressHelper.getLiveKitURL()}\n        token={EgressHelper.getAccessToken()}\n        layout={EgressHelper.getLayout()}\n      />\n    </div>\n  );\n}\n\nexport default App;\n"
  },
  {
    "path": "template-default/src/Room.tsx",
    "content": "/**\n * Copyright 2023 LiveKit, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport {\n  GridLayout,\n  LiveKitRoom,\n  ParticipantTile,\n  RoomAudioRenderer,\n  useRoomContext,\n  useTracks,\n} from '@livekit/components-react';\nimport EgressHelper from '@livekit/egress-sdk';\nimport { ConnectionState, Track } from 'livekit-client';\nimport { ReactElement, useEffect, useState } from 'react';\nimport SingleSpeakerLayout from './SingleSpeakerLayout';\nimport SpeakerLayout from './SpeakerLayout';\n\nconst FRAME_DECODE_TIMEOUT = 5000;\n\ninterface RoomPageProps {\n  url: string;\n  token: string;\n  layout: string;\n}\n\nexport default function RoomPage({ url, token, layout }: RoomPageProps) {\n  const [error, setError] = useState<Error>();\n  if (!url || !token) {\n    return <div className=\"error\">missing required params url and token</div>;\n  }\n\n  return (\n    <LiveKitRoom serverUrl={url} token={token} onError={setError}>\n      {error ? 
<div className=\"error\">{error.message}</div> : <CompositeTemplate layout={layout} />}\n    </LiveKitRoom>\n  );\n}\n\ninterface CompositeTemplateProps {\n  layout: string;\n}\n\nfunction CompositeTemplate({ layout: initialLayout }: CompositeTemplateProps) {\n  const room = useRoomContext();\n  const [layout] = useState(initialLayout);\n  const [hasScreenShare, setHasScreenShare] = useState(false);\n  const screenshareTracks = useTracks([Track.Source.ScreenShare], {\n    onlySubscribed: true,\n  });\n\n  EgressHelper.setRoom(room);\n\n  useEffect(() => {\n    // determines when to start recording\n    // the algorithm used is:\n    // * if there are video tracks published, wait for frames to be decoded\n    // * if there are no video tracks published, start immediately\n    // * if it's been more than 10s, record as long as there are tracks subscribed\n    const startTime = Date.now();\n    const interval = setInterval(async () => {\n      let shouldStartRecording = false;\n      let hasVideoTracks = false;\n      let hasSubscribedTracks = false;\n      let hasDecodedFrames = false;\n      for (const p of Array.from(room.remoteParticipants.values())) {\n        for (const pub of Array.from(p.trackPublications.values())) {\n          if (pub.isSubscribed) {\n            hasSubscribedTracks = true;\n          }\n          if (pub.kind === Track.Kind.Video) {\n            hasVideoTracks = true;\n            if (pub.videoTrack) {\n              const stats = await pub.videoTrack.getRTCStatsReport();\n              if (stats) {\n                hasDecodedFrames = Array.from(stats).some(\n                  (item) => item[1].type === 'inbound-rtp' && item[1].framesDecoded > 0,\n                );\n              }\n            }\n          }\n        }\n      }\n\n      const timeDelta = Date.now() - startTime;\n      if (hasDecodedFrames) {\n        shouldStartRecording = true;\n      } else if (!hasVideoTracks && hasSubscribedTracks && timeDelta > 500) {\n        // 
adding a small timeout to ensure video tracks has a chance to be published\n        shouldStartRecording = true;\n      } else if (timeDelta > FRAME_DECODE_TIMEOUT && hasSubscribedTracks) {\n        shouldStartRecording = true;\n      }\n\n      if (shouldStartRecording) {\n        EgressHelper.startRecording();\n        clearInterval(interval);\n      }\n    }, 100);\n    /* eslint-disable-next-line react-hooks/exhaustive-deps */\n  }, []);\n\n  useEffect(() => {\n    if (screenshareTracks.length > 0 && screenshareTracks[0].publication) {\n      setHasScreenShare(true);\n    } else {\n      setHasScreenShare(false);\n    }\n  }, [screenshareTracks]);\n\n  const allTracks = useTracks(\n    [Track.Source.Camera, Track.Source.ScreenShare, Track.Source.Unknown],\n    {\n      onlySubscribed: true,\n    },\n  );\n  const filteredTracks = allTracks.filter(\n    (tr) =>\n      tr.publication.kind === Track.Kind.Video &&\n      tr.participant.identity !== room.localParticipant.identity,\n  );\n\n  let interfaceStyle = 'dark';\n  if (layout.endsWith('-light')) {\n    interfaceStyle = 'light';\n  }\n\n  let containerClass = 'roomContainer';\n  if (interfaceStyle) {\n    containerClass += ` ${interfaceStyle}`;\n  }\n\n  // determine layout to use\n  let main: ReactElement = <></>;\n  let effectiveLayout = layout;\n  if (hasScreenShare && layout.startsWith('grid')) {\n    effectiveLayout = layout.replace('grid', 'speaker');\n  }\n  if (room.state !== ConnectionState.Disconnected) {\n    if (effectiveLayout.startsWith('speaker')) {\n      main = <SpeakerLayout tracks={filteredTracks} />;\n    } else if (effectiveLayout.startsWith('single-speaker')) {\n      main = <SingleSpeakerLayout tracks={filteredTracks} />;\n    } else {\n      main = (\n        <GridLayout tracks={filteredTracks}>\n          <ParticipantTile />\n        </GridLayout>\n      );\n    }\n  }\n\n  return (\n    <div className={containerClass}>\n      {main}\n      <RoomAudioRenderer />\n    </div>\n  );\n}\n"
  },
  {
    "path": "template-default/src/SingleSpeakerLayout.tsx",
    "content": "/**\n * Copyright 2023 LiveKit, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { TrackReference, useVisualStableUpdate, VideoTrack } from '@livekit/components-react';\nimport { LayoutProps } from './common';\n\nconst SingleSpeakerLayout = ({ tracks: references }: LayoutProps) => {\n  const sortedReferences = useVisualStableUpdate(references, 1);\n  if (sortedReferences.length === 0) {\n    return null;\n  }\n  return <VideoTrack trackRef={sortedReferences[0] as TrackReference} />;\n};\n\nexport default SingleSpeakerLayout;\n"
  },
  {
    "path": "template-default/src/SpeakerLayout.tsx",
    "content": "/**\n * Copyright 2023 LiveKit, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { TrackReference } from '@livekit/components-core';\nimport {\n  CarouselLayout,\n  FocusLayout,\n  ParticipantTile,\n  VideoTrack,\n  useVisualStableUpdate,\n} from '@livekit/components-react';\nimport { LayoutProps } from './common';\n\nconst SpeakerLayout = ({ tracks: references }: LayoutProps) => {\n  const sortedTracks = useVisualStableUpdate(references, 1);\n  const mainTrack = sortedTracks.shift();\n  const remainingTracks = useVisualStableUpdate(sortedTracks, 3);\n\n  if (!mainTrack) {\n    return <></>;\n  } else if (remainingTracks.length === 0) {\n    const trackRef = mainTrack as TrackReference;\n    return <VideoTrack trackRef={trackRef} />;\n  }\n\n  return (\n    <div className=\"lk-focus-layout\">\n      <CarouselLayout tracks={remainingTracks}>\n        <ParticipantTile />\n      </CarouselLayout>\n      <FocusLayout trackRef={mainTrack as TrackReference} />\n    </div>\n  );\n};\n\nexport default SpeakerLayout;\n"
  },
  {
    "path": "template-default/src/common.ts",
    "content": "/**\n * Copyright 2023 LiveKit, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { TrackReference } from '@livekit/components-core';\n\nexport interface LayoutProps {\n  tracks: TrackReference[];\n}\n"
  },
  {
    "path": "template-default/src/index.css",
    "content": "/**\n * Copyright 2023 LiveKit, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nbody {\n  margin: 0;\n  font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen',\n    'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue',\n    sans-serif;\n  -webkit-font-smoothing: antialiased;\n  -moz-osx-font-smoothing: grayscale;\n}\n\ncode {\n  font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New',\n    monospace;\n}\n"
  },
  {
    "path": "template-default/src/index.tsx",
    "content": "/**\n * Copyright 2023 LiveKit, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport React from 'react';\nimport { createRoot } from 'react-dom/client';\nimport App from './App';\n\nconst container = document.getElementById('root');\nif (!container) throw new Error('Failed to find the root element');\n\nconst root = createRoot(container);\n\nroot.render(\n  <React.StrictMode>\n    <App />\n  </React.StrictMode>,\n);\n"
  },
  {
    "path": "template-default/src/vite-env.d.ts",
    "content": "/// <reference types=\"vite/client\" />\n"
  },
  {
    "path": "template-default/tsconfig.app.json",
    "content": "{\n  \"compilerOptions\": {\n    \"tsBuildInfoFile\": \"./node_modules/.tmp/tsconfig.app.tsbuildinfo\",\n    \"target\": \"ES2022\",\n    \"useDefineForClassFields\": true,\n    \"lib\": [\"ES2022\", \"DOM\", \"DOM.Iterable\"],\n    \"module\": \"ESNext\",\n    \"skipLibCheck\": true,\n\n    /* Bundler mode */\n    \"moduleResolution\": \"bundler\",\n    \"allowImportingTsExtensions\": true,\n    \"moduleDetection\": \"force\",\n    \"noEmit\": true,\n    \"jsx\": \"react-jsx\",\n\n    /* Linting */\n    \"strict\": true,\n    \"noUnusedLocals\": true,\n    \"noUnusedParameters\": true,\n    \"erasableSyntaxOnly\": true,\n    \"noFallthroughCasesInSwitch\": true,\n    \"noUncheckedSideEffectImports\": true\n  },\n  \"include\": [\"src\"]\n}\n"
  },
  {
    "path": "template-default/tsconfig.json",
    "content": "{\n  \"files\": [],\n  \"references\": [\n    { \"path\": \"./tsconfig.app.json\" },\n    { \"path\": \"./tsconfig.node.json\" }\n  ]\n}\n"
  },
  {
    "path": "template-default/tsconfig.node.json",
    "content": "{\n  \"compilerOptions\": {\n    \"tsBuildInfoFile\": \"./node_modules/.tmp/tsconfig.node.tsbuildinfo\",\n    \"target\": \"ES2023\",\n    \"lib\": [\"ES2023\"],\n    \"module\": \"ESNext\",\n    \"skipLibCheck\": true,\n\n    /* Bundler mode */\n    \"moduleResolution\": \"bundler\",\n    \"allowImportingTsExtensions\": true,\n    \"moduleDetection\": \"force\",\n    \"noEmit\": true,\n\n    /* Linting */\n    \"strict\": true,\n    \"noUnusedLocals\": true,\n    \"noUnusedParameters\": true,\n    \"erasableSyntaxOnly\": true,\n    \"noFallthroughCasesInSwitch\": true,\n    \"noUncheckedSideEffectImports\": true\n  },\n  \"include\": [\"vite.config.ts\"]\n}\n"
  },
  {
    "path": "template-default/vite.config.ts",
    "content": "import { defineConfig } from 'vite'\nimport react from '@vitejs/plugin-react'\n\n// https://vite.dev/config/\nexport default defineConfig({\n  plugins: [react()],\n  build: {\n\t  sourcemap: \"hidden\",\n\t  outDir: \"build\"\n  }\n})\n"
  },
  {
    "path": "template-sdk/.gitignore",
    "content": "node_modules/\ndist/"
  },
  {
    "path": "template-sdk/.npmignore",
    "content": ".github\nnode_modules\ntsconfig.json\n.prettierrc\n"
  },
  {
    "path": "template-sdk/.prettierrc",
    "content": "{\n  \"singleQuote\": true,\n  \"trailingComma\": \"all\",\n  \"semi\": true,\n  \"tabWidth\": 2,\n  \"printWidth\": 100,\n  \"plugins\": [],\n  \"pluginSearchDirs\": [\".\"]\n}\n"
  },
  {
    "path": "template-sdk/README.md",
    "content": "# Egress Recording Template SDK\n\nThis lightweight SDK makes it simple to build your own Room Composite templates.\n\n## Docs\n\nSee [custom egress template docs](https://docs.livekit.io/guides/egress/custom-template/)\n"
  },
  {
    "path": "template-sdk/package.json",
    "content": "{\n  \"name\": \"@livekit/egress-sdk\",\n  \"version\": \"0.2.1\",\n  \"description\": \"A lightweight SDK for developing RoomComposite templates\",\n  \"main\": \"dist/index.js\",\n  \"types\": \"dist/index.d.ts\",\n  \"source\": \"src/index.ts\",\n  \"repository\": \"https://github.com/livekit/egress\",\n  \"author\": \"David Zhao <dz@livekit.io>\",\n  \"license\": \"Apache-2.0\",\n  \"scripts\": {\n    \"build\": \"tsc\"\n  },\n  \"devDependencies\": {\n    \"livekit-client\": \"^2.12.0\",\n    \"prettier\": \"^2.8.8\",\n    \"typescript\": \"^5.8.3\"\n  },\n  \"peerDependencies\": {\n    \"livekit-client\": \"^1.15.13 || ^2.7.5\"\n  }\n}\n"
  },
  {
    "path": "template-sdk/src/index.ts",
    "content": "/**\n * Copyright 2023 LiveKit, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nimport { ParticipantEvent, Room, RoomEvent } from 'livekit-client';\n\nconst EgressHelper = {\n  /**\n   * RoomComposite will pass URL to your livekit's server instance.\n   * @returns\n   */\n  getLiveKitURL(): string {\n    const url = getURLParam('url');\n    if (!url) {\n      throw new Error('url is not found in query string');\n    }\n    return url;\n  },\n\n  /**\n   *\n   * @returns access token to pass to `Room.connect`\n   */\n  getAccessToken(): string {\n    const token = getURLParam('token');\n    if (!token) {\n      throw new Error('token is not found in query string');\n    }\n    return token;\n  },\n\n  /**\n   * the current desired layout. layout can be changed dynamically with [Egress.UpdateLayout](https://github.com/livekit/protocol/blob/main/livekit_egress.proto#L15)\n   * @returns\n   */\n  getLayout(): string {\n    if (state.layout) {\n      return state.layout;\n    }\n    const layout = getURLParam('layout');\n    return layout ?? 
'';\n  },\n\n  /**\n   * Call when successfully connected to the room\n   * @param room\n   */\n  setRoom(room: Room) {\n    if (currentRoom) {\n      currentRoom.off(RoomEvent.Disconnected, EgressHelper.endRecording);\n    }\n\n    currentRoom = room;\n    currentRoom.localParticipant.on(ParticipantEvent.ParticipantMetadataChanged, onMetadataChanged);\n    currentRoom.on(RoomEvent.Disconnected, EgressHelper.endRecording);\n    onMetadataChanged();\n  },\n\n  /**\n   * Starts recording the room that's passed in\n   */\n  startRecording() {\n    console.log('START_RECORDING');\n  },\n\n  /**\n   * Finishes recording the room, by default, it'll automatically finish\n   * when all other participants have left the room.\n   */\n  endRecording() {\n    currentRoom = undefined;\n    console.log('END_RECORDING');\n  },\n\n  /**\n   * Registers a callback to listen to layout changes.\n   * @param f\n   */\n  onLayoutChanged(f: (layout: string) => void) {\n    layoutChangedCallback = f;\n  },\n};\n\nlet currentRoom: Room | undefined;\nlet layoutChangedCallback: ((layout: string) => void) | undefined;\nlet state: TemplateState = {\n  layout: '',\n};\n\ninterface TemplateState {\n  layout: string;\n}\n\nfunction onMetadataChanged() {\n  // for recorder, metadata is a JSON object containing layout\n  const metadata = currentRoom?.localParticipant.metadata;\n  if (metadata) {\n    const newState: TemplateState = JSON.parse(metadata);\n    if (newState && newState.layout !== state.layout) {\n      state = newState;\n      layoutChangedCallback?.(state.layout);\n    }\n  }\n}\n\nfunction getURLParam(name: string): string | null {\n  const query = new URLSearchParams(window.location.search);\n  return query.get(name);\n}\n\nexport default EgressHelper;\n"
  },
  {
    "path": "template-sdk/tsconfig.json",
    "content": "{\n  \"compilerOptions\": {\n    \"target\": \"es2015\",                          /* Specify ECMAScript target version: 'ES3' (default), 'ES5', 'ES2015', 'ES2016', 'ES2017', 'ES2018', 'ES2019', 'ES2020', or 'ESNEXT'. */\n    \"module\": \"commonjs\",                     /* Specify module code generation: 'none', 'commonjs', 'amd', 'system', 'umd', 'es2015', 'es2020', or 'ESNext'. */\n    \"outDir\": \"dist\",\n    \"declaration\": true,\n    \"sourceMap\": true,\n    \"strict\": true,                           /* Enable all strict type-checking options. */\n    \"esModuleInterop\": true,                  /* Enables emit interoperability between CommonJS and ES Modules via creation of namespace objects for all imports. Implies 'allowSyntheticDefaultImports'. */\n    \"skipLibCheck\": true,                     /* Skip type checking of declaration files. */\n    \"noUnusedLocals\": true,\n    \"forceConsistentCasingInFileNames\": true  /* Disallow inconsistently-cased references to the same file. */\n  },\n  \"include\": [\n    \"src/**/*\",\n  ]\n}\n"
  },
  {
    "path": "test/agents/.gitignore",
    "content": "venv/\n.env\n"
  },
  {
    "path": "test/agents/guest.py",
    "content": "from dotenv import load_dotenv\nfrom livekit.agents import (\n    Agent,\n    AgentSession,\n    JobContext,\n    WorkerOptions,\n    cli,\n)\nfrom livekit.plugins import deepgram, elevenlabs, openai, silero\n\n\nload_dotenv(dotenv_path=\".env\", override=True)\n\n\nasync def entrypoint(ctx: JobContext):\n    await ctx.connect()\n\n    agent = Agent(\n        instructions=\"You are a guest on a podcast.\"\n                     \" The audio from this conversation will be streamed to live listeners.\"\n                     \" Choose a field of study and expertise, and talk about recent developments in that field.\",\n        turn_detection=\"vad\",\n    )\n    session = AgentSession(\n        vad=silero.VAD.load(),\n        stt=deepgram.STT(model=\"nova-3\"),\n        llm=openai.LLM(model=\"gpt-4o-mini\"),\n        tts=elevenlabs.TTS(),\n    )\n\n    await session.start(agent=agent, room=ctx.room)\n\n\nif __name__ == \"__main__\":\n    cli.run_app(WorkerOptions(entrypoint_fnc=entrypoint, agent_name=\"egress-integration-guest\"))\n"
  },
  {
    "path": "test/agents/host.py",
    "content": "from dotenv import load_dotenv\nfrom livekit.agents import (\n    Agent,\n    AgentSession,\n    JobContext,\n    WorkerOptions,\n    cli,\n)\nfrom livekit.plugins import deepgram, elevenlabs, openai, silero\n\n\nload_dotenv(dotenv_path=\".env\", override=True)\n\n\nasync def entrypoint(ctx: JobContext):\n    await ctx.connect()\n\n    agent = Agent(\n        instructions=\"You are hosting a podcast, and you will be having a conversation with your guest.\"\n                     \" The audio from this conversation will be streamed to live listeners.\"\n                     \" Ask engaging questions and keep the conversation flowing.\",\n        turn_detection=\"vad\"\n    )\n    session = AgentSession(\n        vad=silero.VAD.load(),\n        stt=deepgram.STT(model=\"nova-3\"),\n        llm=openai.LLM(model=\"gpt-4o-mini\"),\n        tts=elevenlabs.TTS(),\n    )\n\n    await session.start(agent=agent, room=ctx.room)\n    await session.generate_reply(instructions=\"Greet your guest and ask them about their field of study.\")\n\n\nif __name__ == \"__main__\":\n    cli.run_app(WorkerOptions(entrypoint_fnc=entrypoint, agent_name=\"egress-integration-host\"))\n"
  },
  {
    "path": "test/agents/requirements.txt",
    "content": "livekit-agents>=1.0.0\nlivekit-plugins-deepgram>=1.0.0\nlivekit-plugins-elevenlabs>=1.0.0\nlivekit-plugins-openai>=1.0.0\nlivekit-plugins-cartesia>=1.0.0\nlivekit-plugins-silero>=1.0.0\nlivekit-plugins-turn-detector>=1.0.0\npython-dotenv~=1.0\n"
  },
  {
    "path": "test/agents.go",
    "content": "// Copyright 2025 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n//go:build integration\n\npackage test\n\nimport (\n\t\"context\"\n\t\"os\"\n\t\"os/exec\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/livekit/protocol/livekit\"\n\tlksdk \"github.com/livekit/server-sdk-go/v2\"\n)\n\nfunc (r *Runner) launchAgents(t *testing.T) {\n\tcmd := exec.Command(\"python3\", \"guest.py\", \"dev\")\n\tcmd.Dir = \"/agents\"\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\trequire.NoError(t, cmd.Start())\n\n\tcmd = exec.Command(\"python3\", \"host.py\", \"dev\")\n\tcmd.Dir = \"/agents\"\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\trequire.NoError(t, cmd.Start())\n\n\tagentsClient := lksdk.NewAgentDispatchServiceClient(r.WsUrl, r.ApiKey, r.ApiSecret)\n\tguest, err := agentsClient.CreateDispatch(context.Background(), &livekit.CreateAgentDispatchRequest{\n\t\tAgentName: \"egress-integration-guest\",\n\t\tRoom:      r.RoomName,\n\t})\n\trequire.NoError(t, err)\n\n\thost, err := agentsClient.CreateDispatch(context.Background(), &livekit.CreateAgentDispatchRequest{\n\t\tAgentName: \"egress-integration-host\",\n\t\tRoom:      r.RoomName,\n\t})\n\trequire.NoError(t, err)\n\n\tt.Cleanup(func() {\n\t\t_, _ = agentsClient.DeleteDispatch(context.Background(), &livekit.DeleteAgentDispatchRequest{\n\t\t\tDispatchId: host.Id,\n\t\t\tRoom:       r.RoomName,\n\t\t})\n\t\t_, _ = 
agentsClient.DeleteDispatch(context.Background(), &livekit.DeleteAgentDispatchRequest{\n\t\t\tDispatchId: guest.Id,\n\t\t\tRoom:       r.RoomName,\n\t\t})\n\t})\n}\n"
  },
  {
    "path": "test/builder.go",
    "content": "// Copyright 2024 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n//go:build integration\n\npackage test\n\nimport (\n\t\"path\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/egress\"\n\t\"github.com/livekit/protocol/livekit\"\n\t\"github.com/livekit/protocol/rpc\"\n\t\"github.com/livekit/protocol/utils\"\n)\n\nconst (\n\twebUrl       = \"https://download.blender.org/peach/bigbuckbunny_movies/BigBuckBunny_320x180.mp4\"\n\tsetAtRuntime = \"set-at-runtime\"\n)\n\ntype testCase struct {\n\tname        string\n\trequestType types.RequestType\n\n\tpublishOptions\n\n\t// encoding options\n\tencodingOptions *livekit.EncodingOptions\n\tencodingPreset  livekit.EncodingOptionsPreset\n\n\t*fileOptions\n\t*streamOptions\n\t*segmentOptions\n\t*imageOptions\n\t*v2OutputOptions\n\n\tmulti  bool\n\tcustom func(*testing.T, *testCase)\n\n\tcontentCheck func(t *testing.T, path string, info *FFProbeInfo)\n}\n\ntype publishOptions struct {\n\taudioCodec     types.MimeType\n\taudioDelay     time.Duration\n\taudioUnpublish time.Duration\n\taudioRepublish time.Duration\n\taudioOnly      bool\n\taudioMixing    livekit.AudioMixing\n\taudioTrackID   string\n\n\tvideoCodec     types.MimeType\n\tvideoDelay     time.Duration\n\tvideoUnpublish time.Duration\n\tvideoRepublish time.Duration\n\tvideoOnly      bool\n\tvideoTrackID   string\n\n\tlayout string\n\n\t// v2 Media source 
fields\n\tmediaVideoTrackID     string\n\tmediaParticipantVideo *livekit.ParticipantVideo\n\taudioRoutes           []*livekit.AudioRoute\n\n\t// v2 Template source fields\n\ttemplateCustomBaseUrl string\n}\n\ntype fileOptions struct {\n\tfilename   string\n\tfileType   livekit.EncodedFileType\n\toutputType types.OutputType\n}\n\ntype streamOptions struct {\n\tstreamUrls   []string\n\trawFileName  string\n\twebsocketUrl string\n\toutputType   types.OutputType\n}\n\ntype segmentOptions struct {\n\tprefix       string\n\tplaylist     string\n\tlivePlaylist string\n\tsuffix       livekit.SegmentedFileSuffix\n}\n\ntype imageOptions struct {\n\tprefix string\n\tsuffix livekit.ImageFileSuffix\n}\n\ntype v2OutputOptions struct {\n\toutputs []*livekit.Output\n\tstorage *livekit.StorageConfig\n}\n\nfunc (r *Runner) build(test *testCase) *rpc.StartEgressRequest {\n\tswitch test.requestType {\n\tcase types.RequestTypeRoomComposite:\n\t\troom := &livekit.RoomCompositeEgressRequest{\n\t\t\tRoomName:    r.RoomName,\n\t\t\tLayout:      test.layout,\n\t\t\tAudioOnly:   test.audioOnly,\n\t\t\tAudioMixing: test.audioMixing,\n\t\t\tVideoOnly:   test.videoOnly,\n\t\t}\n\t\tif test.encodingOptions != nil {\n\t\t\troom.Options = &livekit.RoomCompositeEgressRequest_Advanced{\n\t\t\t\tAdvanced: test.encodingOptions,\n\t\t\t}\n\t\t} else if test.encodingPreset != 0 {\n\t\t\troom.Options = &livekit.RoomCompositeEgressRequest_Preset{\n\t\t\t\tPreset: test.encodingPreset,\n\t\t\t}\n\t\t}\n\t\tif test.fileOptions != nil {\n\t\t\troom.FileOutputs = r.buildFileOutputs(test.fileOptions)\n\t\t}\n\t\tif test.streamOptions != nil {\n\t\t\troom.StreamOutputs = r.buildStreamOutputs(test.streamOptions)\n\t\t}\n\t\tif test.segmentOptions != nil {\n\t\t\troom.SegmentOutputs = r.buildSegmentOutputs(test.segmentOptions)\n\t\t}\n\t\tif test.imageOptions != nil {\n\t\t\troom.ImageOutputs = r.buildImageOutputs(test.imageOptions)\n\t\t}\n\t\treturn &rpc.StartEgressRequest{\n\t\t\tEgressId: 
utils.NewGuid(utils.EgressPrefix),\n\t\t\tRequest:  &rpc.StartEgressRequest_RoomComposite{RoomComposite: room},\n\t\t}\n\n\tcase types.RequestTypeWeb:\n\t\tweb := &livekit.WebEgressRequest{\n\t\t\tUrl:       webUrl,\n\t\t\tAudioOnly: test.audioOnly,\n\t\t\tVideoOnly: test.videoOnly,\n\t\t}\n\t\tif test.encodingOptions != nil {\n\t\t\tweb.Options = &livekit.WebEgressRequest_Advanced{\n\t\t\t\tAdvanced: test.encodingOptions,\n\t\t\t}\n\t\t} else if test.encodingPreset != 0 {\n\t\t\tweb.Options = &livekit.WebEgressRequest_Preset{\n\t\t\t\tPreset: test.encodingPreset,\n\t\t\t}\n\t\t}\n\t\tif test.fileOptions != nil {\n\t\t\tweb.FileOutputs = r.buildFileOutputs(test.fileOptions)\n\t\t}\n\t\tif test.streamOptions != nil {\n\t\t\tweb.StreamOutputs = r.buildStreamOutputs(test.streamOptions)\n\t\t}\n\t\tif test.segmentOptions != nil {\n\t\t\tweb.SegmentOutputs = r.buildSegmentOutputs(test.segmentOptions)\n\t\t}\n\t\tif test.imageOptions != nil {\n\t\t\tweb.ImageOutputs = r.buildImageOutputs(test.imageOptions)\n\t\t}\n\t\treturn &rpc.StartEgressRequest{\n\t\t\tEgressId: utils.NewGuid(utils.EgressPrefix),\n\t\t\tRequest:  &rpc.StartEgressRequest_Web{Web: web},\n\t\t}\n\n\tcase types.RequestTypeParticipant:\n\t\tparticipant := &livekit.ParticipantEgressRequest{\n\t\t\tRoomName: r.RoomName,\n\t\t\tIdentity: r.room.LocalParticipant.Identity(),\n\t\t}\n\t\tif test.encodingOptions != nil {\n\t\t\tparticipant.Options = &livekit.ParticipantEgressRequest_Advanced{\n\t\t\t\tAdvanced: test.encodingOptions,\n\t\t\t}\n\t\t} else if test.encodingPreset != 0 {\n\t\t\tparticipant.Options = &livekit.ParticipantEgressRequest_Preset{\n\t\t\t\tPreset: test.encodingPreset,\n\t\t\t}\n\t\t}\n\t\tif test.fileOptions != nil {\n\t\t\tparticipant.FileOutputs = r.buildFileOutputs(test.fileOptions)\n\t\t}\n\t\tif test.streamOptions != nil {\n\t\t\tparticipant.StreamOutputs = r.buildStreamOutputs(test.streamOptions)\n\t\t}\n\t\tif test.segmentOptions != nil {\n\t\t\tparticipant.SegmentOutputs = 
r.buildSegmentOutputs(test.segmentOptions)\n\t\t}\n\t\tif test.imageOptions != nil {\n\t\t\tparticipant.ImageOutputs = r.buildImageOutputs(test.imageOptions)\n\t\t}\n\t\treturn &rpc.StartEgressRequest{\n\t\t\tEgressId: utils.NewGuid(utils.EgressPrefix),\n\t\t\tRequest:  &rpc.StartEgressRequest_Participant{Participant: participant},\n\t\t}\n\n\tcase types.RequestTypeTrackComposite:\n\t\ttrackComposite := &livekit.TrackCompositeEgressRequest{\n\t\t\tRoomName:     r.RoomName,\n\t\t\tAudioTrackId: test.audioTrackID,\n\t\t\tVideoTrackId: test.videoTrackID,\n\t\t}\n\t\tif test.encodingOptions != nil {\n\t\t\ttrackComposite.Options = &livekit.TrackCompositeEgressRequest_Advanced{\n\t\t\t\tAdvanced: test.encodingOptions,\n\t\t\t}\n\t\t} else if test.encodingPreset != 0 {\n\t\t\ttrackComposite.Options = &livekit.TrackCompositeEgressRequest_Preset{\n\t\t\t\tPreset: test.encodingPreset,\n\t\t\t}\n\t\t}\n\t\tif test.fileOptions != nil {\n\t\t\ttrackComposite.FileOutputs = r.buildFileOutputs(test.fileOptions)\n\t\t}\n\t\tif test.streamOptions != nil {\n\t\t\ttrackComposite.StreamOutputs = r.buildStreamOutputs(test.streamOptions)\n\t\t}\n\t\tif test.segmentOptions != nil {\n\t\t\ttrackComposite.SegmentOutputs = r.buildSegmentOutputs(test.segmentOptions)\n\t\t}\n\t\tif test.imageOptions != nil {\n\t\t\ttrackComposite.ImageOutputs = r.buildImageOutputs(test.imageOptions)\n\t\t}\n\t\treturn &rpc.StartEgressRequest{\n\t\t\tEgressId: utils.NewGuid(utils.EgressPrefix),\n\t\t\tRequest:  &rpc.StartEgressRequest_TrackComposite{TrackComposite: trackComposite},\n\t\t}\n\n\tcase types.RequestTypeTrack:\n\t\ttrackID := test.audioTrackID\n\t\tif trackID == \"\" {\n\t\t\ttrackID = test.videoTrackID\n\t\t}\n\t\ttrack := &livekit.TrackEgressRequest{\n\t\t\tRoomName: r.RoomName,\n\t\t\tTrackId:  trackID,\n\t\t}\n\t\tif test.fileOptions != nil {\n\t\t\ttrack.Output = &livekit.TrackEgressRequest_File{\n\t\t\t\tFile: &livekit.DirectFileOutput{\n\t\t\t\t\tFilepath: path.Join(r.FilePrefix, 
test.filename),\n\t\t\t\t},\n\t\t\t}\n\t\t} else if test.streamOptions != nil {\n\t\t\ttrack.Output = &livekit.TrackEgressRequest_WebsocketUrl{\n\t\t\t\tWebsocketUrl: test.websocketUrl,\n\t\t\t}\n\t\t}\n\t\treturn &rpc.StartEgressRequest{\n\t\t\tEgressId: utils.NewGuid(utils.EgressPrefix),\n\t\t\tRequest:  &rpc.StartEgressRequest_Track{Track: track},\n\t\t}\n\t}\n\n\tpanic(\"unknown request type\")\n}\n\nfunc (r *Runner) buildFileOutputs(o *fileOptions) []*livekit.EncodedFileOutput {\n\tif u := r.getUploadConfig(); u != nil {\n\t\toutput := &livekit.EncodedFileOutput{\n\t\t\tFileType: o.fileType,\n\t\t\tFilepath: path.Join(uploadPrefix, o.filename),\n\t\t}\n\n\t\tswitch conf := u.(type) {\n\t\tcase *livekit.S3Upload:\n\t\t\toutput.Output = &livekit.EncodedFileOutput_S3{S3: conf}\n\t\tcase *livekit.GCPUpload:\n\t\t\toutput.Output = &livekit.EncodedFileOutput_Gcp{Gcp: conf}\n\t\tcase *livekit.AzureBlobUpload:\n\t\t\toutput.Output = &livekit.EncodedFileOutput_Azure{Azure: conf}\n\t\t}\n\n\t\treturn []*livekit.EncodedFileOutput{output}\n\t}\n\n\treturn []*livekit.EncodedFileOutput{{\n\t\tFileType: o.fileType,\n\t\tFilepath: path.Join(r.FilePrefix, o.filename),\n\t}}\n}\n\nfunc (r *Runner) buildStreamOutputs(o *streamOptions) []*livekit.StreamOutput {\n\tvar protocol livekit.StreamProtocol\n\tswitch o.outputType {\n\tcase types.OutputTypeRTMP:\n\t\tprotocol = livekit.StreamProtocol_RTMP\n\tcase types.OutputTypeSRT:\n\t\tprotocol = livekit.StreamProtocol_SRT\n\tdefault:\n\t\tprotocol = livekit.StreamProtocol_DEFAULT_PROTOCOL\n\t}\n\n\treturn []*livekit.StreamOutput{{\n\t\tProtocol: protocol,\n\t\tUrls:     o.streamUrls,\n\t}}\n}\n\nfunc (r *Runner) buildSegmentOutputs(o *segmentOptions) []*livekit.SegmentedFileOutput {\n\tif u := r.getUploadConfig(); u != nil {\n\t\toutput := &livekit.SegmentedFileOutput{\n\t\t\tFilenamePrefix:   path.Join(uploadPrefix, o.prefix),\n\t\t\tPlaylistName:     o.playlist,\n\t\t\tLivePlaylistName: o.livePlaylist,\n\t\t\tFilenameSuffix:   
o.suffix,\n\t\t}\n\n\t\tswitch conf := u.(type) {\n\t\tcase *livekit.S3Upload:\n\t\t\toutput.Output = &livekit.SegmentedFileOutput_S3{S3: conf}\n\t\tcase *livekit.GCPUpload:\n\t\t\toutput.Output = &livekit.SegmentedFileOutput_Gcp{Gcp: conf}\n\t\tcase *livekit.AzureBlobUpload:\n\t\t\toutput.Output = &livekit.SegmentedFileOutput_Azure{Azure: conf}\n\t\t}\n\n\t\treturn []*livekit.SegmentedFileOutput{output}\n\t}\n\n\treturn []*livekit.SegmentedFileOutput{{\n\t\tFilenamePrefix:   path.Join(r.FilePrefix, o.prefix),\n\t\tPlaylistName:     o.playlist,\n\t\tLivePlaylistName: o.livePlaylist,\n\t\tFilenameSuffix:   o.suffix,\n\t}}\n}\n\nfunc (r *Runner) buildImageOutputs(o *imageOptions) []*livekit.ImageOutput {\n\treturn []*livekit.ImageOutput{{\n\t\tCaptureInterval: 5,\n\t\tWidth:           1280,\n\t\tHeight:          720,\n\t\tFilenamePrefix:  path.Join(r.FilePrefix, o.prefix),\n\t\tFilenameSuffix:  o.suffix,\n\t}}\n}\n\nfunc (r *Runner) getUploadConfig() interface{} {\n\tconfigs := make([]interface{}, 0)\n\tif r.S3Upload != nil {\n\t\tconfigs = append(configs, r.S3Upload)\n\t}\n\tif r.GCPUpload != nil {\n\t\tconfigs = append(configs, r.GCPUpload)\n\t}\n\tif r.AzureUpload != nil {\n\t\tconfigs = append(configs, r.AzureUpload)\n\t}\n\tif len(configs) == 0 {\n\t\treturn nil\n\t}\n\treturn configs[r.testNumber%len(configs)]\n}\n\nfunc (test *testCase) isV2() bool {\n\tswitch test.requestType {\n\tcase types.RequestTypeTemplate, types.RequestTypeMedia:\n\t\treturn true\n\tcase types.RequestTypeWeb:\n\t\treturn test.v2OutputOptions != nil\n\tdefault:\n\t\treturn false\n\t}\n}\n\nfunc (r *Runner) buildRequest(test *testCase) *rpc.StartEgressRequest {\n\tif test.isV2() {\n\t\treturn r.buildV2(test)\n\t}\n\treturn r.build(test)\n}\n\nfunc (r *Runner) getV2StorageConfig() *livekit.StorageConfig {\n\tu := r.getUploadConfig()\n\tif u == nil {\n\t\treturn nil\n\t}\n\tswitch conf := u.(type) {\n\tcase *livekit.S3Upload:\n\t\treturn &livekit.StorageConfig{Provider: 
&livekit.StorageConfig_S3{S3: conf}}\n\tcase *livekit.GCPUpload:\n\t\treturn &livekit.StorageConfig{Provider: &livekit.StorageConfig_Gcp{Gcp: conf}}\n\tcase *livekit.AzureBlobUpload:\n\t\treturn &livekit.StorageConfig{Provider: &livekit.StorageConfig_Azure{Azure: conf}}\n\tcase *livekit.AliOSSUpload:\n\t\treturn &livekit.StorageConfig{Provider: &livekit.StorageConfig_AliOSS{AliOSS: conf}}\n\t}\n\treturn nil\n}\n\nfunc (r *Runner) buildV2Outputs(test *testCase) []*livekit.Output {\n\tif test.v2OutputOptions != nil && len(test.outputs) > 0 {\n\t\treturn test.outputs\n\t}\n\n\tstorage := r.getV2StorageConfig()\n\tvar prefix string\n\tif storage != nil {\n\t\tprefix = uploadPrefix\n\t} else {\n\t\tprefix = r.FilePrefix\n\t}\n\n\tvar outputs []*livekit.Output\n\n\tif test.fileOptions != nil {\n\t\toutputs = append(outputs, &livekit.Output{\n\t\t\tConfig: &livekit.Output_File{\n\t\t\t\tFile: &livekit.FileOutput{\n\t\t\t\t\tFileType: test.fileType,\n\t\t\t\t\tFilepath: path.Join(prefix, test.filename),\n\t\t\t\t},\n\t\t\t},\n\t\t\tStorage: storage,\n\t\t})\n\t}\n\n\tif test.streamOptions != nil {\n\t\tvar protocol livekit.StreamProtocol\n\t\tswitch test.streamOptions.outputType {\n\t\tcase types.OutputTypeRTMP:\n\t\t\tprotocol = livekit.StreamProtocol_RTMP\n\t\tcase types.OutputTypeSRT:\n\t\t\tprotocol = livekit.StreamProtocol_SRT\n\t\tdefault:\n\t\t\tprotocol = livekit.StreamProtocol_DEFAULT_PROTOCOL\n\t\t}\n\t\toutputs = append(outputs, &livekit.Output{\n\t\t\tConfig: &livekit.Output_Stream{\n\t\t\t\tStream: &livekit.StreamOutput{\n\t\t\t\t\tProtocol: protocol,\n\t\t\t\t\tUrls:     test.streamUrls,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t}\n\n\tif test.segmentOptions != nil {\n\t\toutputs = append(outputs, &livekit.Output{\n\t\t\tConfig: &livekit.Output_Segments{\n\t\t\t\tSegments: &livekit.SegmentedFileOutput{\n\t\t\t\t\tFilenamePrefix:   path.Join(prefix, test.segmentOptions.prefix),\n\t\t\t\t\tPlaylistName:     test.playlist,\n\t\t\t\t\tLivePlaylistName: 
test.livePlaylist,\n\t\t\t\t\tFilenameSuffix:   test.segmentOptions.suffix,\n\t\t\t\t},\n\t\t\t},\n\t\t\tStorage: storage,\n\t\t})\n\t}\n\n\tif test.imageOptions != nil {\n\t\toutputs = append(outputs, &livekit.Output{\n\t\t\tConfig: &livekit.Output_Images{\n\t\t\t\tImages: &livekit.ImageOutput{\n\t\t\t\t\tCaptureInterval: 5,\n\t\t\t\t\tWidth:           1280,\n\t\t\t\t\tHeight:          720,\n\t\t\t\t\tFilenamePrefix:  path.Join(r.FilePrefix, test.imageOptions.prefix),\n\t\t\t\t\tFilenameSuffix:  test.imageOptions.suffix,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t}\n\n\treturn outputs\n}\n\nfunc (r *Runner) buildV2(test *testCase) *rpc.StartEgressRequest {\n\treplayReq := &livekit.ExportReplayRequest{\n\t\tReplayId: \"test-replay-id\",\n\t\tOutputs:  r.buildV2Outputs(test),\n\t}\n\n\t// Source\n\tswitch test.requestType {\n\tcase types.RequestTypeTemplate:\n\t\treplayReq.Source = &livekit.ExportReplayRequest_Template{\n\t\t\tTemplate: &livekit.TemplateSource{\n\t\t\t\tLayout:        test.layout,\n\t\t\t\tAudioOnly:     test.audioOnly,\n\t\t\t\tVideoOnly:     test.videoOnly,\n\t\t\t\tCustomBaseUrl: test.templateCustomBaseUrl,\n\t\t\t},\n\t\t}\n\n\tcase types.RequestTypeWeb:\n\t\treplayReq.Source = &livekit.ExportReplayRequest_Web{\n\t\t\tWeb: &livekit.WebSource{\n\t\t\t\tUrl:       webUrl,\n\t\t\t\tAudioOnly: test.audioOnly,\n\t\t\t\tVideoOnly: test.videoOnly,\n\t\t\t},\n\t\t}\n\n\tcase types.RequestTypeMedia:\n\t\tmedia := &livekit.MediaSource{}\n\n\t\t// video - use explicit mediaVideoTrackID, or fall back to published videoTrackID\n\t\tvideoTrackID := test.mediaVideoTrackID\n\t\tif videoTrackID == \"\" && test.videoCodec != \"\" {\n\t\t\tvideoTrackID = test.videoTrackID\n\t\t}\n\t\tif videoTrackID != \"\" {\n\t\t\tmedia.Video = &livekit.MediaSource_VideoTrackId{\n\t\t\t\tVideoTrackId: videoTrackID,\n\t\t\t}\n\t\t} else if test.mediaParticipantVideo != nil {\n\t\t\tpv := test.mediaParticipantVideo\n\t\t\tif pv.Identity == setAtRuntime {\n\t\t\t\tpv = 
&livekit.ParticipantVideo{\n\t\t\t\t\tIdentity:          string(r.room.LocalParticipant.Identity()),\n\t\t\t\t\tPreferScreenShare: pv.PreferScreenShare,\n\t\t\t\t}\n\t\t\t}\n\t\t\tmedia.Video = &livekit.MediaSource_ParticipantVideo{\n\t\t\t\tParticipantVideo: pv,\n\t\t\t}\n\t\t}\n\n\t\t// audio - replace placeholder track IDs with actual published IDs\n\t\tif len(test.audioRoutes) > 0 {\n\t\t\troutes := make([]*livekit.AudioRoute, len(test.audioRoutes))\n\t\t\tfor i, route := range test.audioRoutes {\n\t\t\t\troutes[i] = route\n\t\t\t\tif tr, ok := route.Match.(*livekit.AudioRoute_TrackId); ok && tr.TrackId == setAtRuntime {\n\t\t\t\t\troutes[i] = &livekit.AudioRoute{\n\t\t\t\t\t\tMatch:   &livekit.AudioRoute_TrackId{TrackId: test.audioTrackID},\n\t\t\t\t\t\tChannel: route.Channel,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif pi, ok := route.Match.(*livekit.AudioRoute_ParticipantIdentity); ok && pi.ParticipantIdentity == setAtRuntime {\n\t\t\t\t\troutes[i] = &livekit.AudioRoute{\n\t\t\t\t\t\tMatch:   &livekit.AudioRoute_ParticipantIdentity{ParticipantIdentity: string(r.room.LocalParticipant.Identity())},\n\t\t\t\t\t\tChannel: route.Channel,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tmedia.Audio = &livekit.AudioConfig{Routes: routes}\n\t\t}\n\n\t\treplayReq.Source = &livekit.ExportReplayRequest_Media{\n\t\t\tMedia: media,\n\t\t}\n\t}\n\n\t// Encoding\n\tif test.encodingOptions != nil {\n\t\treplayReq.Encoding = &livekit.ExportReplayRequest_Advanced{\n\t\t\tAdvanced: test.encodingOptions,\n\t\t}\n\t} else if test.encodingPreset != 0 {\n\t\treplayReq.Encoding = &livekit.ExportReplayRequest_Preset{\n\t\t\tPreset: test.encodingPreset,\n\t\t}\n\t}\n\n\t// Global storage\n\tif test.v2OutputOptions != nil && test.storage != nil {\n\t\treplayReq.Storage = test.storage\n\t}\n\n\t// build token since we don't pass a room name\n\tegressID := utils.NewGuid(utils.EgressPrefix)\n\ttoken, _ := egress.BuildEgressToken(egressID, r.ApiKey, r.ApiSecret, r.RoomName)\n\n\treturn 
&rpc.StartEgressRequest{\n\t\tEgressId: egressID,\n\t\tRequest:  &rpc.StartEgressRequest_Replay{Replay: replayReq},\n\t\tToken:    token,\n\t\tWsUrl:    r.WsUrl,\n\t}\n}\n"
  },
  {
    "path": "test/config-sample.yaml",
    "content": "log_level: error\nredis:\n  address: 192.168.65.2:6379\napi_key: '****'\napi_secret: '****'\nws_url: 'wss://your.livekit.url'\nfile_prefix: /out/output\ns3:\n  access_key: '****'\n  secret: '****'\n  region: us-east-1\n  bucket: mybucket\nroom_name: egress-test\nroom_only: false\ntrack_composite_only: false\ntrack_only: false\nfile_only: false\nstream_only: false\nsegments_only: false\nmuting: false\n"
  },
  {
    "path": "test/content_checks.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//\thttp://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n//go:build integration\n\npackage test\n\nimport (\n\t\"context\"\n\t\"encoding/csv\"\n\t\"fmt\"\n\t\"io\"\n\t\"os/exec\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/livekit/protocol/logger\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc (r *Runner) fullContentCheck(t *testing.T, file string, _ *FFProbeInfo) {\n\tif r.Muting {\n\t\t// TODO: support for content check on muted tracks to be added later\n\t\treturn\n\t}\n\n\t// TODO: enable after fixing the issue with missing beeps\n\t// dur, err := parseFFProbeDuration(info.Format.Duration)\n\t//require.NoError(t, err)\n\n\tflashes, err := extractFlashTimestamps(file, r.FilePrefix)\n\trequire.NoError(t, err)\n\n\tbeeps, err := extractBeepTimestamps(file, testSampleBeepLevel, r.FilePrefix)\n\trequire.NoError(t, err)\n\n\tsilenceRanges, err := detectSilence(file, testSampleSilenceLevel, time.Millisecond*100)\n\tif len(silenceRanges) > 0 || err != nil {\n\t\tlogger.Errorw(\"silence ranges not empty\", err, \"silenceRanges\", silenceRanges)\n\t}\n\n\t// require.InDelta(t, len(flashes), len(beeps), 3)\n\t// require.InDelta(t, len(flashes), dur.Round(time.Second).Seconds(), 3)\n\n\t// avgFlashSpacing, err := averageSpacing(flashes)\n\t// require.NoError(t, err)\n\t// 200ms is still pretty generous, should be tighter\n\t// requireDurationInDelta(t, avgFlashSpacing, time.Second, 
time.Millisecond*200)\n\n\t// avgBeepSpacing, err := averageSpacing(beeps)\n\t// require.NoError(t, err)\n\t// requireDurationInDelta(t, avgBeepSpacing, time.Second, time.Millisecond*200)\n\n\tlogger.Debugw(\"beeps\", \"beeps\", beeps)\n\tlogger.Debugw(\"flashes\", \"flashes\", flashes)\n}\n\nfunc (r *Runner) videoOnlyContentCheck(t *testing.T, file string, info *FFProbeInfo) {\n\tif r.Muting {\n\t\t// TODO: support for content check on muted tracks to be added later\n\t\treturn\n\t}\n\n\tflashes, err := extractFlashTimestamps(file, r.FilePrefix)\n\trequire.NoError(t, err)\n\n\tdur, err := parseFFProbeDuration(info.Format.Duration)\n\trequire.NoError(t, err)\n\n\trequire.InDelta(t, len(flashes), dur.Round(time.Second).Seconds(), 3)\n\tavgFlashSpacing, err := averageSpacing(flashes)\n\trequire.NoError(t, err)\n\t// 200ms is still pretty generous, should be tighter\n\trequireDurationInDelta(t, avgFlashSpacing, time.Second, time.Millisecond*200)\n}\n\nfunc (r *Runner) audioOnlyContentCheck(t *testing.T, file string, _ *FFProbeInfo) {\n\tif r.Muting {\n\t\t// TODO: support for content check on muted tracks to be added later\n\t\treturn\n\t}\n\n\t//TODO: enable after fixing the issue with missing beeps\n\t//dur, err := parseFFProbeDuration(info.Format.Duration)\n\t//require.NoError(t, err)\n\n\tbeeps, err := extractBeepTimestamps(file, testSampleBeepLevel, r.FilePrefix)\n\trequire.NoError(t, err)\n\n\tsilenceRanges, err := detectSilence(file, testSampleSilenceLevel, time.Millisecond*100)\n\tif len(silenceRanges) > 0 || err != nil {\n\t\tlogger.Errorw(\"silence ranges not empty\", err, \"silenceRanges\", silenceRanges)\n\t}\n\n\t// require.NoError(t, err)\n\t// // sometimes the silence range is at the end of the file, ignore it\n\t// require.True(t, len(silenceRanges) == 0 || silenceRanges[0].start > dur-time.Second*2,\n\t// \tfmt.Sprintf(\"unexpected silence ranges: %v\", silenceRanges))\n\n\t// require.InDelta(t, len(beeps), dur.Round(time.Second).Seconds(), 3)\n\n\t// 
avgBeepSpacing, err := averageSpacing(beeps)\n\t// require.NoError(t, err)\n\t// requireDurationInDelta(t, avgBeepSpacing, time.Second, time.Millisecond*200)\n\tlogger.Debugw(\"beeps\", \"beeps\", beeps)\n}\n\nfunc (r *Runner) fullContentCheckWithVideoUnpublishAt10AndRepublishAt20(t *testing.T, file string, info *FFProbeInfo) {\n\tif r.Muting {\n\t\t// TODO: support for content check on muted to be added later\n\t\treturn\n\t}\n\n\tflashes, err := extractFlashTimestamps(file, r.FilePrefix)\n\trequire.NoError(t, err)\n\n\tdur, err := parseFFProbeDuration(info.Format.Duration)\n\trequire.NoError(t, err)\n\n\tgapLength := time.Second * 10\n\trequire.InDelta(\n\t\tt,\n\t\tfloat64(len(flashes))+gapLength.Seconds(),\n\t\tdur.Round(time.Second).Seconds(),\n\t\t5.0,\n\t\t\"flashes+gap ~= duration (±3s)\",\n\t)\n\n\tgapsFound := 0\n\tfor i := 1; i < len(flashes); i++ {\n\t\tif flashes[i]-flashes[i-1] > gapLength-time.Millisecond*500 {\n\t\t\tgapsFound++\n\t\t\trequireDurationInDelta(t, flashes[i], time.Second*20, time.Second*2)\n\t\t} else {\n\t\t\t// all other flashes should be within 1 second of the previous flash\n\t\t\trequireDurationInDelta(t, flashes[i], flashes[i-1], time.Second+time.Millisecond*200)\n\t\t}\n\t}\n\trequire.Equal(t, gapsFound, 1)\n\n\tr.audioOnlyContentCheck(t, file, info)\n\n}\n\nfunc (r *Runner) streamKeyframeContentCheck(expectedInterval float64) func(t *testing.T, target string, _ *FFProbeInfo) {\n\treturn func(t *testing.T, target string, _ *FFProbeInfo) {\n\t\trequireKeyframeInterval(t, target, expectedInterval)\n\t}\n}\n\n// ensures input is read long enough to get sufficient keyframes for spacing check\nfunc requireKeyframeInterval(t *testing.T, input string, expectedInterval float64) {\n\tt.Helper()\n\tif expectedInterval <= 0 {\n\t\treturn\n\t}\n\n\ttimestamps, err := ffprobeKeyframeTimestamps(input, expectedInterval)\n\n\trequire.NoError(t, err)\n\trequire.GreaterOrEqual(t, len(timestamps), 2, \"ffprobe returned less than two keyframes for 
%s\", input)\n\n\ttolerance := 0.020 // 20ms\n\tprev := timestamps[0]\n\tfound := false\n\tfor _, ts := range timestamps[1:] {\n\t\tif ts <= prev {\n\t\t\tprev = ts\n\t\t\tcontinue\n\t\t}\n\t\tfound = true\n\t\trequire.InDelta(t, expectedInterval, ts-prev, tolerance, \"keyframe spacing mismatch for %s\", input)\n\t\tprev = ts\n\t}\n\trequire.True(t, found, \"no increasing keyframe timestamps found for %s\", input)\n}\n\nfunc ffprobeKeyframeTimestamps(input string, expectedInterval float64) ([]float64, error) {\n\ttimestamps := []float64{}\n\tvar err error\n\n\t// ensure at least 3 keyframes are read\n\treadSeconds := expectedInterval*4 + 1\n\n\targs := []string{\n\t\t\"-v\", \"error\",\n\t\t\"-fflags\", \"nobuffer\",\n\t\t\"-rw_timeout\", \"5000000\",\n\t\t\"-select_streams\", \"v:0\",\n\t\t\"-show_packets\",\n\t\t\"-show_entries\", \"packet=pts_time,dts_time,flags,stream_index,size,pos\",\n\t\t\"-of\", \"csv=p=0\",\n\t\tinput,\n\t}\n\n\ttimeout := time.Duration(readSeconds) * time.Second\n\tctx, cancel := context.WithTimeout(context.Background(), timeout)\n\tdefer cancel()\n\n\tcmd := exec.CommandContext(ctx, \"ffprobe\", args...)\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"stdout pipe: %w\", err)\n\t}\n\n\tif err = cmd.Start(); err != nil {\n\t\treturn nil, fmt.Errorf(\"start ffprobe: %w\", err)\n\t}\n\tdefer cmd.Wait()\n\n\tcsvReader := csv.NewReader(stdout)\n\n\tfor {\n\t\trecord, e := csvReader.Read()\n\t\tif e != nil {\n\t\t\t// ignore context && EOF errors, we could be canceling the context after readSeconds\n\t\t\tif ctx.Err() == nil && e != io.EOF {\n\t\t\t\terr = fmt.Errorf(\"read csv: %w\", e)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\tif len(record) != 6 {\n\t\t\terr = fmt.Errorf(\"unexpected record length: %d\", len(record))\n\t\t\tbreak\n\t\t}\n\n\t\tpts, e := strconv.ParseFloat(record[1], 64)\n\t\tif e != nil {\n\t\t\terr = fmt.Errorf(\"parse pts: %w\", e)\n\t\t\tbreak\n\t\t}\n\t\tif strings.Contains(record[5], \"K\") 
{\n\t\t\ttimestamps = append(timestamps, pts)\n\t\t}\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn timestamps, nil\n}\n"
  },
  {
    "path": "test/download.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n//go:build integration\n\npackage test\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/url\"\n\t\"os\"\n\t\"testing\"\n\n\t\"cloud.google.com/go/storage\"\n\t\"github.com/Azure/azure-storage-blob-go/azblob\"\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\tawsConfig \"github.com/aws/aws-sdk-go-v2/config\"\n\t\"github.com/aws/aws-sdk-go-v2/credentials\"\n\t\"github.com/aws/aws-sdk-go-v2/feature/s3/manager\"\n\t\"github.com/aws/aws-sdk-go-v2/service/s3\"\n\t\"github.com/googleapis/gax-go/v2\"\n\t\"github.com/stretchr/testify/require\"\n\t\"google.golang.org/api/option\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/protocol/logger\"\n\tlkstorage \"github.com/livekit/storage\"\n)\n\nfunc loadManifest(t *testing.T, c *config.StorageConfig, localFilepath, storageFilepath string) *config.Manifest {\n\tdownload(t, c, localFilepath, storageFilepath, false)\n\tdefer os.Remove(localFilepath)\n\n\tb, err := os.ReadFile(localFilepath)\n\trequire.NoError(t, err)\n\n\tm := &config.Manifest{}\n\terr = json.Unmarshal(b, m)\n\trequire.NoError(t, err)\n\n\treturn m\n}\n\nfunc download(t *testing.T, c *config.StorageConfig, localFilepath, storageFilepath string, delete bool) {\n\tif c != nil {\n\t\tif c.S3 != nil {\n\t\t\tlogger.Debugw(\"s3 download\", \"localFilepath\", localFilepath, \"storageFilepath\", 
storageFilepath)\n\t\t\tdownloadS3(t, c.S3, localFilepath, storageFilepath, delete)\n\t\t} else if c.GCP != nil {\n\t\t\tlogger.Debugw(\"gcp download\", \"localFilepath\", localFilepath, \"storageFilepath\", storageFilepath)\n\t\t\tdownloadGCP(t, c.GCP, localFilepath, storageFilepath, delete)\n\t\t} else if c.Azure != nil {\n\t\t\tlogger.Debugw(\"azure download\", \"localFilepath\", localFilepath, \"storageFilepath\", storageFilepath)\n\t\t\tdownloadAzure(t, c.Azure, localFilepath, storageFilepath, delete)\n\t\t}\n\t}\n}\n\nfunc downloadS3(t *testing.T, conf *lkstorage.S3Config, localFilepath, storageFilepath string, delete bool) {\n\tfile, err := os.Create(localFilepath)\n\trequire.NoError(t, err)\n\tdefer file.Close()\n\n\tawsConf, err := awsConfig.LoadDefaultConfig(context.Background(), func(o *awsConfig.LoadOptions) error {\n\t\to.Region = conf.Region\n\t\to.Credentials = credentials.StaticCredentialsProvider{\n\t\t\tValue: aws.Credentials{\n\t\t\t\tAccessKeyID:     conf.AccessKey,\n\t\t\t\tSecretAccessKey: conf.Secret,\n\t\t\t\tSessionToken:    conf.SessionToken,\n\t\t\t},\n\t\t}\n\n\t\treturn nil\n\t})\n\trequire.NoError(t, err)\n\ts3Client := s3.NewFromConfig(awsConf)\n\n\t_, err = manager.NewDownloader(s3Client).Download(\n\t\tcontext.Background(),\n\t\tfile,\n\t\t&s3.GetObjectInput{\n\t\t\tBucket: aws.String(conf.Bucket),\n\t\t\tKey:    aws.String(storageFilepath),\n\t\t},\n\t)\n\trequire.NoError(t, err)\n\n\tif delete {\n\t\t_, err = s3Client.DeleteObject(context.Background(), &s3.DeleteObjectInput{\n\t\t\tBucket: aws.String(conf.Bucket),\n\t\t\tKey:    aws.String(storageFilepath),\n\t\t})\n\t\trequire.NoError(t, err)\n\t}\n}\n\nfunc downloadAzure(t *testing.T, conf *lkstorage.AzureConfig, localFilepath, storageFilepath string, delete bool) {\n\tcredential, err := azblob.NewSharedKeyCredential(\n\t\tconf.AccountName,\n\t\tconf.AccountKey,\n\t)\n\trequire.NoError(t, err)\n\n\tpipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{\n\t\tRetry: 
azblob.RetryOptions{\n\t\t\tPolicy:        azblob.RetryPolicyExponential,\n\t\t\tMaxTries:      maxRetries,\n\t\t\tMaxRetryDelay: maxDelay,\n\t\t},\n\t})\n\tsUrl := fmt.Sprintf(\"https://%s.blob.core.windows.net/%s\", conf.AccountName, conf.ContainerName)\n\tazUrl, err := url.Parse(sUrl)\n\trequire.NoError(t, err)\n\n\tcontainerURL := azblob.NewContainerURL(*azUrl, pipeline)\n\tblobURL := containerURL.NewBlobURL(storageFilepath)\n\n\tfile, err := os.Create(localFilepath)\n\trequire.NoError(t, err)\n\tdefer file.Close()\n\n\terr = azblob.DownloadBlobToFile(context.Background(), blobURL, 0, 0, file, azblob.DownloadFromBlobOptions{\n\t\tBlockSize:   4 * 1024 * 1024,\n\t\tParallelism: 16,\n\t\tRetryReaderOptionsPerBlock: azblob.RetryReaderOptions{\n\t\t\tMaxRetryRequests: 3,\n\t\t},\n\t})\n\trequire.NoError(t, err)\n\n\tif delete {\n\t\t_, err = blobURL.Delete(context.Background(), azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{})\n\t\trequire.NoError(t, err)\n\t}\n}\n\nfunc downloadGCP(t *testing.T, conf *lkstorage.GCPConfig, localFilepath, storageFilepath string, delete bool) {\n\tctx := context.Background()\n\tvar client *storage.Client\n\n\tvar err error\n\tif conf.CredentialsJSON != \"\" {\n\t\tclient, err = storage.NewClient(ctx, option.WithCredentialsJSON([]byte(conf.CredentialsJSON)))\n\t} else {\n\t\tclient, err = storage.NewClient(ctx)\n\t}\n\trequire.NoError(t, err)\n\tdefer client.Close()\n\n\tfile, err := os.Create(localFilepath)\n\trequire.NoError(t, err)\n\tdefer file.Close()\n\n\trc, err := client.Bucket(conf.Bucket).Object(storageFilepath).Retryer(\n\t\tstorage.WithBackoff(\n\t\t\tgax.Backoff{\n\t\t\t\tInitial:    minDelay,\n\t\t\t\tMax:        maxDelay,\n\t\t\t\tMultiplier: 2,\n\t\t\t}),\n\t\tstorage.WithPolicy(storage.RetryAlways),\n\t).NewReader(ctx)\n\trequire.NoError(t, err)\n\n\t_, err = io.Copy(file, rc)\n\t_ = rc.Close()\n\trequire.NoError(t, err)\n\n\tif delete {\n\t\terr = 
client.Bucket(conf.Bucket).Object(storageFilepath).Delete(context.Background())\n\t\trequire.NoError(t, err)\n\t}\n}\n"
  },
  {
    "path": "test/edge.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n//go:build integration\n\npackage test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"math/rand\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/livekit\"\n\tlksdk \"github.com/livekit/server-sdk-go/v2\"\n)\n\nfunc (r *Runner) testEdgeCases(t *testing.T) {\n\tif !r.should(runEdge) {\n\t\treturn\n\t}\n\n\tt.Run(\"EdgeCases\", func(t *testing.T) {\n\t\tfor _, test := range []*testCase{\n\n\t\t\t// RoomComposite with a late-joining participant (audio only).\n\t\t\t// Verifies that file duration reflects wall-clock time, not\n\t\t\t// inflated by the late track's PTS offset.\n\n\t\t\t{\n\t\t\t\tname:        \"RoomCompositeLateTrackDuration\",\n\t\t\t\trequestType: types.RequestTypeRoomComposite,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\taudioOnly:  true,\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"room_composite_late_track_{time}\",\n\t\t\t\t\tfileType: livekit.EncodedFileType_OGG,\n\t\t\t\t},\n\t\t\t\tcustom: r.testRoomCompositeLateTrackDuration,\n\t\t\t},\n\n\t\t\t// Agents with room composite audio only\n\n\t\t\t{\n\t\t\t\tname:        \"Agents\",\n\t\t\t\trequestType: types.RequestTypeRoomComposite,\n\t\t\t\tfileOptions: 
&fileOptions{\n\t\t\t\t\tfilename: \"agents_{time}\",\n\t\t\t\t},\n\t\t\t\tcustom: r.testAgents,\n\t\t\t},\n\n\t\t\t// RoomComposite audio mixing\n\n\t\t\t{\n\t\t\t\tname:        \"AudioMixing\",\n\t\t\t\trequestType: types.RequestTypeRoomComposite,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioOnly:   true,\n\t\t\t\t\taudioMixing: livekit.AudioMixing_DUAL_CHANNEL_AGENT,\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"audio_mixing_{time}\",\n\t\t\t\t},\n\t\t\t\tcustom: r.testAudioMixing,\n\t\t\t},\n\n\t\t\t// ParticipantComposite where the participant never publishes\n\n\t\t\t{\n\t\t\t\tname:        \"ParticipantNoPublish\",\n\t\t\t\trequestType: types.RequestTypeParticipant,\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"participant_no_publish_{time}.mp4\",\n\t\t\t\t},\n\t\t\t\tcustom: r.testParticipantNoPublish,\n\t\t\t},\n\n\t\t\t// Test that the egress continues if a user leaves\n\n\t\t\t{\n\t\t\t\tname:        \"RoomCompositeStaysOpen\",\n\t\t\t\trequestType: types.RequestTypeRoomComposite,\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"room_composite_stays_open_{time}.mp4\",\n\t\t\t\t},\n\t\t\t\tcustom: r.testRoomCompositeStaysOpen,\n\t\t\t},\n\n\t\t\t// Room composite where all participants leave and the server\n\t\t\t// eventually disconnects the egress. 
Verifies that the reported\n\t\t\t// duration includes the silence tail between participant departure\n\t\t\t// and server-initiated leave.\n\n\t\t\t{\n\t\t\t\tname:        \"RoomCompositeDisconnectDuration\",\n\t\t\t\trequestType: types.RequestTypeRoomComposite,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\taudioOnly:  true,\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"room_composite_disconnect_duration_{time}\",\n\t\t\t\t\tfileType: livekit.EncodedFileType_OGG,\n\t\t\t\t},\n\t\t\t\tcustom: r.testRoomCompositeDisconnectDuration,\n\t\t\t},\n\n\t\t\t// RTMP output with no valid urls\n\n\t\t\t{\n\t\t\t\tname:        \"RtmpFailure\",\n\t\t\t\trequestType: types.RequestTypeRoomComposite,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\tvideoCodec: types.MimeTypeH264,\n\t\t\t\t},\n\t\t\t\tstreamOptions: &streamOptions{\n\t\t\t\t\tstreamUrls: []string{badRtmpUrl1},\n\t\t\t\t\toutputType: types.OutputTypeRTMP,\n\t\t\t\t},\n\t\t\t\tcustom: r.testRtmpFailure,\n\t\t\t},\n\n\t\t\t// SRT output with no valid urls\n\n\t\t\t{\n\t\t\t\tname:        \"SrtFailure\",\n\t\t\t\trequestType: types.RequestTypeWeb,\n\t\t\t\tstreamOptions: &streamOptions{\n\t\t\t\t\tstreamUrls: []string{badSrtUrl1},\n\t\t\t\t\toutputType: types.OutputTypeSRT,\n\t\t\t\t},\n\t\t\t\tcustom: r.testSrtFailure,\n\t\t\t},\n\n\t\t\t// Track composite with data loss due to a disconnection\n\n\t\t\t{\n\t\t\t\tname:        \"TrackDisconnection\",\n\t\t\t\trequestType: types.RequestTypeTrackComposite,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"track_disconnection_{time}.mp4\",\n\t\t\t\t\tfileType: livekit.EncodedFileType_MP4,\n\t\t\t\t},\n\t\t\t\tcustom: r.testTrackDisconnection,\n\t\t\t},\n\n\t\t\t// Stream output with no urls\n\n\t\t\t{\n\t\t\t\tname:        
\"EmptyStreamBin\",\n\t\t\t\trequestType: types.RequestTypeRoomComposite,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\tvideoCodec: types.MimeTypeVP8,\n\t\t\t\t},\n\t\t\t\tstreamOptions: &streamOptions{\n\t\t\t\t\tstreamUrls: []string{rtmpUrl4, badRtmpUrl1},\n\t\t\t\t\toutputType: types.OutputTypeRTMP,\n\t\t\t\t},\n\t\t\t\tsegmentOptions: &segmentOptions{\n\t\t\t\t\tprefix:   \"empty_stream_{time}\",\n\t\t\t\t\tplaylist: \"empty_stream_{time}\",\n\t\t\t\t},\n\t\t\t\tcustom: r.testEmptyStreamBin,\n\t\t\t},\n\n\t\t\t// File storage limit reached\n\n\t\t\t{\n\t\t\t\tname:        \"FileStorageLimit\",\n\t\t\t\trequestType: types.RequestTypeRoomComposite,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\tvideoCodec: types.MimeTypeVP8,\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"storage_limit_{time}.mp4\",\n\t\t\t\t\tfileType: livekit.EncodedFileType_MP4,\n\t\t\t\t},\n\t\t\t\tcustom: r.testStorageLimit,\n\t\t\t},\n\t\t} {\n\t\t\tif !r.run(t, test, test.custom) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc (r *Runner) testRoomCompositeLateTrackDuration(t *testing.T, test *testCase) {\n\t// First participant is already connected (r.room) and publishes audio immediately.\n\t// Start egress, wait for it to become active, then connect a second participant\n\t// after a delay. 
Stop egress and verify that the reported file duration is close\n\t// to wall-clock time and not inflated by the late track's synchronizer offset.\n\treq := r.build(test)\n\ttestStart := time.Now()\n\tegressID := r.startEgress(t, req)\n\n\t// Second participant joins several seconds after egress is active\n\ttime.Sleep(time.Second * 5)\n\n\tp2, err := lksdk.ConnectToRoom(r.WsUrl, lksdk.ConnectInfo{\n\t\tAPIKey:              r.ApiKey,\n\t\tAPISecret:           r.ApiSecret,\n\t\tRoomName:            r.RoomName,\n\t\tParticipantName:     \"egress-late-joiner\",\n\t\tParticipantIdentity: fmt.Sprintf(\"late-joiner-%d\", rand.Intn(100)),\n\t}, lksdk.NewRoomCallback())\n\trequire.NoError(t, err)\n\tt.Cleanup(p2.Disconnect)\n\tr.publish(t, p2.LocalParticipant, types.MimeTypeOpus, make(chan struct{}))\n\n\t// Let the late track record for a few seconds\n\ttime.Sleep(time.Second * 7)\n\n\t// Stop and verify\n\tres := r.stopEgress(t, egressID)\n\twallClock := time.Since(testStart)\n\n\tfileRes := res.GetFile() //nolint:staticcheck\n\tif fileRes == nil {\n\t\trequire.Len(t, res.FileResults, 1)\n\t\tfileRes = res.FileResults[0]\n\t}\n\n\treportedDuration := time.Duration(fileRes.Duration)\n\tt.Logf(\"reported duration: %s, wall-clock: %s, startedAt: %d, endedAt: %d\",\n\t\treportedDuration, wallClock, fileRes.StartedAt, fileRes.EndedAt)\n\n\t// Reported duration must not exceed wall-clock time. 
It can legitimately be\n\t// shorter (pipeline startup delay between testStart and first packet), but\n\t// should never be longer.\n\trequire.LessOrEqual(t, reportedDuration.Seconds(), wallClock.Seconds()+3.0,\n\t\t\"file duration should not exceed wall-clock duration (inflated by late track offset)\")\n}\n\nfunc (r *Runner) testAgents(t *testing.T, test *testCase) {\n\t_, err := os.Stat(\"/agents/.env\")\n\tif err != nil {\n\t\tt.Skip(\"skipping agents test; missing env file\")\n\t}\n\n\tr.launchAgents(t)\n\ttime.Sleep(time.Second * 5)\n\tr.runFileTest(t, test)\n}\n\nfunc (r *Runner) testAudioMixing(t *testing.T, test *testCase) {\n\tp1, err := lksdk.ConnectToRoom(r.WsUrl, lksdk.ConnectInfo{\n\t\tAPIKey:              r.ApiKey,\n\t\tAPISecret:           r.ApiSecret,\n\t\tRoomName:            r.RoomName,\n\t\tParticipantName:     \"egress-sample-1\",\n\t\tParticipantIdentity: fmt.Sprintf(\"sample-1-%d\", rand.Intn(100)),\n\t}, lksdk.NewRoomCallback())\n\trequire.NoError(t, err)\n\tt.Cleanup(p1.Disconnect)\n\tr.publish(t, p1.LocalParticipant, types.MimeTypeOpus, make(chan struct{}))\n\n\tagent, err := lksdk.ConnectToRoom(r.WsUrl, lksdk.ConnectInfo{\n\t\tAPIKey:              r.ApiKey,\n\t\tAPISecret:           r.ApiSecret,\n\t\tRoomName:            r.RoomName,\n\t\tParticipantName:     \"egress-sample\",\n\t\tParticipantIdentity: fmt.Sprintf(\"agent-%d\", rand.Intn(100)),\n\t\tParticipantKind:     lksdk.ParticipantAgent,\n\t}, lksdk.NewRoomCallback())\n\trequire.NoError(t, err)\n\tt.Cleanup(agent.Disconnect)\n\tr.publish(t, agent.LocalParticipant, types.MimeTypeOpus, make(chan struct{}))\n\n\tp2, err := lksdk.ConnectToRoom(r.WsUrl, lksdk.ConnectInfo{\n\t\tAPIKey:              r.ApiKey,\n\t\tAPISecret:           r.ApiSecret,\n\t\tRoomName:            r.RoomName,\n\t\tParticipantName:     \"egress-sample\",\n\t\tParticipantIdentity: fmt.Sprintf(\"sample-2-%d\", rand.Intn(100)),\n\t}, lksdk.NewRoomCallback())\n\trequire.NoError(t, 
err)\n\tt.Cleanup(p2.Disconnect)\n\tr.publish(t, p2.LocalParticipant, types.MimeTypeOpus, make(chan struct{}))\n\n\tr.runFileTest(t, test)\n}\n\nfunc (r *Runner) testParticipantNoPublish(t *testing.T, test *testCase) {\n\tidentity := r.room.LocalParticipant.Identity()\n\n\treq := r.build(test)\n\n\tinfo := r.sendRequest(t, req)\n\ttime.Sleep(time.Second * 15)\n\tr.room.Disconnect()\n\ttime.Sleep(time.Second * 30)\n\tinfo = r.getUpdate(t, info.EgressId)\n\trequire.Equal(t, livekit.EgressStatus_EGRESS_ABORTED.String(), info.Status.String())\n\n\t// reconnect the publisher to the room\n\troom, err := lksdk.ConnectToRoom(r.WsUrl, lksdk.ConnectInfo{\n\t\tAPIKey:              r.ApiKey,\n\t\tAPISecret:           r.ApiSecret,\n\t\tRoomName:            r.RoomName,\n\t\tParticipantName:     \"egress-sample\",\n\t\tParticipantIdentity: identity,\n\t}, lksdk.NewRoomCallback())\n\trequire.NoError(t, err)\n\tr.room = room\n}\n\nfunc (r *Runner) testRoomCompositeStaysOpen(t *testing.T, test *testCase) {\n\treq := r.build(test)\n\n\tinfo := r.sendRequest(t, req)\n\ttime.Sleep(time.Second * 10)\n\tidentity := r.room.LocalParticipant.Identity()\n\tr.room.Disconnect()\n\ttime.Sleep(time.Second * 10)\n\n\t// reconnect the publisher to the room\n\troom, err := lksdk.ConnectToRoom(r.WsUrl, lksdk.ConnectInfo{\n\t\tAPIKey:              r.ApiKey,\n\t\tAPISecret:           r.ApiSecret,\n\t\tRoomName:            r.RoomName,\n\t\tParticipantName:     \"egress-sample\",\n\t\tParticipantIdentity: identity,\n\t}, lksdk.NewRoomCallback())\n\trequire.NoError(t, err)\n\tr.room = room\n\n\tr.publishSample(t, types.MimeTypeOpus, 0, 0, false)\n\tr.publishSample(t, types.MimeTypeVP8, 0, 0, false)\n\n\ttime.Sleep(time.Second * 10)\n\n\tr.checkUpdate(t, info.EgressId, livekit.EgressStatus_EGRESS_ACTIVE)\n\tr.stopEgress(t, info.EgressId)\n}\n\nfunc (r *Runner) testRoomCompositeDisconnectDuration(t *testing.T, test *testCase) {\n\t// Start egress, record for a while, then disconnect all participants.\n\t// 
The server will eventually disconnect the egress after departure_timeout.\n\t// The file will contain silence during that gap, so endedAt must\n\t// reflect the full file content including the silence tail.\n\tconst departureTimeout = 20 // seconds\n\n\t// Create the room with an explicit departure_timeout so the silence\n\t// gap is predictable regardless of server defaults.\n\troomClient := lksdk.NewRoomServiceClient(r.WsUrl, r.ApiKey, r.ApiSecret)\n\t_, err := roomClient.CreateRoom(context.Background(), &livekit.CreateRoomRequest{\n\t\tName:             r.RoomName,\n\t\tDepartureTimeout: departureTimeout,\n\t})\n\trequire.NoError(t, err)\n\n\treq := r.build(test)\n\tegressID := r.startEgress(t, req)\n\n\t// Record with active audio for 10 seconds\n\ttime.Sleep(time.Second * 10)\n\n\t// Disconnect all participants — the room becomes empty, but the\n\t// egress stays connected until the server kicks it out.\n\tdisconnectTime := time.Now()\n\tidentity := r.room.LocalParticipant.Identity()\n\tr.room.Disconnect()\n\n\t// Reconnect the publisher on exit so subsequent tests have a room\n\tdefer func() {\n\t\troom, err := lksdk.ConnectToRoom(r.WsUrl, lksdk.ConnectInfo{\n\t\t\tAPIKey:              r.ApiKey,\n\t\t\tAPISecret:           r.ApiSecret,\n\t\t\tRoomName:            r.RoomName,\n\t\t\tParticipantName:     \"egress-sample\",\n\t\t\tParticipantIdentity: identity,\n\t\t}, lksdk.NewRoomCallback())\n\t\trequire.NoError(t, err)\n\t\tr.room = room\n\t}()\n\n\t// Wait for the egress to complete on its own (server-initiated leave).\n\t// Drain updates until we see EGRESS_COMPLETE or EGRESS_FAILED.\n\tvar res *livekit.EgressInfo\n\tdeadline := time.After(90 * time.Second)\n\tfor res == nil {\n\t\tselect {\n\t\tcase info := <-r.updates:\n\t\t\tif info.EgressId != egressID {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tswitch info.Status {\n\t\t\tcase livekit.EgressStatus_EGRESS_COMPLETE:\n\t\t\t\tres = info\n\t\t\tcase livekit.EgressStatus_EGRESS_FAILED:\n\t\t\t\tt.Fatalf(\"egress 
failed: %s\", info.Error)\n\t\t\t}\n\t\tcase <-deadline:\n\t\t\tt.Fatal(\"timed out waiting for egress to complete after room disconnect\")\n\t\t}\n\t}\n\n\tsilenceGap := time.Since(disconnectTime)\n\tt.Logf(\"silence gap after disconnect: %s\", silenceGap)\n\n\tfileRes := res.GetFile() //nolint:staticcheck\n\tif fileRes == nil {\n\t\trequire.Len(t, res.FileResults, 1)\n\t\tfileRes = res.FileResults[0]\n\t}\n\n\treportedDuration := time.Duration(fileRes.Duration)\n\tt.Logf(\"reported duration: %s, startedAt: %d, endedAt: %d\",\n\t\treportedDuration, fileRes.StartedAt, fileRes.EndedAt)\n\n\t// The reported duration should include the silence tail. The room was\n\t// created with departure_timeout=20s, so the server disconnects the\n\t// egress ~20s after the last participant leaves. We allow 5s of slack\n\t// for pipeline startup/teardown.\n\tminExpected := 10*time.Second + silenceGap - 5*time.Second\n\trequire.GreaterOrEqual(t, reportedDuration, minExpected,\n\t\t\"file duration should include silence tail after participants left\")\n}\n\nfunc (r *Runner) testStorageLimit(t *testing.T, test *testCase) {\n\torigLimit := r.FileOutputMaxSize\n\tr.FileOutputMaxSize = 300000 // ~300KB to trigger quickly\n\tt.Cleanup(func() {\n\t\tr.FileOutputMaxSize = origLimit\n\t})\n\n\treq := r.build(test)\n\tinfo := r.sendRequest(t, req)\n\tegressID := info.EgressId\n\n\tdeadline := time.After(45 * time.Second)\n\tfor {\n\t\tselect {\n\t\tcase <-deadline:\n\t\t\tt.Fatal(\"timed out waiting for storage limit\")\n\t\tdefault:\n\t\t}\n\n\t\tupdate := r.getUpdate(t, egressID)\n\t\tswitch update.Status { //nolint:revive // EGRESS_ACTIVE explicitly listed for readability\n\t\tcase livekit.EgressStatus_EGRESS_ACTIVE:\n\t\t\tcontinue\n\t\tcase livekit.EgressStatus_EGRESS_LIMIT_REACHED:\n\t\t\tfile := update.GetFile() //nolint:staticcheck // keep deprecated field for older clients\n\t\t\tif file == nil && len(update.FileResults) > 0 {\n\t\t\t\tfile = 
update.FileResults[0]\n\t\t\t}\n\t\t\trequire.NotNil(t, file)\n\t\t\trequire.Contains(t, update.Details, livekit.EndReasonLimitReached)\n\t\t\trequire.NotEmpty(t, update.Error)\n\t\t\treturn\n\t\tcase livekit.EgressStatus_EGRESS_FAILED:\n\t\t\tt.Fatalf(\"egress failed: %s\", update.Error)\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nfunc (r *Runner) testRtmpFailure(t *testing.T, test *testCase) {\n\treq := r.build(test)\n\n\tinfo, err := r.StartEgress(context.Background(), req)\n\trequire.NoError(t, err)\n\trequire.Empty(t, info.Error)\n\trequire.NotEmpty(t, info.EgressId)\n\trequire.Equal(t, r.RoomName, info.RoomName)\n\trequire.Equal(t, livekit.EgressStatus_EGRESS_STARTING, info.Status)\n\n\t// check updates\n\ttime.Sleep(time.Second * 5)\n\tinfo = r.getUpdate(t, info.EgressId)\n\tstreamFailed := false\n\tfor info.Status == livekit.EgressStatus_EGRESS_ACTIVE {\n\t\tif !streamFailed && info.StreamResults[0].Status == livekit.StreamInfo_FAILED {\n\t\t\tstreamFailed = true\n\t\t}\n\t\tif streamFailed {\n\t\t\t// make sure this never reverts in subsequent updates\n\t\t\trequire.Equal(t, livekit.StreamInfo_FAILED, info.StreamResults[0].Status)\n\t\t}\n\t\tinfo = r.getUpdate(t, info.EgressId)\n\t}\n\n\trequire.Equal(t, livekit.EgressStatus_EGRESS_FAILED, info.Status)\n\trequire.NotEmpty(t, info.Error)\n\trequire.Equal(t, livekit.StreamInfo_FAILED, info.StreamResults[0].Status)\n\trequire.NotEmpty(t, info.StreamResults[0].Error)\n}\n\nfunc (r *Runner) testSrtFailure(t *testing.T, test *testCase) {\n\treq := r.build(test)\n\n\tinfo, err := r.StartEgress(context.Background(), req)\n\trequire.NoError(t, err)\n\trequire.Empty(t, info.Error)\n\trequire.NotEmpty(t, info.EgressId)\n\trequire.Equal(t, livekit.EgressStatus_EGRESS_STARTING, info.Status)\n\n\t// check update\n\ttime.Sleep(time.Second * 5)\n\tinfo = r.getUpdate(t, info.EgressId)\n\tif info.Status == livekit.EgressStatus_EGRESS_ACTIVE {\n\t\tr.checkUpdate(t, info.EgressId, livekit.EgressStatus_EGRESS_FAILED)\n\t} 
else {\n\t\trequire.Equal(t, livekit.EgressStatus_EGRESS_FAILED, info.Status)\n\t}\n}\n\nfunc (r *Runner) testTrackDisconnection(t *testing.T, test *testCase) {\n\ttest.videoTrackID = r.publishSampleWithDisconnection(t, types.MimeTypeVP8)\n\tr.runFileTest(t, test)\n}\n\nfunc (r *Runner) testEmptyStreamBin(t *testing.T, test *testCase) {\n\treq := r.build(test)\n\n\tinfo := r.sendRequest(t, req)\n\tegressID := info.EgressId\n\ttime.Sleep(time.Second * 15)\n\n\t// get params\n\tp, err := config.GetValidatedPipelineConfig(r.ServiceConfig, req)\n\trequire.NoError(t, err)\n\n\tr.checkStreamUpdate(t, egressID, map[string]livekit.StreamInfo_Status{\n\t\trtmpUrl4Redacted:    livekit.StreamInfo_ACTIVE,\n\t\tbadRtmpUrl1Redacted: livekit.StreamInfo_FAILED,\n\t})\n\t_, err = r.client.UpdateStream(context.Background(), egressID, &livekit.UpdateStreamRequest{\n\t\tEgressId:         egressID,\n\t\tRemoveOutputUrls: []string{rtmpUrl4},\n\t})\n\trequire.NoError(t, err)\n\tr.checkStreamUpdate(t, egressID, map[string]livekit.StreamInfo_Status{\n\t\trtmpUrl4Redacted:    livekit.StreamInfo_FINISHED,\n\t\tbadRtmpUrl1Redacted: livekit.StreamInfo_FAILED,\n\t})\n\n\ttime.Sleep(time.Second * 10)\n\tres := r.stopEgress(t, egressID)\n\tr.verifySegments(t, test, p, livekit.SegmentedFileSuffix_INDEX, res, false)\n}\n"
  },
  {
    "path": "test/ffprobe.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n//go:build integration\n\npackage test\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"os\"\n\t\"os/exec\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/llehouerou/go-mp3/lameinfo\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/livekit\"\n)\n\nconst (\n\tmaxRetries = 5\n\tminDelay   = time.Millisecond * 100\n\tmaxDelay   = time.Second * 5\n)\n\nvar (\n\tsegmentTimeRegexp = regexp.MustCompile(`_(\\d{14})(\\d{3})\\.ts`)\n)\n\ntype FFProbeInfo struct {\n\tStreams []struct {\n\t\tCodecName string `json:\"codec_name\"`\n\t\tCodecType string `json:\"codec_type\"`\n\t\tProfile   string `json:\"profile\"`\n\n\t\t// audio\n\t\tSampleRate    string `json:\"sample_rate\"`\n\t\tChannels      int    `json:\"channels\"`\n\t\tChannelLayout string `json:\"channel_layout\"`\n\n\t\t// video\n\t\tWidth        int32  `json:\"width\"`\n\t\tHeight       int32  `json:\"height\"`\n\t\tRFrameRate   string `json:\"r_frame_rate\"`\n\t\tAvgFrameRate string `json:\"avg_frame_rate\"`\n\t\tBitRate      string `json:\"bit_rate\"`\n\t} `json:\"streams\"`\n\tFormat struct {\n\t\tFilename   string `json:\"filename\"`\n\t\tFormatName string `json:\"format_name\"`\n\t\tDuration   string 
`json:\"duration\"`\n\t\tSize       string `json:\"size\"`\n\t\tProbeScore int    `json:\"probe_score\"`\n\t\tTags       struct {\n\t\t\tEncoder string `json:\"encoder\"`\n\t\t} `json:\"tags\"`\n\t} `json:\"format\"`\n}\n\nfunc ffprobe(input string) (*FFProbeInfo, error) {\n\targs := []string{\n\t\t\"-v\", \"quiet\",\n\t\t\"-hide_banner\",\n\t\t\"-show_format\",\n\t\t\"-show_streams\",\n\t\t\"-print_format\", \"json\",\n\t}\n\n\tif strings.HasSuffix(input, \".raw\") {\n\t\targs = append(args,\n\t\t\t\"-f\", \"s16le\",\n\t\t\t\"-ac\", \"2\",\n\t\t\t\"-ar\", \"48k\",\n\t\t)\n\t}\n\n\targs = append(args, input)\n\n\tctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)\n\tdefer cancel()\n\n\tcmd := exec.CommandContext(ctx, \"ffprobe\", args...)\n\tout, err := cmd.Output()\n\tif err != nil {\n\t\tif errors.Is(ctx.Err(), context.DeadlineExceeded) {\n\t\t\treturn nil, fmt.Errorf(\"ffprobe timeout after 15s\")\n\t\t}\n\t\treturn nil, err\n\t}\n\n\tinfo := &FFProbeInfo{}\n\terr = json.Unmarshal(out, info)\n\treturn info, err\n}\n\nfunc verify(t *testing.T, in string, p *config.PipelineConfig, res *livekit.EgressInfo, egressType types.EgressType, withMuting bool, sourceFramerate float64, live bool) *FFProbeInfo {\n\tinfo, err := ffprobe(in)\n\trequire.NoError(t, err)\n\n\t// Check source type\n\tif res != nil {\n\t\tif (p.RequestType == types.RequestTypeRoomComposite || p.RequestType == types.RequestTypeTemplate) && (p.VideoEnabled || p.Layout != \"\") {\n\t\t\trequire.Equal(t, livekit.EgressSourceType_EGRESS_SOURCE_TYPE_WEB, res.SourceType)\n\t\t} else if p.RequestType == types.RequestTypeWeb {\n\t\t\trequire.Equal(t, livekit.EgressSourceType_EGRESS_SOURCE_TYPE_WEB, res.SourceType)\n\t\t} else {\n\t\t\trequire.Equal(t, livekit.EgressSourceType_EGRESS_SOURCE_TYPE_SDK, res.SourceType)\n\t\t}\n\t}\n\n\tswitch egressType {\n\tcase types.EgressTypeFile:\n\t\t// size\n\t\trequire.NotEqual(t, \"0\", info.Format.Size)\n\n\t\t// duration\n\t\tfileRes := 
res.GetFile() //nolint:staticcheck\n\t\tif fileRes == nil {\n\t\t\tfileRes = res.FileResults[0]\n\t\t}\n\t\texpected := float64(fileRes.Duration) / 1e9\n\t\tactual, err := strconv.ParseFloat(info.Format.Duration, 64)\n\t\trequire.NoError(t, err)\n\n\t\t// file duration can be different from egress duration based on keyframes, muting, and latency\n\t\tdelta := 5.0\n\t\tswitch p.RequestType {\n\t\tcase types.RequestTypeRoomComposite, types.RequestTypeTemplate, types.RequestTypeWeb:\n\t\t\trequire.InDelta(t, expected, actual, delta)\n\n\t\tcase types.RequestTypeTrack:\n\t\t\tif p.AudioEnabled {\n\t\t\t\tif withMuting {\n\t\t\t\t\tdelta = 6\n\t\t\t\t}\n\t\t\t\trequire.InDelta(t, expected, actual, delta)\n\t\t\t}\n\t\t}\n\n\tcase types.EgressTypeSegments:\n\t\tactual, err := strconv.ParseFloat(info.Format.Duration, 64)\n\t\trequire.NoError(t, err)\n\n\t\trequire.Len(t, res.GetSegmentResults(), 1)\n\t\tsegments := res.GetSegmentResults()[0]\n\n\t\tif live {\n\t\t\trequire.InDelta(t, float64(5*p.GetSegmentConfig().SegmentDuration), actual, float64(p.GetSegmentConfig().SegmentDuration))\n\t\t} else {\n\t\t\texpected := int64(math.Ceil(actual / float64(p.GetSegmentConfig().SegmentDuration)))\n\t\t\trequire.InDelta(t, expected, segments.SegmentCount, 1)\n\t\t}\n\n\tcase types.EgressTypeWebsocket:\n\t\tsize, err := strconv.Atoi(info.Format.Size)\n\t\trequire.NoError(t, err)\n\t\trequire.Greater(t, size, 6300000)\n\n\t\texpected := float64(res.StreamResults[0].Duration) / 1e9\n\t\tactual, err := strconv.ParseFloat(info.Format.Duration, 64)\n\t\trequire.NoError(t, err)\n\n\t\trequire.InDelta(t, expected, actual, 4.1)\n\t}\n\n\t// verify Xing/Info header for MP3 files\n\tif egressType == types.EgressTypeFile && p.AudioOutCodec == types.MimeTypeMP3 {\n\t\tfpDuration, _ := strconv.ParseFloat(info.Format.Duration, 64)\n\t\tverifyXingHeader(t, in, int(p.AudioFrequency), fpDuration)\n\t}\n\n\t// check stream info\n\tvar hasAudio, hasVideo bool\n\tfor _, stream := range info.Streams 
{\n\t\tswitch stream.CodecType {\n\t\tcase \"audio\":\n\t\t\thasAudio = true\n\n\t\t\t// codec\n\t\t\tswitch p.AudioOutCodec {\n\t\t\tcase types.MimeTypeAAC:\n\t\t\t\trequire.Equal(t, \"aac\", stream.CodecName)\n\t\t\t\trequire.Equal(t, fmt.Sprint(p.AudioFrequency), stream.SampleRate)\n\t\t\t\trequire.Equal(t, \"stereo\", stream.ChannelLayout)\n\n\t\t\tcase types.MimeTypeOpus:\n\t\t\t\trequire.Equal(t, \"opus\", stream.CodecName)\n\t\t\t\trequire.Equal(t, \"48000\", stream.SampleRate)\n\t\t\t\trequire.Equal(t, \"stereo\", stream.ChannelLayout)\n\n\t\t\tcase types.MimeTypeMP3:\n\t\t\t\trequire.Equal(t, \"mp3\", stream.CodecName)\n\t\t\t\trequire.Equal(t, fmt.Sprint(p.AudioFrequency), stream.SampleRate)\n\t\t\t\trequire.Equal(t, \"stereo\", stream.ChannelLayout)\n\n\t\t\t\t// verify CBR: stream bitrate should match configured bitrate\n\t\t\t\tbitrate, err := strconv.Atoi(stream.BitRate)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\trequire.InDelta(t, int(p.AudioBitrate)*1000, bitrate, 5000,\n\t\t\t\t\t\"MP3 bitrate %d bps not close to configured %d kbps\", bitrate, p.AudioBitrate)\n\n\t\t\tcase types.MimeTypeRawAudio:\n\t\t\t\trequire.Equal(t, \"pcm_s16le\", stream.CodecName)\n\t\t\t\trequire.Equal(t, \"48000\", stream.SampleRate)\n\t\t\t}\n\n\t\t\t// channels\n\t\t\trequire.Equal(t, 2, stream.Channels)\n\n\t\t\t// audio bitrate\n\t\t\tif p.Outputs[egressType][0].GetOutputType() == types.OutputTypeMP4 {\n\t\t\t\tbitrate, err := strconv.Atoi(stream.BitRate)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\trequire.NotZero(t, bitrate)\n\t\t\t}\n\n\t\tcase \"video\":\n\t\t\thasVideo = true\n\n\t\t\t// codec and profile\n\t\t\tswitch p.VideoOutCodec {\n\t\t\tcase types.MimeTypeH264:\n\t\t\t\trequire.Equal(t, \"h264\", stream.CodecName)\n\n\t\t\t\tif p.VideoEncoding {\n\t\t\t\t\tswitch p.VideoProfile {\n\t\t\t\t\tcase types.ProfileBaseline:\n\t\t\t\t\t\trequire.Equal(t, \"Constrained Baseline\", stream.Profile)\n\t\t\t\t\tcase types.ProfileMain:\n\t\t\t\t\t\trequire.Equal(t, 
\"Main\", stream.Profile)\n\t\t\t\t\tcase types.ProfileHigh:\n\t\t\t\t\t\trequire.Equal(t, \"High\", stream.Profile)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase types.MimeTypeVP8:\n\t\t\t\trequire.Equal(t, \"vp8\", stream.CodecName)\n\t\t\tcase types.MimeTypeVP9:\n\t\t\t\trequire.Equal(t, \"vp9\", stream.CodecName)\n\t\t\t}\n\n\t\t\tif p.VideoEncoding {\n\t\t\t\t// dimensions\n\t\t\t\trequire.Equal(t, p.Width, stream.Width)\n\t\t\t\trequire.Equal(t, p.Height, stream.Height)\n\t\t\t}\n\n\t\t\tswitch p.Outputs[egressType][0].GetOutputType() {\n\t\t\tcase types.OutputTypeIVF:\n\t\t\t\trequire.Equal(t, \"vp8\", stream.CodecName)\n\n\t\t\tcase types.OutputTypeMP4:\n\t\t\t\trequire.Equal(t, \"h264\", stream.CodecName)\n\n\t\t\t\tif p.VideoEncoding {\n\t\t\t\t\t// bitrate, not available for HLS or WebM\n\t\t\t\t\tbitrate, err := strconv.Atoi(stream.BitRate)\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\trequire.NotZero(t, bitrate)\n\t\t\t\t\trequire.Less(t, int32(bitrate), p.VideoBitrate*1050)\n\n\t\t\t\t\t// framerate\n\t\t\t\t\tfrac := strings.Split(stream.AvgFrameRate, \"/\")\n\t\t\t\t\trequire.Len(t, frac, 2)\n\t\t\t\t\tn, err := strconv.ParseFloat(frac[0], 64)\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\td, err := strconv.ParseFloat(frac[1], 64)\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\trequire.NotZero(t, d)\n\t\t\t\t\trequire.Less(t, n/d, float64(p.Framerate)*1.5)\n\t\t\t\t\trequire.Greater(t, n/d, float64(sourceFramerate)*0.8)\n\t\t\t\t}\n\n\t\t\tcase types.OutputTypeHLS:\n\t\t\t\trequire.Equal(t, \"h264\", stream.CodecName)\n\t\t\t}\n\n\t\tdefault:\n\t\t\tt.Fatalf(\"unrecognized stream type %s\", stream.CodecType)\n\t\t}\n\t}\n\n\tif p.AudioEnabled {\n\t\trequire.True(t, hasAudio)\n\t\trequire.NotEmpty(t, p.AudioOutCodec)\n\t}\n\n\tif p.VideoEnabled {\n\t\trequire.True(t, hasVideo)\n\t\trequire.NotEmpty(t, p.VideoOutCodec)\n\t}\n\treturn info\n}\n\n// parseFFProbeDuration supports either \"123.456\" (seconds) or \"HH:MM:SS.mmm\"\nfunc parseFFProbeDuration(s string) 
(time.Duration, error) {\n\ts = strings.TrimSpace(s)\n\tif s == \"\" {\n\t\treturn 0, errors.New(\"empty duration\")\n\t}\n\n\tif strings.Contains(s, \":\") {\n\t\t// HH:MM:SS(.frac)\n\t\tparts := strings.Split(s, \":\")\n\t\tif len(parts) != 3 {\n\t\t\treturn 0, fmt.Errorf(\"invalid H:M:S format: %q\", s)\n\t\t}\n\t\th, err := strconv.ParseFloat(parts[0], 64)\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"invalid h part: %w\", err)\n\t\t}\n\t\tm, err := strconv.ParseFloat(parts[1], 64)\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"invalid m part: %w\", err)\n\t\t}\n\t\tsec, err := strconv.ParseFloat(parts[2], 64)\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"invalid s part: %w\", err)\n\t\t}\n\t\ttotal := h*3600 + m*60 + sec\n\t\treturn time.Duration(total * float64(time.Second)), nil\n\t}\n\n\t// Plain seconds (stringified float)\n\tf, err := strconv.ParseFloat(s, 64)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"invalid seconds format %q: %w\", s, err)\n\t}\n\treturn time.Duration(f * float64(time.Second)), nil\n}\n\n// verifyXingHeader checks that an MP3 file contains a valid Xing/Info header\n// and that the duration derived from its frame count matches ffprobe.\nfunc verifyXingHeader(t *testing.T, filepath string, sampleRate int, ffprobeDuration float64) {\n\tt.Helper()\n\n\tf, err := os.Open(filepath)\n\trequire.NoError(t, err)\n\tdefer f.Close()\n\n\txi, err := lameinfo.ParseFromReader(f)\n\trequire.NoError(t, err, \"MP3 file missing Xing/Info header\")\n\n\trequire.True(t, xi.HasFrameCount(), \"Xing header missing frame count\")\n\trequire.NotZero(t, xi.FrameCount, \"Xing header has zero frame count\")\n\n\trequire.True(t, xi.HasTOC(), \"Xing header missing TOC seek table\")\n\n\t// MPEG1 Layer 3: 1152 samples per frame.\n\t// Cross-check Xing frame count against ffprobe duration.\n\tconst samplesPerFrame = 1152\n\txingDuration := float64(xi.FrameCount) * samplesPerFrame / float64(sampleRate)\n\trequire.InDelta(t, ffprobeDuration, 
xingDuration, 0.1,\n\t\t\"Xing duration (%0.3fs from %d frames) does not match ffprobe duration (%0.3fs)\",\n\t\txingDuration, xi.FrameCount, ffprobeDuration)\n}\n"
  },
  {
    "path": "test/file.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n//go:build integration\n\npackage test\n\nimport (\n\t\"path\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/livekit\"\n)\n\nfunc (r *Runner) testFile(t *testing.T) {\n\tif !r.should(runFile) {\n\t\treturn\n\t}\n\n\tt.Run(\"File\", func(t *testing.T) {\n\t\tfor _, test := range []*testCase{\n\n\t\t\t// ---- Room Composite -----\n\n\t\t\t{\n\t\t\t\tname:        \"RoomComposite/Base\",\n\t\t\t\trequestType: types.RequestTypeRoomComposite, publishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\tvideoCodec: types.MimeTypeH264,\n\t\t\t\t\tlayout:     \"speaker\",\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"r_{room_name}_{time}.mp4\",\n\t\t\t\t},\n\t\t\t\tcontentCheck: r.fullContentCheck,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"RoomComposite/VideoOnly\",\n\t\t\t\trequestType: types.RequestTypeRoomComposite, publishOptions: publishOptions{\n\t\t\t\t\tvideoCodec: types.MimeTypeH264,\n\t\t\t\t\tvideoOnly:  true,\n\t\t\t\t\tlayout:     \"speaker\",\n\t\t\t\t},\n\t\t\t\tencodingOptions: &livekit.EncodingOptions{\n\t\t\t\t\tVideoCodec: livekit.VideoCodec_H264_HIGH,\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: 
\"r_{room_name}_video_{time}.mp4\",\n\t\t\t\t},\n\t\t\t\tcontentCheck: r.videoOnlyContentCheck,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"RoomComposite/AudioOnly\",\n\t\t\t\trequestType: types.RequestTypeRoomComposite, publishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\taudioOnly:  true,\n\t\t\t\t},\n\t\t\t\tencodingOptions: &livekit.EncodingOptions{\n\t\t\t\t\tAudioCodec: livekit.AudioCodec_OPUS,\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"r_{room_name}_audio_{time}\",\n\t\t\t\t\tfileType: livekit.EncodedFileType_OGG,\n\t\t\t\t},\n\t\t\t\tcontentCheck: r.audioOnlyContentCheck,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"RoomComposite/AudioOnlyMP3\",\n\t\t\t\trequestType: types.RequestTypeRoomComposite,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\taudioOnly:  true,\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"r_{room_name}_audio_mp3_{time}\",\n\t\t\t\t\tfileType: livekit.EncodedFileType_MP3,\n\t\t\t\t},\n\t\t\t\tcontentCheck: r.audioOnlyContentCheck,\n\t\t\t},\n\n\t\t\t// ---------- Web ----------\n\n\t\t\t{\n\t\t\t\tname: \"Web\",\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\tvideoOnly: true,\n\t\t\t\t},\n\t\t\t\trequestType: types.RequestTypeWeb,\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"web_{time}\",\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t// ------ Participant ------\n\n\t\t\t{\n\t\t\t\tname:        \"ParticipantComposite/VP8\",\n\t\t\t\trequestType: types.RequestTypeParticipant, publishOptions: publishOptions{\n\t\t\t\t\taudioCodec:     types.MimeTypeOpus,\n\t\t\t\t\taudioDelay:     time.Second * 8,\n\t\t\t\t\taudioUnpublish: time.Second * 14,\n\t\t\t\t\taudioRepublish: time.Second * 20,\n\t\t\t\t\tvideoCodec:     types.MimeTypeVP8,\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"participant_{publisher_identity}_vp8_{time}.mp4\",\n\t\t\t\t\tfileType: 
livekit.EncodedFileType_MP4,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"ParticipantComposite/H264\",\n\t\t\t\trequestType: types.RequestTypeParticipant, publishOptions: publishOptions{\n\t\t\t\t\taudioCodec:     types.MimeTypeOpus,\n\t\t\t\t\tvideoCodec:     types.MimeTypeH264,\n\t\t\t\t\tvideoUnpublish: time.Second * 10,\n\t\t\t\t\tvideoRepublish: time.Second * 20,\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"participant_{room_name}_h264_{time}.mp4\",\n\t\t\t\t\tfileType: livekit.EncodedFileType_MP4,\n\t\t\t\t},\n\t\t\t\tcontentCheck: r.fullContentCheckWithVideoUnpublishAt10AndRepublishAt20,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"ParticipantComposite/AudioOnly\",\n\t\t\t\trequestType: types.RequestTypeParticipant, publishOptions: publishOptions{\n\t\t\t\t\taudioCodec:     types.MimeTypeOpus,\n\t\t\t\t\taudioUnpublish: time.Second * 10,\n\t\t\t\t\taudioRepublish: time.Second * 15,\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"participant_{room_name}_{time}.mp4\",\n\t\t\t\t\tfileType: livekit.EncodedFileType_MP4,\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t// ---- Track Composite ----\n\n\t\t\t{\n\t\t\t\tname:        \"TrackComposite/VP8\",\n\t\t\t\trequestType: types.RequestTypeTrackComposite, publishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\tvideoCodec: types.MimeTypeVP8,\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"tc_{publisher_identity}_vp8_{time}.mp4\",\n\t\t\t\t\tfileType: livekit.EncodedFileType_MP4,\n\t\t\t\t},\n\t\t\t\tcontentCheck: r.fullContentCheck,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"TrackComposite/VideoOnly\",\n\t\t\t\trequestType: types.RequestTypeTrackComposite,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\tvideoCodec: types.MimeTypeH264,\n\t\t\t\t\tvideoOnly:  true,\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"tc_{room_name}_video_{time}.mp4\",\n\t\t\t\t\tfileType: 
livekit.EncodedFileType_MP4,\n\t\t\t\t},\n\t\t\t\tcontentCheck: r.videoOnlyContentCheck,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"TrackComposite/AudioOnlyMP3\",\n\t\t\t\trequestType: types.RequestTypeTrackComposite,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\taudioOnly:  true,\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename:   \"tc_{room_name}_audio_mp3_{time}\",\n\t\t\t\t\tfileType:   livekit.EncodedFileType_MP3,\n\t\t\t\t\toutputType: types.OutputTypeMP3,\n\t\t\t\t},\n\t\t\t\tcontentCheck: r.audioOnlyContentCheck,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"TrackComposite/AudioOnlyPCMU\",\n\t\t\t\trequestType: types.RequestTypeTrackComposite,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypePCMU,\n\t\t\t\t\taudioOnly:  true,\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename:   \"tc_{room_name}_audio_pcmu_{time}.mp4\",\n\t\t\t\t\tfileType:   livekit.EncodedFileType_MP4,\n\t\t\t\t\toutputType: types.OutputTypeMP4,\n\t\t\t\t},\n\t\t\t\tcontentCheck: r.audioOnlyContentCheck,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"TrackComposite/AudioOnlyPCMA\",\n\t\t\t\trequestType: types.RequestTypeTrackComposite,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypePCMA,\n\t\t\t\t\taudioOnly:  true,\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename:   \"tc_{room_name}_audio_pcma_{time}.mp4\",\n\t\t\t\t\tfileType:   livekit.EncodedFileType_MP4,\n\t\t\t\t\toutputType: types.OutputTypeMP4,\n\t\t\t\t},\n\t\t\t\tcontentCheck: r.audioOnlyContentCheck,\n\t\t\t},\n\n\t\t\t// --------- Track ---------\n\n\t\t\t{\n\t\t\t\tname:        \"Track/Opus\",\n\t\t\t\trequestType: types.RequestTypeTrack,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\taudioOnly:  true,\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename:   
\"t_{track_source}_{time}.ogg\",\n\t\t\t\t\toutputType: types.OutputTypeOGG,\n\t\t\t\t},\n\t\t\t\tcontentCheck: r.audioOnlyContentCheck,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"Track/PCMU\",\n\t\t\t\trequestType: types.RequestTypeTrack,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypePCMU,\n\t\t\t\t\taudioOnly:  true,\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename:   \"t_{track_source}_pcmu_{time}.ogg\",\n\t\t\t\t\toutputType: types.OutputTypeOGG,\n\t\t\t\t},\n\t\t\t\tcontentCheck: r.audioOnlyContentCheck,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"Track/PCMA\",\n\t\t\t\trequestType: types.RequestTypeTrack,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypePCMA,\n\t\t\t\t\taudioOnly:  true,\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename:   \"t_{track_source}_pcma_{time}.ogg\",\n\t\t\t\t\toutputType: types.OutputTypeOGG,\n\t\t\t\t},\n\t\t\t\tcontentCheck: r.audioOnlyContentCheck,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"Track/H264\",\n\t\t\t\trequestType: types.RequestTypeTrack,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\tvideoCodec: types.MimeTypeH264,\n\t\t\t\t\tvideoOnly:  true,\n\t\t\t\t},\n\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename:   \"t_{track_id}_{time}.mp4\",\n\t\t\t\t\toutputType: types.OutputTypeMP4,\n\t\t\t\t},\n\t\t\t\tcontentCheck: r.videoOnlyContentCheck,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"Track/VP8\",\n\t\t\t\trequestType: types.RequestTypeTrack,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\tvideoCodec: types.MimeTypeVP8,\n\t\t\t\t\tvideoOnly:  true,\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename:   \"t_{track_type}_{time}.webm\",\n\t\t\t\t\toutputType: types.OutputTypeWebM,\n\t\t\t\t},\n\t\t\t\tcontentCheck: r.videoOnlyContentCheck,\n\t\t\t},\n\t\t\t// {\n\t\t\t// \tname:       \"Track/VP9\",\n\t\t\t// \tvideoOnly:  true,\n\t\t\t// \tvideoCodec: types.MimeTypeVP9,\n\t\t\t// \toutputType: 
types.OutputTypeWebM,\n\t\t\t// \tfilename:   \"t_{track_type}_{time}.webm\",\n\t\t\t// },\n\n\t\t\t// -------- Template --------\n\n\t\t\t{\n\t\t\t\tname:        \"Template/AudioOnly\",\n\t\t\t\trequestType: types.RequestTypeTemplate,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\taudioOnly:  true,\n\t\t\t\t},\n\t\t\t\tencodingOptions: &livekit.EncodingOptions{\n\t\t\t\t\tAudioCodec: livekit.AudioCodec_OPUS,\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"template_audio_{time}\",\n\t\t\t\t\tfileType: livekit.EncodedFileType_OGG,\n\t\t\t\t},\n\t\t\t\tcontentCheck: r.audioOnlyContentCheck,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"Template/VideoOnly\",\n\t\t\t\trequestType: types.RequestTypeTemplate,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\tvideoCodec: types.MimeTypeH264,\n\t\t\t\t\tvideoOnly:  true,\n\t\t\t\t\tlayout:     \"speaker\",\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"template_video_{time}.mp4\",\n\t\t\t\t},\n\t\t\t\tcontentCheck: r.videoOnlyContentCheck,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"Template/Base\",\n\t\t\t\trequestType: types.RequestTypeTemplate,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\tvideoCodec: types.MimeTypeH264,\n\t\t\t\t\tlayout:     \"speaker\",\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"template_{time}.mp4\",\n\t\t\t\t},\n\t\t\t\tcontentCheck: r.fullContentCheck,\n\t\t\t},\n\n\t\t\t// --------- Web V2 --------\n\n\t\t\t{\n\t\t\t\tname:        \"WebV2/AudioOnly\",\n\t\t\t\trequestType: types.RequestTypeWeb,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioOnly: true,\n\t\t\t\t},\n\t\t\t\tencodingOptions: &livekit.EncodingOptions{\n\t\t\t\t\tAudioCodec: livekit.AudioCodec_OPUS,\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"webv2_audio_{time}\",\n\t\t\t\t\tfileType: 
livekit.EncodedFileType_OGG,\n\t\t\t\t},\n\t\t\t\tv2OutputOptions: &v2OutputOptions{},\n\t\t\t\tcontentCheck:    r.audioOnlyContentCheck,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"WebV2/VideoOnly\",\n\t\t\t\trequestType: types.RequestTypeWeb,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\tvideoOnly: true,\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"webv2_video_{time}.mp4\",\n\t\t\t\t},\n\t\t\t\tv2OutputOptions: &v2OutputOptions{},\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"WebV2/Base\",\n\t\t\t\trequestType: types.RequestTypeWeb,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\tvideoCodec: types.MimeTypeH264,\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"webv2_{time}.mp4\",\n\t\t\t\t},\n\t\t\t\tv2OutputOptions: &v2OutputOptions{},\n\t\t\t},\n\n\t\t\t// -------- Media ----------\n\n\t\t\t{\n\t\t\t\tname:        \"Media/AudioOnly\",\n\t\t\t\trequestType: types.RequestTypeMedia,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\taudioOnly:  true,\n\t\t\t\t\taudioRoutes: []*livekit.AudioRoute{{\n\t\t\t\t\t\tMatch: &livekit.AudioRoute_TrackId{TrackId: \"set-at-runtime\"},\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t\tencodingOptions: &livekit.EncodingOptions{\n\t\t\t\t\tAudioCodec: livekit.AudioCodec_OPUS,\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"media_audio_{time}\",\n\t\t\t\t\tfileType: livekit.EncodedFileType_OGG,\n\t\t\t\t},\n\t\t\t\tcontentCheck: r.audioOnlyContentCheck,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"Media/VideoOnly\",\n\t\t\t\trequestType: types.RequestTypeMedia,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\tvideoCodec: types.MimeTypeH264,\n\t\t\t\t\tvideoOnly:  true,\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"media_video_{time}.mp4\",\n\t\t\t\t},\n\t\t\t\tcontentCheck: r.videoOnlyContentCheck,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        
\"Media/Base\",\n\t\t\t\trequestType: types.RequestTypeMedia,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\tvideoCodec: types.MimeTypeH264,\n\t\t\t\t\taudioRoutes: []*livekit.AudioRoute{{\n\t\t\t\t\t\tMatch: &livekit.AudioRoute_TrackId{TrackId: \"set-at-runtime\"},\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"media_{time}.mp4\",\n\t\t\t\t},\n\t\t\t\tcontentCheck: r.fullContentCheck,\n\t\t\t},\n\n\t\t\t// ---- Media Audio Routing ----\n\n\t\t\t{\n\t\t\t\tname:        \"Media/AudioRouteByTrackID\",\n\t\t\t\trequestType: types.RequestTypeMedia,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\tvideoCodec: types.MimeTypeH264,\n\t\t\t\t\taudioRoutes: []*livekit.AudioRoute{{\n\t\t\t\t\t\tMatch:   &livekit.AudioRoute_TrackId{TrackId: \"set-at-runtime\"},\n\t\t\t\t\t\tChannel: livekit.AudioChannel_AUDIO_CHANNEL_LEFT,\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"media_route_trackid_{time}.mp4\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"Media/AudioRouteByParticipantIdentity\",\n\t\t\t\trequestType: types.RequestTypeMedia,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\tvideoCodec: types.MimeTypeH264,\n\t\t\t\t\taudioRoutes: []*livekit.AudioRoute{{\n\t\t\t\t\t\tMatch:   &livekit.AudioRoute_ParticipantIdentity{ParticipantIdentity: \"set-at-runtime\"},\n\t\t\t\t\t\tChannel: livekit.AudioChannel_AUDIO_CHANNEL_BOTH,\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"media_route_identity_{time}.mp4\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"Media/AudioRouteByParticipantKind\",\n\t\t\t\trequestType: types.RequestTypeMedia,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\tvideoCodec: types.MimeTypeH264,\n\t\t\t\t\taudioRoutes: 
[]*livekit.AudioRoute{{\n\t\t\t\t\t\tMatch:   &livekit.AudioRoute_ParticipantKind{ParticipantKind: livekit.ParticipantInfo_STANDARD},\n\t\t\t\t\t\tChannel: livekit.AudioChannel_AUDIO_CHANNEL_BOTH,\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"media_route_kind_{time}.mp4\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"Media/MultiRoute\",\n\t\t\t\trequestType: types.RequestTypeMedia,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\tvideoCodec: types.MimeTypeH264,\n\t\t\t\t\taudioRoutes: []*livekit.AudioRoute{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tMatch:   &livekit.AudioRoute_TrackId{TrackId: \"set-at-runtime\"},\n\t\t\t\t\t\t\tChannel: livekit.AudioChannel_AUDIO_CHANNEL_LEFT,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tMatch:   &livekit.AudioRoute_ParticipantIdentity{ParticipantIdentity: \"set-at-runtime\"},\n\t\t\t\t\t\t\tChannel: livekit.AudioChannel_AUDIO_CHANNEL_RIGHT,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"media_multiroute_{time}.mp4\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"Media/ParticipantVideo\",\n\t\t\t\trequestType: types.RequestTypeMedia,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\tvideoCodec: types.MimeTypeH264,\n\t\t\t\t\tmediaParticipantVideo: &livekit.ParticipantVideo{\n\t\t\t\t\t\tIdentity: \"set-at-runtime\",\n\t\t\t\t\t},\n\t\t\t\t\taudioRoutes: []*livekit.AudioRoute{{\n\t\t\t\t\t\tMatch: &livekit.AudioRoute_TrackId{TrackId: \"set-at-runtime\"},\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"media_participant_video_{time}.mp4\",\n\t\t\t\t},\n\t\t\t\tcontentCheck: r.fullContentCheck,\n\t\t\t},\n\t\t} {\n\t\t\tif !r.run(t, test, r.runFileTest) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc (r *Runner) runFileTest(t *testing.T, test *testCase) {\n\treq := r.buildRequest(test)\n\n\t// 
start\n\tegressID := r.startEgress(t, req)\n\n\ttime.Sleep(time.Second * 10)\n\tif r.Dotfiles {\n\t\tr.createDotFile(t, egressID)\n\t}\n\n\t// stop\n\ttime.Sleep(time.Second * 15)\n\tres := r.stopEgress(t, egressID)\n\n\t// get params\n\tp, err := config.GetValidatedPipelineConfig(r.ServiceConfig, req)\n\trequire.NoError(t, err)\n\tif p.GetFileConfig().OutputType == types.OutputTypeUnknownFile {\n\t\tp.GetFileConfig().OutputType = test.fileOptions.outputType\n\t}\n\n\trequire.Equal(t, test.requestType != types.RequestTypeTrack && !test.audioOnly, p.VideoEncoding)\n\n\t// verify\n\tr.verifyFile(t, test, p, res)\n}\n\nfunc (r *Runner) verifyFile(t *testing.T, tc *testCase, p *config.PipelineConfig, res *livekit.EgressInfo) {\n\t// egress info\n\trequire.Equal(t, res.Error == \"\", res.Status != livekit.EgressStatus_EGRESS_FAILED)\n\trequire.NotZero(t, res.StartedAt)\n\trequire.NotZero(t, res.EndedAt)\n\n\t// file info\n\tfileRes := res.GetFile() //nolint:staticcheck\n\tif fileRes == nil {\n\t\trequire.Len(t, res.FileResults, 1)\n\t\tfileRes = res.FileResults[0]\n\t}\n\n\trequire.NotEmpty(t, fileRes.Location)\n\trequire.Greater(t, fileRes.Size, int64(0))\n\trequire.Greater(t, fileRes.Duration, int64(0))\n\n\tstoragePath := fileRes.Filename\n\trequire.NotEmpty(t, storagePath)\n\trequire.False(t, strings.Contains(storagePath, \"{\"))\n\tstorageFilename := path.Base(storagePath)\n\n\t// download from cloud storage\n\tlocalPath := path.Join(r.FilePrefix, storageFilename)\n\tdownload(t, p.GetFileConfig().StorageConfig, localPath, storagePath, false)\n\n\tmanifestLocal := path.Join(path.Dir(localPath), res.EgressId+\".json\")\n\tmanifestStorage := path.Join(path.Dir(storagePath), res.EgressId+\".json\")\n\tmanifest := loadManifest(t, p.GetFileConfig().StorageConfig, manifestLocal, manifestStorage)\n\trequire.NotNil(t, manifest)\n\n\t// verify\n\tinfo := verify(t, localPath, p, res, types.EgressTypeFile, r.Muting, r.sourceFramerate, false)\n\n\tif tc.contentCheck != nil && 
info != nil {\n\t\ttc.contentCheck(t, localPath, info)\n\t}\n}\n"
  },
  {
    "path": "test/flags.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n//go:build integration\n\npackage test\n\nimport \"github.com/livekit/egress/pkg/types\"\n\nconst (\n\trunRoom           = 0b1 << 0\n\trunWeb            = 0b1 << 1\n\trunParticipant    = 0b1 << 2\n\trunTrackComposite = 0b1 << 3\n\trunTrack          = 0b1 << 4\n\trunTemplate       = 0b1 << 5\n\trunMedia          = 0b1 << 6\n\n\trunAllRequests = 0b1111111\n\n\trunFile     = 0b1 << 31\n\trunStream   = 0b1 << 30\n\trunSegments = 0b1 << 29\n\trunImages   = 0b1 << 28\n\trunMulti    = 0b1 << 27\n\trunEdge     = 0b1 << 26\n\n\trunAllOutputs = 0b111111 << 26\n)\n\nvar runRequestType = map[types.RequestType]uint{\n\ttypes.RequestTypeRoomComposite:  runRoom,\n\ttypes.RequestTypeWeb:            runWeb,\n\ttypes.RequestTypeParticipant:    runParticipant,\n\ttypes.RequestTypeTrackComposite: runTrackComposite,\n\ttypes.RequestTypeTrack:          runTrack,\n\ttypes.RequestTypeTemplate:       runTemplate,\n\ttypes.RequestTypeMedia:          runMedia,\n}\n\nfunc (r *Runner) updateFlagset() {\n\tswitch {\n\tcase r.RoomTestsOnly:\n\t\tr.shouldRun |= runRoom\n\tcase r.ParticipantTestsOnly:\n\t\tr.shouldRun |= runParticipant\n\tcase r.WebTestsOnly:\n\t\tr.shouldRun |= runWeb\n\tcase r.TrackCompositeTestsOnly:\n\t\tr.shouldRun |= runTrackComposite\n\tcase r.TrackTestsOnly:\n\t\tr.shouldRun |= runTrack\n\tcase r.TemplateTestsOnly:\n\t\tr.shouldRun |= runTemplate\n\tcase 
r.MediaTestsOnly:\n\t\tr.shouldRun |= runMedia\n\tdefault:\n\t\tr.shouldRun |= runAllRequests\n\t}\n\n\tswitch {\n\tcase r.FileTestsOnly:\n\t\tr.shouldRun |= runFile\n\tcase r.StreamTestsOnly:\n\t\tr.shouldRun |= runStream\n\tcase r.SegmentTestsOnly:\n\t\tr.shouldRun |= runSegments\n\tcase r.ImageTestsOnly:\n\t\tr.shouldRun |= runImages\n\tcase r.MultiTestsOnly:\n\t\tr.shouldRun |= runMulti\n\tcase r.EdgeCasesOnly:\n\t\tr.shouldRun |= runEdge\n\tdefault:\n\t\tr.shouldRun |= runAllOutputs\n\t}\n}\n\nfunc (r *Runner) should(runFlag uint) bool {\n\treturn r.shouldRun&runFlag > 0\n}\n"
  },
  {
    "path": "test/images.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n//go:build integration\n\npackage test\n\nimport (\n\t\"fmt\"\n\t\"path\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/livekit\"\n)\n\nfunc (r *Runner) testImages(t *testing.T) {\n\tif !r.should(runImages) {\n\t\treturn\n\t}\n\n\tt.Run(\"Images\", func(t *testing.T) {\n\t\tfor _, test := range []*testCase{\n\n\t\t\t// ---- Room Composite -----\n\n\t\t\t{\n\t\t\t\tname:        \"RoomComposite\",\n\t\t\t\trequestType: types.RequestTypeRoomComposite,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\tvideoCodec: types.MimeTypeH264,\n\t\t\t\t\tlayout:     \"speaker\",\n\t\t\t\t},\n\t\t\t\tencodingOptions: &livekit.EncodingOptions{\n\t\t\t\t\tWidth:  640,\n\t\t\t\t\tHeight: 360,\n\t\t\t\t},\n\t\t\t\timageOptions: &imageOptions{\n\t\t\t\t\tprefix: \"r_{room_name}_{time}\",\n\t\t\t\t\tsuffix: livekit.ImageFileSuffix_IMAGE_SUFFIX_TIMESTAMP,\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t// ---- Track Composite ----\n\n\t\t\t{\n\t\t\t\tname:        \"TrackComposite/H264\",\n\t\t\t\trequestType: types.RequestTypeTrackComposite,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\tvideoCodec: types.MimeTypeH264,\n\t\t\t\t},\n\t\t\t\timageOptions: 
&imageOptions{\n\t\t\t\t\tprefix: \"tc_{publisher_identity}_h264\",\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t// -------- Media ----------\n\n\t\t\t{\n\t\t\t\tname:        \"Media\",\n\t\t\t\trequestType: types.RequestTypeMedia,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\tvideoCodec: types.MimeTypeH264,\n\t\t\t\t\tvideoOnly:  true,\n\t\t\t\t},\n\t\t\t\timageOptions: &imageOptions{\n\t\t\t\t\tprefix: \"media_{room_name}_{time}\",\n\t\t\t\t\tsuffix: livekit.ImageFileSuffix_IMAGE_SUFFIX_TIMESTAMP,\n\t\t\t\t},\n\t\t\t},\n\t\t} {\n\t\t\tif !r.run(t, test, r.runImagesTest) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc (r *Runner) runImagesTest(t *testing.T, test *testCase) {\n\treq := r.buildRequest(test)\n\n\tegressID := r.startEgress(t, req)\n\n\ttime.Sleep(time.Second * 10)\n\tif r.Dotfiles {\n\t\tr.createDotFile(t, egressID)\n\t}\n\n\t// stop\n\ttime.Sleep(time.Second * 15)\n\tres := r.stopEgress(t, egressID)\n\n\t// get params\n\tp, err := config.GetValidatedPipelineConfig(r.ServiceConfig, req)\n\trequire.NoError(t, err)\n\n\tr.verifyImages(t, p, res)\n}\n\nfunc (r *Runner) verifyImages(t *testing.T, p *config.PipelineConfig, res *livekit.EgressInfo) {\n\t// egress info\n\trequire.Equal(t, res.Error == \"\", res.Status != livekit.EgressStatus_EGRESS_FAILED)\n\trequire.NotZero(t, res.StartedAt)\n\trequire.NotZero(t, res.EndedAt)\n\n\t// image info\n\trequire.Len(t, res.GetImageResults(), 1)\n\timages := res.GetImageResults()[0]\n\n\trequire.Greater(t, images.ImageCount, int64(0))\n\n\timageConfig := p.GetImageConfigs()[0]\n\tfor i := range images.ImageCount {\n\t\tstoragePath := fmt.Sprintf(\"%s_%05d%s\", images.FilenamePrefix, i, imageConfig.ImageExtension)\n\t\tlocalPath := path.Join(r.FilePrefix, path.Base(storagePath))\n\t\tdownload(t, imageConfig.StorageConfig, localPath, storagePath, true)\n\t}\n}\n"
  },
  {
    "path": "test/integration.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n//go:build integration\n\npackage test\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/livekit\"\n\t\"github.com/livekit/protocol/logger\"\n\t\"github.com/livekit/protocol/rpc\"\n)\n\nvar uploadPrefix = fmt.Sprintf(\"integration/%s\", time.Now().Format(\"2006-01-02\"))\n\nfunc (r *Runner) RunTests(t *testing.T) {\n\t// run tests\n\tr.testFile(t)\n\tr.testStream(t)\n\tr.testSegments(t)\n\tr.testImages(t)\n\tr.testMulti(t)\n\tr.testEdgeCases(t)\n}\n\nfunc (r *Runner) run(t *testing.T, test *testCase, f func(*testing.T, *testCase)) bool {\n\tif !r.should(runRequestType[test.requestType]) {\n\t\treturn true\n\t}\n\n\tswitch test.requestType {\n\tcase types.RequestTypeRoomComposite, types.RequestTypeWeb:\n\t\tr.sourceFramerate = 30\n\tcase types.RequestTypeParticipant, types.RequestTypeTrackComposite, types.RequestTypeTrack, types.RequestTypeMedia:\n\t\tr.sourceFramerate = 23.97\n\tcase types.RequestTypeTemplate:\n\t\tif test.audioOnly && test.layout == \"\" && test.templateCustomBaseUrl == \"\" {\n\t\t\tr.sourceFramerate = 23.97\n\t\t} else {\n\t\t\tr.sourceFramerate = 30\n\t\t}\n\t}\n\n\tr.awaitIdle(t)\n\tr.ensureRoomForTest(t, 
test)\n\n\tr.testNumber++\n\tt.Run(fmt.Sprintf(\"%d/%s\", r.testNumber, test.name), func(t *testing.T) {\n\t\taudioMuting := r.Muting\n\t\tvideoMuting := r.Muting && test.audioCodec == \"\"\n\n\t\ttest.audioTrackID = r.publishSample(t, test.audioCodec, test.audioDelay, test.audioUnpublish, audioMuting)\n\t\tif test.audioRepublish != 0 {\n\t\t\tr.publishSample(t, test.audioCodec, test.audioRepublish, 0, audioMuting)\n\t\t}\n\t\ttest.videoTrackID = r.publishSample(t, test.videoCodec, test.videoDelay, test.videoUnpublish, videoMuting)\n\t\tif test.videoRepublish != 0 {\n\t\t\tr.publishSample(t, test.videoCodec, test.videoRepublish, 0, videoMuting)\n\t\t}\n\n\t\tlogger.Infow(\"test publish summary\",\n\t\t\t\"test\", test.name,\n\t\t\t\"room\", r.RoomName,\n\t\t\t\"audioCodec\", test.audioCodec,\n\t\t\t\"audioTrackID\", test.audioTrackID,\n\t\t\t\"videoCodec\", test.videoCodec,\n\t\t\t\"videoTrackID\", test.videoTrackID,\n\t\t)\n\n\t\tf(t, test)\n\t})\n\n\treturn !r.Short\n}\n\nfunc (r *Runner) ensureRoomForTest(t *testing.T, test *testCase) {\n\tdesiredRoom := r.RoomBaseName\n\tvar codecs []livekit.Codec\n\tswitch test.audioCodec {\n\tcase types.MimeTypePCMU:\n\t\tdesiredRoom = fmt.Sprintf(\"%s-pcmu\", r.RoomBaseName)\n\t\tcodecs = []livekit.Codec{{\n\t\t\tMime: string(types.MimeTypePCMU),\n\t\t}}\n\tcase types.MimeTypePCMA:\n\t\tdesiredRoom = fmt.Sprintf(\"%s-pcma\", r.RoomBaseName)\n\t\tcodecs = []livekit.Codec{{\n\t\t\tMime: string(types.MimeTypePCMA),\n\t\t}}\n\t}\n\n\tif desiredRoom == \"\" || desiredRoom == r.RoomName {\n\t\treturn\n\t}\n\n\tr.connectRoom(t, desiredRoom, codecs)\n}\n\nfunc (r *Runner) awaitIdle(t *testing.T) {\n\tr.svc.KillAll()\n\tfor i := 0; i < 30; i++ {\n\t\tif r.svc.IsIdle() && len(r.room.LocalParticipant.TrackPublications()) == 0 {\n\t\t\treturn\n\t\t}\n\t\ttime.Sleep(time.Second)\n\t}\n\n\tif !r.svc.IsIdle() {\n\t\tt.Fatal(\"service not idle after 30s\")\n\t} else if len(r.room.LocalParticipant.TrackPublications()) != 0 
{\n\t\tt.Fatal(\"room still has tracks after 30s\")\n\t}\n}\n\nfunc (r *Runner) startEgress(t *testing.T, req *rpc.StartEgressRequest) string {\n\tinfo := r.sendRequest(t, req)\n\n\t// check status\n\tif r.HealthPort != 0 {\n\t\tstatus := r.getStatus(t)\n\t\trequire.Contains(t, status, info.EgressId)\n\t}\n\n\t// wait\n\ttime.Sleep(time.Second * 5)\n\n\t// check active update\n\tr.checkUpdate(t, info.EgressId, livekit.EgressStatus_EGRESS_ACTIVE)\n\n\treturn info.EgressId\n}\n\nfunc (r *Runner) sendRequest(t *testing.T, req *rpc.StartEgressRequest) *livekit.EgressInfo {\n\t// send start request\n\tinfo, err := r.StartEgress(context.Background(), req)\n\n\t// check returned egress info\n\trequire.NoError(t, err)\n\trequire.Empty(t, info.Error)\n\trequire.NotEmpty(t, info.EgressId)\n\tswitch req.Request.(type) {\n\tcase *rpc.StartEgressRequest_Web:\n\t\trequire.Empty(t, info.RoomName)\n\tcase *rpc.StartEgressRequest_Replay:\n\t\treplayReq := req.Request.(*rpc.StartEgressRequest_Replay).Replay\n\t\tif _, ok := replayReq.Source.(*livekit.ExportReplayRequest_Web); ok {\n\t\t\trequire.Empty(t, info.RoomName)\n\t\t}\n\tdefault:\n\t\trequire.Equal(t, r.RoomName, info.RoomName)\n\t}\n\n\trequire.Equal(t, livekit.EgressStatus_EGRESS_STARTING.String(), info.Status.String())\n\treturn info\n}\n\nfunc (r *Runner) checkUpdate(t *testing.T, egressID string, status livekit.EgressStatus) *livekit.EgressInfo {\n\tinfo := r.getUpdate(t, egressID)\n\n\trequire.Equal(t, status.String(), info.Status.String(), info.Error)\n\trequire.Equal(t, info.Status == livekit.EgressStatus_EGRESS_FAILED, info.Error != \"\")\n\n\treturn info\n}\n\nfunc (r *Runner) checkStreamUpdate(t *testing.T, egressID string, expected map[string]livekit.StreamInfo_Status) {\n\tfor {\n\t\tinfo := r.getUpdate(t, egressID)\n\t\tif len(expected) != len(info.StreamResults) {\n\t\t\tcontinue\n\t\t}\n\t\trequire.Equal(t, len(expected), len(info.StreamResults))\n\n\t\tcheckNext := false\n\t\tfor _, s := range 
info.StreamResults {\n\t\t\trequire.Equal(t, s.Status == livekit.StreamInfo_FAILED, s.Error != \"\")\n\t\t\tif expected[s.Url] > s.Status {\n\t\t\t\tlogger.Debugw(fmt.Sprintf(\"stream status %s, expecting %s\", s.Status.String(), expected[s.Url].String()))\n\t\t\t\tcheckNext = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\trequire.Equal(t, expected[s.Url], s.Status)\n\t\t}\n\n\t\tif !checkNext {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (r *Runner) getUpdate(t *testing.T, egressID string) *livekit.EgressInfo {\n\tfor {\n\t\tselect {\n\t\tcase info := <-r.updates:\n\t\t\tif info.EgressId == egressID {\n\t\t\t\treturn info\n\t\t\t}\n\n\t\tcase <-time.After(time.Second * 30):\n\t\t\tr.createDotFile(t, egressID)\n\t\t\tt.Fatal(\"no update from results channel\")\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\nfunc (r *Runner) getStatus(t *testing.T) map[string]interface{} {\n\tb, err := r.svc.Status()\n\trequire.NoError(t, err)\n\n\tstatus := make(map[string]interface{})\n\terr = json.Unmarshal(b, &status)\n\trequire.NoError(t, err)\n\n\treturn status\n}\n\nfunc (r *Runner) createDotFile(t *testing.T, egressID string) {\n\tdot, err := r.svc.GetGstPipelineDotFile(egressID)\n\trequire.NoError(t, err)\n\n\tfilename := strings.ReplaceAll(t.Name()[11:], \"/\", \"_\")\n\tfilepath := fmt.Sprintf(\"%s/%s.dot\", r.FilePrefix, filename)\n\tf, err := os.Create(filepath)\n\trequire.NoError(t, err)\n\tdefer f.Close()\n\n\t_, err = f.WriteString(dot)\n\trequire.NoError(t, err)\n}\n\nfunc (r *Runner) stopEgress(t *testing.T, egressID string) *livekit.EgressInfo {\n\t// send stop request\n\tinfo, err := r.client.StopEgress(context.Background(), egressID, &livekit.StopEgressRequest{\n\t\tEgressId: egressID,\n\t})\n\n\t// check returned egress info\n\trequire.NoError(t, err)\n\trequire.Empty(t, info.Error)\n\trequire.NotEmpty(t, info.StartedAt)\n\trequire.Equal(t, livekit.EgressStatus_EGRESS_ENDING.String(), info.Status.String())\n\n\t// check ending update\n\tr.checkUpdate(t, egressID, 
livekit.EgressStatus_EGRESS_ENDING)\n\n\t// get final info\n\tres := r.checkUpdate(t, egressID, livekit.EgressStatus_EGRESS_COMPLETE)\n\n\t// check status\n\tif r.HealthPort != 0 {\n\t\tstatus := r.getStatus(t)\n\t\trequire.Len(t, status, 1)\n\t}\n\n\treturn res\n}\n"
  },
  {
    "path": "test/integration_test.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n//go:build integration\n\npackage test\n\nimport (\n\t\"embed\"\n\t\"io/fs\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/livekit/egress/pkg/info\"\n\t\"github.com/livekit/egress/pkg/server\"\n\t\"github.com/livekit/protocol/redis\"\n\t\"github.com/livekit/psrpc\"\n)\n\nvar (\n\t//go:embed templates\n\ttemplateEmbedFs embed.FS\n)\n\nfunc TestEgress(t *testing.T) {\n\tr := NewRunner(t)\n\n\trfs, err := fs.Sub(templateEmbedFs, \"templates\")\n\trequire.NoError(t, err)\n\n\t// rpc client and server\n\trc, err := redis.GetRedisClient(r.Redis)\n\trequire.NoError(t, err)\n\tbus := psrpc.NewRedisMessageBus(rc)\n\n\tioClient, err := info.NewSessionReporter(&r.BaseConfig, bus)\n\trequire.NoError(t, err)\n\n\tsvc, err := server.NewServer(r.ServiceConfig, bus, ioClient)\n\trequire.NoError(t, err)\n\n\tr.StartServer(t, svc, bus, rfs)\n\tr.RunTests(t)\n}\n"
  },
  {
    "path": "test/ioserver.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n//go:build integration\n\npackage test\n\nimport (\n\t\"context\"\n\n\t\"google.golang.org/protobuf/types/known/emptypb\"\n\n\t\"github.com/livekit/protocol/livekit\"\n\t\"github.com/livekit/protocol/logger\"\n\t\"github.com/livekit/protocol/rpc\"\n\t\"github.com/livekit/psrpc\"\n)\n\ntype ioTestServer struct {\n\trpc.IOInfoServerImpl\n\tserver  rpc.IOInfoServer\n\tupdates chan *livekit.EgressInfo\n}\n\nfunc newIOTestServer(bus psrpc.MessageBus, updates chan *livekit.EgressInfo) (*ioTestServer, error) {\n\ts := &ioTestServer{\n\t\tupdates: updates,\n\t}\n\tserver, err := rpc.NewIOInfoServer(s, bus)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ts.server = server\n\treturn s, nil\n}\n\nfunc (s *ioTestServer) CreateEgress(_ context.Context, info *livekit.EgressInfo) (*emptypb.Empty, error) {\n\tlogger.Infow(\"egress created\", \"egressID\", info.EgressId)\n\treturn &emptypb.Empty{}, nil\n}\n\nfunc (s *ioTestServer) UpdateEgress(_ context.Context, info *livekit.EgressInfo) (*emptypb.Empty, error) {\n\tlogger.Infow(\"egress updated\", \"egressID\", info.EgressId, \"status\", info.Status)\n\ts.updates <- info\n\treturn &emptypb.Empty{}, nil\n}\n\nfunc (s *ioTestServer) UpdateMetrics(_ context.Context, _ *rpc.UpdateMetricsRequest) (*emptypb.Empty, error) {\n\treturn &emptypb.Empty{}, nil\n}\n"
  },
  {
    "path": "test/multi.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n//go:build integration\n\npackage test\n\nimport (\n\t\"context\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/livekit\"\n)\n\nfunc (r *Runner) testMulti(t *testing.T) {\n\tif !r.should(runMulti) {\n\t\treturn\n\t}\n\n\tt.Run(\"Multi\", func(t *testing.T) {\n\t\tfor _, test := range []*testCase{\n\n\t\t\t// ---- Room Composite -----\n\n\t\t\t{\n\t\t\t\tname:        \"RoomComposite\",\n\t\t\t\trequestType: types.RequestTypeRoomComposite, publishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\tvideoCodec: types.MimeTypeVP8,\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"rc_multiple_{time}\",\n\t\t\t\t},\n\t\t\t\timageOptions: &imageOptions{\n\t\t\t\t\tprefix: \"rc_image\",\n\t\t\t\t},\n\t\t\t\tmulti: true,\n\t\t\t},\n\n\t\t\t// ---------- Web ----------\n\n\t\t\t{\n\t\t\t\tname:        \"Web\",\n\t\t\t\trequestType: types.RequestTypeWeb,\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"web_multiple_{time}\",\n\t\t\t\t},\n\t\t\t\tsegmentOptions: &segmentOptions{\n\t\t\t\t\tprefix:   \"web_multiple_{time}\",\n\t\t\t\t\tplaylist: \"web_multiple_{time}.m3u8\",\n\t\t\t\t},\n\t\t\t\tmulti: true,\n\t\t\t},\n\n\t\t\t// ------ Participant 
------\n\n\t\t\t{\n\t\t\t\tname:        \"ParticipantComposite\",\n\t\t\t\trequestType: types.RequestTypeParticipant, publishOptions: publishOptions{\n\t\t\t\t\taudioCodec:     types.MimeTypeOpus,\n\t\t\t\t\taudioUnpublish: time.Second * 20,\n\t\t\t\t\tvideoCodec:     types.MimeTypeVP8,\n\t\t\t\t\tvideoDelay:     time.Second * 5,\n\t\t\t\t},\n\t\t\t\tfileOptions: &fileOptions{\n\t\t\t\t\tfilename: \"participant_{publisher_identity}_multi_{time}.mp4\",\n\t\t\t\t},\n\t\t\t\tstreamOptions: &streamOptions{\n\t\t\t\t\toutputType: types.OutputTypeRTMP,\n\t\t\t\t},\n\t\t\t\tsegmentOptions: &segmentOptions{\n\t\t\t\t\tprefix:   \"participant_{publisher_identity}_multi_{time}\",\n\t\t\t\t\tplaylist: \"participant_{publisher_identity}_multi_{time}.m3u8\",\n\t\t\t\t},\n\t\t\t\tmulti: true,\n\t\t\t},\n\n\t\t\t// ---- Track Composite ----\n\n\t\t\t{\n\t\t\t\tname:        \"TrackComposite\",\n\t\t\t\trequestType: types.RequestTypeTrackComposite, publishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\tvideoCodec: types.MimeTypeVP8,\n\t\t\t\t},\n\t\t\t\tstreamOptions: &streamOptions{\n\t\t\t\t\toutputType: types.OutputTypeRTMP,\n\t\t\t\t},\n\t\t\t\tsegmentOptions: &segmentOptions{\n\t\t\t\t\tprefix:   \"tc_multiple_{time}\",\n\t\t\t\t\tplaylist: \"tc_multiple_{time}.m3u8\",\n\t\t\t\t},\n\t\t\t\tmulti: true,\n\t\t\t},\n\t\t} {\n\t\t\tif !r.run(t, test, r.runMultiTest) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc (r *Runner) runMultiTest(t *testing.T, test *testCase) {\n\treq := r.build(test)\n\n\tegressID := r.startEgress(t, req)\n\ttime.Sleep(time.Second * 10)\n\n\t// get params\n\tp, err := config.GetValidatedPipelineConfig(r.ServiceConfig, req)\n\trequire.NoError(t, err)\n\n\tif test.streamOptions != nil {\n\t\t_, err = r.client.UpdateStream(context.Background(), egressID, &livekit.UpdateStreamRequest{\n\t\t\tEgressId:      egressID,\n\t\t\tAddOutputUrls: []string{rtmpUrl3},\n\t\t})\n\t\trequire.NoError(t, err)\n\n\t\ttime.Sleep(time.Second * 
10)\n\t\tr.verifyStreams(t, nil, p, rtmpUrl3)\n\t\tr.checkStreamUpdate(t, egressID, map[string]livekit.StreamInfo_Status{\n\t\t\trtmpUrl3Redacted: livekit.StreamInfo_ACTIVE,\n\t\t})\n\t\ttime.Sleep(time.Second * 10)\n\t} else {\n\t\ttime.Sleep(time.Second * 20)\n\t}\n\n\tres := r.stopEgress(t, egressID)\n\tif test.fileOptions != nil {\n\t\tr.verifyFile(t, test, p, res)\n\t}\n\tif test.segmentOptions != nil {\n\t\trequire.Len(t, res.GetSegmentResults(), 1)\n\t\tsegments := res.GetSegmentResults()[0]\n\t\trequire.Greater(t, segments.Size, int64(0))\n\t\trequire.NotContains(t, segments.PlaylistName, \"{\")\n\t\trequire.NotContains(t, segments.PlaylistLocation, \"{\")\n\t\tif segments.LivePlaylistName != \"\" {\n\t\t\trequire.NotContains(t, segments.LivePlaylistName, \"{\")\n\t\t}\n\t\tif segments.LivePlaylistLocation != \"\" {\n\t\t\trequire.NotContains(t, segments.LivePlaylistLocation, \"{\")\n\t\t}\n\t\tr.verifySegments(t, test, p, test.segmentOptions.suffix, res, false)\n\t}\n\tif test.imageOptions != nil {\n\t\tr.verifyImages(t, p, res)\n\t}\n}\n"
  },
  {
    "path": "test/publish.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n//go:build integration\n\npackage test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/livekit/egress/pkg/types\"\n\tlksdk \"github.com/livekit/server-sdk-go/v2\"\n)\n\nvar (\n\tsamples = map[types.MimeType]string{\n\t\ttypes.MimeTypeOpus: \"/media-samples/avsync_minmotion_livekit_audio_48k_120s.ogg\",\n\t\ttypes.MimeTypeH264: \"/media-samples/avsync_minmotion_livekit_video_1080p25_120s.h264\",\n\t\ttypes.MimeTypeVP8:  \"/media-samples/avsync_minmotion_livekit_1080p24_vp8.ivf\",\n\t\ttypes.MimeTypeVP9:  \"/media-samples/avsync_minmotion_livekit_1080p24_vp9.ivf\",\n\t\ttypes.MimeTypePCMU: \"/media-samples/avsync_minmotion_livekit_audio_8k_120s_pcmu.wav\",\n\t\ttypes.MimeTypePCMA: \"/media-samples/avsync_minmotion_livekit_audio_8k_120s_pcma.wav\",\n\t}\n\n\tframeDurations = map[types.MimeType]time.Duration{\n\t\ttypes.MimeTypeH264: time.Microsecond * 41667,\n\t\ttypes.MimeTypeVP8:  time.Microsecond * 41667,\n\t\ttypes.MimeTypeVP9:  time.Microsecond * 41667,\n\t\ttypes.MimeTypePCMU: time.Millisecond * 20,\n\t\ttypes.MimeTypePCMA: time.Millisecond * 20,\n\t}\n)\n\nfunc (r *Runner) publishSample(t *testing.T, codec types.MimeType, publishAfter, unpublishAfter time.Duration, withMuting bool) string {\n\tif codec == \"\" {\n\t\treturn \"\"\n\t}\n\n\ttrackID := make(chan string, 
1)\n\ttime.AfterFunc(publishAfter, func() {\n\t\tdone := make(chan struct{})\n\t\tunpublished := make(chan struct{})\n\n\t\tpub := r.publish(t, r.room.LocalParticipant, codec, done)\n\t\ttrackID <- pub.SID()\n\n\t\tif withMuting {\n\t\t\tgo func() {\n\t\t\t\tmuted := false\n\t\t\t\ttime.Sleep(time.Second * 15)\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-unpublished:\n\t\t\t\t\t\treturn\n\t\t\t\t\tcase <-done:\n\t\t\t\t\t\treturn\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tpub.SetMuted(!muted)\n\t\t\t\t\t\tmuted = !muted\n\t\t\t\t\t\ttime.Sleep(time.Second * 10)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\n\t\tif unpublishAfter != 0 {\n\t\t\ttime.AfterFunc(unpublishAfter-publishAfter, func() {\n\t\t\t\tselect {\n\t\t\t\tcase <-done:\n\t\t\t\t\treturn\n\t\t\t\tdefault:\n\t\t\t\t\tclose(unpublished)\n\t\t\t\t\t_ = r.room.LocalParticipant.UnpublishTrack(pub.SID())\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n\n\tif publishAfter == 0 {\n\t\treturn <-trackID\n\t}\n\treturn \"TBD\"\n}\n\nfunc (r *Runner) publishSampleWithDisconnection(t *testing.T, codec types.MimeType) string {\n\tpub := r.publish(t, r.room.LocalParticipant, codec, make(chan struct{}))\n\ttrackID := pub.SID()\n\n\ttime.AfterFunc(time.Second*10, func() {\n\t\tpub.SimulateDisconnection(time.Second * 10)\n\t})\n\n\treturn trackID\n}\n\nfunc (r *Runner) publish(t *testing.T, p *lksdk.LocalParticipant, codec types.MimeType, done chan struct{}) *lksdk.LocalTrackPublication {\n\tfilename := samples[codec]\n\tframeDuration := frameDurations[codec]\n\n\tvar pub *lksdk.LocalTrackPublication\n\topts := []lksdk.ReaderSampleProviderOption{\n\t\tlksdk.ReaderTrackWithOnWriteComplete(func() {\n\t\t\tclose(done)\n\t\t\tif pub != nil {\n\t\t\t\t_ = p.UnpublishTrack(pub.SID())\n\t\t\t}\n\t\t}),\n\t}\n\n\tif frameDuration != 0 {\n\t\topts = append(opts, lksdk.ReaderTrackWithFrameDuration(frameDuration))\n\t}\n\n\ttrack, err := lksdk.NewLocalFileTrack(filename, opts...)\n\trequire.NoError(t, err)\n\n\tpub, err = p.PublishTrack(track, 
&lksdk.TrackPublicationOptions{Name: filename})\n\trequire.NoError(t, err)\n\n\ttrackID := pub.SID()\n\tt.Cleanup(func() {\n\t\t_ = p.UnpublishTrack(trackID)\n\t})\n\n\treturn pub\n}\n"
  },
  {
    "path": "test/runner.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n//go:build integration\n\npackage test\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io/fs\"\n\t\"math/rand\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/require\"\n\t\"gopkg.in/yaml.v3\"\n\n\t\"github.com/livekit/protocol/livekit\"\n\t\"github.com/livekit/protocol/logger\"\n\t\"github.com/livekit/protocol/rpc\"\n\t\"github.com/livekit/psrpc\"\n\tlksdk \"github.com/livekit/server-sdk-go/v2\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n)\n\ntype Runner struct {\n\tStartEgress func(ctx context.Context, request *rpc.StartEgressRequest) (*livekit.EgressInfo, error) `yaml:\"-\"`\n\n\tsvc             Server                   `yaml:\"-\"`\n\tclient          rpc.EgressClient         `yaml:\"-\"`\n\troom            *lksdk.Room              `yaml:\"-\"`\n\tupdates         chan *livekit.EgressInfo `yaml:\"-\"`\n\tsourceFramerate float64                  `yaml:\"-\"`\n\ttestNumber      int                      `yaml:\"-\"`\n\n\t// service config\n\t*config.ServiceConfig `yaml:\",inline\"`\n\tS3Upload              *livekit.S3Upload        `yaml:\"-\"`\n\tGCPUpload             *livekit.GCPUpload       `yaml:\"-\"`\n\tAzureUpload           *livekit.AzureBlobUpload `yaml:\"-\"`\n\n\t// testing config\n\tFilePrefix   string `yaml:\"file_prefix\"`\n\tRoomName     string `yaml:\"room_name\"`\n\tRoomBaseName string 
`yaml:\"-\"`\n\tMuting       bool   `yaml:\"muting\"`\n\tDotfiles     bool   `yaml:\"dot_files\"`\n\tShort        bool   `yaml:\"short\"`\n\n\t// flagset used to determine which tests to run\n\tshouldRun uint `yaml:\"-\"`\n\n\tRoomTestsOnly           bool `yaml:\"room_only\"`\n\tWebTestsOnly            bool `yaml:\"web_only\"`\n\tParticipantTestsOnly    bool `yaml:\"participant_only\"`\n\tTrackCompositeTestsOnly bool `yaml:\"track_composite_only\"`\n\tTrackTestsOnly          bool `yaml:\"track_only\"`\n\tTemplateTestsOnly       bool `yaml:\"template_only\"`\n\tMediaTestsOnly          bool `yaml:\"media_only\"`\n\tEdgeCasesOnly           bool `yaml:\"edge_cases_only\"`\n\n\tFileTestsOnly    bool `yaml:\"file_only\"`\n\tStreamTestsOnly  bool `yaml:\"stream_only\"`\n\tSegmentTestsOnly bool `yaml:\"segments_only\"`\n\tImageTestsOnly   bool `yaml:\"images_only\"`\n\tMultiTestsOnly   bool `yaml:\"multi_only\"`\n}\n\ntype Server interface {\n\tStartTemplatesServer(fs.FS) error\n\tRun() error\n\tStatus() ([]byte, error)\n\tGetGstPipelineDotFile(string) (string, error)\n\tIsIdle() bool\n\tKillAll()\n\tShutdown(bool, bool)\n\tDrain()\n}\n\nfunc NewRunner(t *testing.T) *Runner {\n\tconfString := os.Getenv(\"EGRESS_CONFIG_STRING\")\n\tif confString == \"\" {\n\t\tconfFile := os.Getenv(\"EGRESS_CONFIG_FILE\")\n\t\trequire.NotEmpty(t, confFile)\n\t\tb, err := os.ReadFile(confFile)\n\t\trequire.NoError(t, err)\n\t\tconfString = string(b)\n\t}\n\n\tr := &Runner{}\n\terr := yaml.Unmarshal([]byte(confString), r)\n\trequire.NoError(t, err)\n\n\tswitch os.Getenv(\"INTEGRATION_TYPE\") {\n\tcase \"room\":\n\t\tr.RoomTestsOnly = true\n\t\tr.RoomName = fmt.Sprintf(\"room-integration-%d\", rand.Intn(100))\n\tcase \"web\":\n\t\tr.WebTestsOnly = true\n\t\tr.RoomName = fmt.Sprintf(\"web-integration-%d\", rand.Intn(100))\n\tcase \"participant\":\n\t\tr.ParticipantTestsOnly = true\n\t\tr.RoomName = fmt.Sprintf(\"participant-integration-%d\", rand.Intn(100))\n\tcase 
\"track_composite\":\n\t\tr.TrackCompositeTestsOnly = true\n\t\tr.RoomName = fmt.Sprintf(\"track-composite-integration-%d\", rand.Intn(100))\n\tcase \"track\":\n\t\tr.TrackTestsOnly = true\n\t\tr.RoomName = fmt.Sprintf(\"track-integration-%d\", rand.Intn(100))\n\tcase \"template\":\n\t\tr.TemplateTestsOnly = true\n\t\tr.RoomName = fmt.Sprintf(\"template-integration-%d\", rand.Intn(100))\n\tcase \"media\":\n\t\tr.MediaTestsOnly = true\n\t\tr.RoomName = fmt.Sprintf(\"media-integration-%d\", rand.Intn(100))\n\tcase \"file-room\":\n\t\tr.shouldRun = runFile | runRoom | runWeb | runTemplate\n\t\tr.RoomName = fmt.Sprintf(\"file-room-integration-%d\", rand.Intn(100))\n\tcase \"file-track\":\n\t\tr.shouldRun = runFile | runTrackComposite | runTrack\n\t\tr.RoomName = fmt.Sprintf(\"file-track-integration-%d\", rand.Intn(100))\n\tcase \"file-media\":\n\t\tr.shouldRun = runFile | runMedia | runParticipant\n\t\tr.RoomName = fmt.Sprintf(\"file-media-integration-%d\", rand.Intn(100))\n\tcase \"file\":\n\t\tr.FileTestsOnly = true\n\t\tr.RoomName = fmt.Sprintf(\"file-integration-%d\", rand.Intn(100))\n\tcase \"stream\":\n\t\tr.StreamTestsOnly = true\n\t\tr.RoomName = fmt.Sprintf(\"stream-integration-%d\", rand.Intn(100))\n\tcase \"segments\":\n\t\tr.SegmentTestsOnly = true\n\t\tr.RoomName = fmt.Sprintf(\"segments-integration-%d\", rand.Intn(100))\n\tcase \"images\":\n\t\tr.ImageTestsOnly = true\n\t\tr.RoomName = fmt.Sprintf(\"images-integration-%d\", rand.Intn(100))\n\tcase \"multi\":\n\t\tr.MultiTestsOnly = true\n\t\tr.RoomName = fmt.Sprintf(\"multi-integration-%d\", rand.Intn(100))\n\tcase \"edge\":\n\t\tr.EdgeCasesOnly = true\n\t\tr.RoomName = fmt.Sprintf(\"edge-integration-%d\", rand.Intn(100))\n\tdefault:\n\t\tif r.RoomName == \"\" {\n\t\t\tr.RoomName = fmt.Sprintf(\"egress-integration-%d\", rand.Intn(100))\n\t\t}\n\t}\n\n\tconf, err := config.NewServiceConfig(confString)\n\trequire.NoError(t, err)\n\n\tr.ServiceConfig = conf\n\n\tif conf.ApiKey == \"\" || conf.ApiSecret == 
\"\" || conf.WsUrl == \"\" {\n\t\tt.Fatal(\"api key, secret, and ws url required\")\n\t}\n\tif conf.Redis == nil {\n\t\tt.Fatal(\"redis required\")\n\t}\n\n\tif s3 := os.Getenv(\"S3_UPLOAD\"); s3 != \"\" {\n\t\tlogger.Infow(\"using s3 uploads\")\n\t\tr.S3Upload = &livekit.S3Upload{}\n\t\trequire.NoError(t, json.Unmarshal([]byte(s3), r.S3Upload))\n\t} else {\n\t\tlogger.Infow(\"no s3 config supplied\")\n\t}\n\n\tif gcp := os.Getenv(\"GCP_UPLOAD\"); gcp != \"\" {\n\t\tlogger.Infow(\"using gcp uploads\")\n\t\tr.GCPUpload = &livekit.GCPUpload{}\n\t\trequire.NoError(t, json.Unmarshal([]byte(gcp), r.GCPUpload))\n\t} else {\n\t\tlogger.Infow(\"no gcp config supplied\")\n\t}\n\n\tif azure := os.Getenv(\"AZURE_UPLOAD\"); azure != \"\" {\n\t\tlogger.Infow(\"using azure uploads\")\n\t\tr.AzureUpload = &livekit.AzureBlobUpload{}\n\t\trequire.NoError(t, json.Unmarshal([]byte(azure), r.AzureUpload))\n\t} else {\n\t\tlogger.Infow(\"no azure config supplied\")\n\t}\n\n\tif r.RoomBaseName == \"\" {\n\t\tr.RoomBaseName = r.RoomName\n\t}\n\n\tif r.shouldRun == 0 {\n\t\tr.updateFlagset()\n\t}\n\n\treturn r\n}\n\nfunc (r *Runner) connectRoom(t *testing.T, roomName string, codecs []livekit.Codec) {\n\tif r.room != nil {\n\t\tr.room.Disconnect()\n\t}\n\n\topts := []lksdk.ConnectOption{}\n\tif len(codecs) > 0 {\n\t\topts = append(opts, lksdk.WithCodecs(codecs))\n\t}\n\n\troom, err := lksdk.ConnectToRoom(r.WsUrl, lksdk.ConnectInfo{\n\t\tAPIKey:              r.ApiKey,\n\t\tAPISecret:           r.ApiSecret,\n\t\tRoomName:            roomName,\n\t\tParticipantName:     \"egress-sample\",\n\t\tParticipantIdentity: fmt.Sprintf(\"sample-%d\", rand.Intn(100)),\n\t}, lksdk.NewRoomCallback(), opts...)\n\trequire.NoError(t, err)\n\n\tr.room = room\n\tr.RoomName = roomName\n}\n\nfunc (r *Runner) StartServer(t *testing.T, svc Server, bus psrpc.MessageBus, templateFs fs.FS) {\n\tr.svc = svc\n\tt.Cleanup(func() {\n\t\tif r.room != nil {\n\t\t\tr.room.Disconnect()\n\t\t}\n\t\tr.svc.Shutdown(false, 
true)\n\t})\n\n\tr.connectRoom(t, r.RoomName, nil)\n\n\tpsrpcClient, err := rpc.NewEgressClient(rpc.ClientParams{Bus: bus})\n\trequire.NoError(t, err)\n\tr.StartEgress = func(ctx context.Context, req *rpc.StartEgressRequest) (*livekit.EgressInfo, error) {\n\t\treturn psrpcClient.StartEgress(ctx, \"\", req)\n\t}\n\n\t// start templates handler\n\terr = r.svc.StartTemplatesServer(templateFs)\n\trequire.NoError(t, err)\n\n\tgo r.svc.Run()\n\ttime.Sleep(time.Second * 3)\n\n\t// subscribe to update channel\n\tpsrpcUpdates := make(chan *livekit.EgressInfo, 100)\n\t_, err = newIOTestServer(bus, psrpcUpdates)\n\trequire.NoError(t, err)\n\n\t// update test config\n\tr.client = psrpcClient\n\tr.updates = psrpcUpdates\n\n\t// check status\n\tif r.HealthPort != 0 {\n\t\tstatus := r.getStatus(t)\n\t\trequire.Len(t, status, 1)\n\t\trequire.Contains(t, status, \"CpuLoad\")\n\t}\n}\n"
  },
  {
    "path": "test/segments.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n//go:build integration\n\npackage test\n\nimport (\n\t\"os\"\n\t\"path\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/pipeline/sink/m3u8\"\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/livekit\"\n)\n\nfunc (r *Runner) testSegments(t *testing.T) {\n\tif !r.should(runSegments) {\n\t\treturn\n\t}\n\n\tt.Run(\"Segments\", func(t *testing.T) {\n\t\tfor _, test := range []*testCase{\n\n\t\t\t// ---- Room Composite -----\n\n\t\t\t{\n\t\t\t\tname:        \"RoomComposite\",\n\t\t\t\trequestType: types.RequestTypeRoomComposite,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\tvideoCodec: types.MimeTypeVP8,\n\t\t\t\t\tlayout:     \"speaker\",\n\t\t\t\t},\n\t\t\t\tencodingOptions: &livekit.EncodingOptions{\n\t\t\t\t\tAudioCodec:   livekit.AudioCodec_AAC,\n\t\t\t\t\tVideoCodec:   livekit.VideoCodec_H264_BASELINE,\n\t\t\t\t\tWidth:        1920,\n\t\t\t\t\tHeight:       1080,\n\t\t\t\t\tVideoBitrate: 4500,\n\t\t\t\t},\n\t\t\t\tsegmentOptions: &segmentOptions{\n\t\t\t\t\tprefix:       \"r_{room_name}_{time}\",\n\t\t\t\t\tplaylist:     \"r_{room_name}_{time}.m3u8\",\n\t\t\t\t\tlivePlaylist: \"r_live_{room_name}_{time}.m3u8\",\n\t\t\t\t\tsuffix:       
livekit.SegmentedFileSuffix_INDEX,\n\t\t\t\t},\n\t\t\t\tcontentCheck: r.fullContentCheck,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"RoomComposite/AudioOnly\",\n\t\t\t\trequestType: types.RequestTypeRoomComposite,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\taudioOnly:  true,\n\t\t\t\t},\n\t\t\t\tencodingOptions: &livekit.EncodingOptions{\n\t\t\t\t\tAudioCodec: livekit.AudioCodec_AAC,\n\t\t\t\t},\n\t\t\t\tsegmentOptions: &segmentOptions{\n\t\t\t\t\tprefix:   \"r_{room_name}_audio_{time}\",\n\t\t\t\t\tplaylist: \"r_{room_name}_audio_{time}.m3u8\",\n\t\t\t\t\tsuffix:   livekit.SegmentedFileSuffix_TIMESTAMP,\n\t\t\t\t},\n\t\t\t\tcontentCheck: r.audioOnlyContentCheck,\n\t\t\t},\n\n\t\t\t// ---------- Web ----------\n\n\t\t\t{\n\t\t\t\tname:        \"Web\",\n\t\t\t\trequestType: types.RequestTypeWeb,\n\t\t\t\tsegmentOptions: &segmentOptions{\n\t\t\t\t\tprefix:   \"web_{time}\",\n\t\t\t\t\tplaylist: \"web_{time}.m3u8\",\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t// ------ Participant ------\n\n\t\t\t{\n\t\t\t\tname:        \"ParticipantComposite/VP8\",\n\t\t\t\trequestType: types.RequestTypeParticipant,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\tvideoCodec: types.MimeTypeVP8,\n\t\t\t\t\t// videoDelay:     time.Second * 10,\n\t\t\t\t\t// videoUnpublish: time.Second * 20,\n\t\t\t\t},\n\t\t\t\tsegmentOptions: &segmentOptions{\n\t\t\t\t\tprefix:   \"participant_{publisher_identity}_vp8_{time}\",\n\t\t\t\t\tplaylist: \"participant_{publisher_identity}_vp8_{time}.m3u8\",\n\t\t\t\t},\n\t\t\t\tcontentCheck: r.fullContentCheck,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"ParticipantComposite/H264\",\n\t\t\t\trequestType: types.RequestTypeParticipant,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec:     types.MimeTypeOpus,\n\t\t\t\t\taudioDelay:     time.Second * 10,\n\t\t\t\t\taudioUnpublish: time.Second * 20,\n\t\t\t\t\tvideoCodec:     
types.MimeTypeH264,\n\t\t\t\t},\n\t\t\t\tsegmentOptions: &segmentOptions{\n\t\t\t\t\tprefix:   \"participant_{room_name}_h264_{time}\",\n\t\t\t\t\tplaylist: \"participant_{room_name}_h264_{time}.m3u8\",\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t// ---- Track Composite ----\n\n\t\t\t{\n\t\t\t\tname:        \"TrackComposite/H264\",\n\t\t\t\trequestType: types.RequestTypeTrackComposite,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\tvideoCodec: types.MimeTypeH264,\n\t\t\t\t},\n\t\t\t\tsegmentOptions: &segmentOptions{\n\t\t\t\t\tprefix:       \"tcs_{room_name}_h264_{time}\",\n\t\t\t\t\tplaylist:     \"tcs_{room_name}_h264_{time}.m3u8\",\n\t\t\t\t\tlivePlaylist: \"tcs_live_{room_name}_h264_{time}.m3u8\",\n\t\t\t\t},\n\t\t\t\tcontentCheck: r.fullContentCheck,\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        \"TrackComposite/AudioOnly\",\n\t\t\t\trequestType: types.RequestTypeTrackComposite,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\taudioOnly:  true,\n\t\t\t\t},\n\t\t\t\tsegmentOptions: &segmentOptions{\n\t\t\t\t\tprefix:   \"tcs_{room_name}_audio_{time}\",\n\t\t\t\t\tplaylist: \"tcs_{room_name}_audio_{time}.m3u8\",\n\t\t\t\t},\n\t\t\t\tcontentCheck: r.audioOnlyContentCheck,\n\t\t\t},\n\n\t\t\t// --------- Web V2 --------\n\n\t\t\t{\n\t\t\t\tname:        \"WebV2\",\n\t\t\t\trequestType: types.RequestTypeWeb,\n\t\t\t\tsegmentOptions: &segmentOptions{\n\t\t\t\t\tprefix:   \"webv2_{time}\",\n\t\t\t\t\tplaylist: \"webv2_{time}.m3u8\",\n\t\t\t\t},\n\t\t\t\tv2OutputOptions: &v2OutputOptions{},\n\t\t\t},\n\t\t} {\n\t\t\tif !r.run(t, test, r.runSegmentsTest) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc (r *Runner) runSegmentsTest(t *testing.T, test *testCase) {\n\treq := r.buildRequest(test)\n\n\tegressID := r.startEgress(t, req)\n\n\ttime.Sleep(time.Second * 10)\n\tif r.Dotfiles {\n\t\tr.createDotFile(t, egressID)\n\t}\n\n\t// stop\n\ttime.Sleep(time.Second * 15)\n\tres := r.stopEgress(t, 
egressID)\n\n\t// get params\n\tp, err := config.GetValidatedPipelineConfig(r.ServiceConfig, req)\n\trequire.NoError(t, err)\n\n\trequire.Equal(t, !test.audioOnly, p.VideoEncoding)\n\n\tr.verifySegments(t, test, p, test.segmentOptions.suffix, res, test.livePlaylist != \"\")\n}\n\nfunc (r *Runner) verifySegments(\n\tt *testing.T, tc *testCase, p *config.PipelineConfig,\n\tfilenameSuffix livekit.SegmentedFileSuffix,\n\tres *livekit.EgressInfo, enableLivePlaylist bool,\n) {\n\t// egress info\n\trequire.Equal(t, res.Error == \"\", res.Status != livekit.EgressStatus_EGRESS_FAILED)\n\trequire.NotZero(t, res.StartedAt)\n\trequire.NotZero(t, res.EndedAt)\n\n\t// segments info\n\trequire.Len(t, res.GetSegmentResults(), 1)\n\tsegments := res.GetSegmentResults()[0]\n\n\trequire.Greater(t, segments.Size, int64(0))\n\trequire.Greater(t, segments.Duration, int64(0))\n\n\tr.verifySegmentOutput(t, tc, p, filenameSuffix, segmentPlaylist{\n\t\tname:         segments.PlaylistName,\n\t\tlocation:     segments.PlaylistLocation,\n\t\tsegmentCount: int(segments.SegmentCount),\n\t\tplaylistType: m3u8.PlaylistTypeEvent,\n\t}, res)\n\tif enableLivePlaylist {\n\t\tr.verifySegmentOutput(t, tc, p, filenameSuffix, segmentPlaylist{\n\t\t\tname:         segments.LivePlaylistName,\n\t\t\tlocation:     segments.LivePlaylistLocation,\n\t\t\tsegmentCount: 5,\n\t\t\tplaylistType: m3u8.PlaylistTypeLive,\n\t\t}, res)\n\t}\n}\n\ntype segmentPlaylist struct {\n\tname         string\n\tlocation     string\n\tsegmentCount int\n\tplaylistType m3u8.PlaylistType\n}\n\nfunc (r *Runner) verifySegmentOutput(\n\tt *testing.T, tc *testCase, p *config.PipelineConfig,\n\tfilenameSuffix livekit.SegmentedFileSuffix,\n\tpl segmentPlaylist,\n\tres *livekit.EgressInfo,\n) {\n\n\trequire.NotEmpty(t, pl.name)\n\trequire.NotEmpty(t, pl.location)\n\n\tstoredPlaylistPath := pl.name\n\n\t// download from cloud storage\n\tlocalPlaylistPath := path.Join(r.FilePrefix, path.Base(storedPlaylistPath))\n\tdownload(t, 
p.GetSegmentConfig().StorageConfig, localPlaylistPath, storedPlaylistPath, false)\n\n\tif pl.playlistType == m3u8.PlaylistTypeEvent {\n\t\tmanifestLocal := path.Join(path.Dir(localPlaylistPath), res.EgressId+\".json\")\n\t\tmanifestStorage := path.Join(path.Dir(storedPlaylistPath), res.EgressId+\".json\")\n\t\tmanifest := loadManifest(t, p.GetSegmentConfig().StorageConfig, manifestLocal, manifestStorage)\n\n\t\tfor _, playlist := range manifest.Playlists {\n\t\t\trequire.Equal(t, pl.segmentCount, len(playlist.Segments))\n\t\t\tfor _, segment := range playlist.Segments {\n\t\t\t\tlocalPath := path.Join(r.FilePrefix, path.Base(segment.Filename))\n\t\t\t\tdownload(t, p.GetSegmentConfig().StorageConfig, localPath, segment.Filename, false)\n\t\t\t}\n\t\t}\n\t}\n\n\tverifyPlaylistProgramDateTime(t, filenameSuffix, localPlaylistPath, pl.playlistType)\n\n\t// verify\n\tinfo := verify(t, localPlaylistPath, p, res, types.EgressTypeSegments, r.Muting, r.sourceFramerate, pl.playlistType == m3u8.PlaylistTypeLive)\n\tif tc.contentCheck != nil && info != nil {\n\t\ttc.contentCheck(t, localPlaylistPath, info)\n\t}\n}\n\nfunc verifyPlaylistProgramDateTime(t *testing.T, filenameSuffix livekit.SegmentedFileSuffix, localPlaylistPath string, plType m3u8.PlaylistType) {\n\tp, err := readPlaylist(localPlaylistPath)\n\trequire.NoError(t, err)\n\trequire.Equal(t, string(plType), p.MediaType)\n\trequire.True(t, p.Closed)\n\n\tnow := time.Now()\n\n\tfor i, s := range p.Segments {\n\t\tconst leeway = 50 * time.Millisecond\n\n\t\t// Make sure the program date time is current, ie not more than 2 min in the past\n\t\trequire.InDelta(t, now.Unix(), s.ProgramDateTime.Unix(), 120)\n\n\t\tif filenameSuffix == livekit.SegmentedFileSuffix_TIMESTAMP {\n\t\t\tm := segmentTimeRegexp.FindStringSubmatch(s.Filename)\n\t\t\trequire.Equal(t, 3, len(m))\n\n\t\t\ttm, err := time.Parse(\"20060102150405\", m[1])\n\t\t\trequire.NoError(t, err)\n\n\t\t\tms, err := strconv.Atoi(m[2])\n\t\t\trequire.NoError(t, 
err)\n\n\t\t\ttm = tm.Add(time.Duration(ms) * time.Millisecond)\n\n\t\t\trequire.InDelta(t, s.ProgramDateTime.UnixNano(), tm.UnixNano(), float64(time.Millisecond))\n\t\t}\n\n\t\tif i < len(p.Segments)-2 {\n\t\t\tnextSegmentStartDate := p.Segments[i+1].ProgramDateTime\n\n\t\t\tdateDuration := nextSegmentStartDate.Sub(s.ProgramDateTime)\n\t\t\trequire.InDelta(t, time.Duration(s.Duration*float64(time.Second)), dateDuration, float64(leeway))\n\t\t}\n\t}\n}\n\ntype Playlist struct {\n\tVersion        int\n\tMediaType      string\n\tTargetDuration int\n\tSegments       []*Segment\n\tClosed         bool\n}\n\ntype Segment struct {\n\tProgramDateTime time.Time\n\tDuration        float64\n\tFilename        string\n}\n\nfunc readPlaylist(filename string) (*Playlist, error) {\n\tb, err := os.ReadFile(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar segmentLineStart = 5\n\tvar i = 1\n\n\tlines := strings.Split(string(b), \"\\n\")\n\tversion, _ := strconv.Atoi(strings.Split(lines[i], \":\")[1])\n\ti++\n\tvar mediaType string\n\tif strings.Contains(string(b), \"#EXT-X-PLAYLIST-TYPE\") {\n\t\tmediaType = strings.Split(lines[i], \":\")[1]\n\t\tsegmentLineStart++\n\t\ti++\n\t}\n\ti++ // #EXT-X-ALLOW-CACHE:NO hardcoded\n\ttargetDuration, _ := strconv.Atoi(strings.Split(lines[i], \":\")[1])\n\n\tp := &Playlist{\n\t\tVersion:        version,\n\t\tMediaType:      mediaType,\n\t\tTargetDuration: targetDuration,\n\t\tSegments:       make([]*Segment, 0),\n\t}\n\n\tfor i = segmentLineStart; i < len(lines)-3; i += 3 {\n\t\tstartTime, _ := time.Parse(\"2006-01-02T15:04:05.999Z07:00\", strings.SplitN(lines[i], \":\", 2)[1])\n\t\tdurStr := strings.Split(lines[i+1], \":\")[1]\n\t\tdurStr = durStr[:len(durStr)-1] // remove trailing comma\n\t\tduration, _ := strconv.ParseFloat(durStr, 64)\n\n\t\tp.Segments = append(p.Segments, &Segment{\n\t\t\tProgramDateTime: startTime,\n\t\t\tDuration:        duration,\n\t\t\tFilename:        lines[i+2],\n\t\t})\n\t}\n\n\tif lines[len(lines)-2] 
== \"#EXT-X-ENDLIST\" {\n\t\tp.Closed = true\n\t}\n\n\treturn p, nil\n}\n"
  },
  {
    "path": "test/stream.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n//go:build integration\n\npackage test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gorilla/websocket\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"github.com/livekit/egress/pkg/config\"\n\t\"github.com/livekit/egress/pkg/types\"\n\t\"github.com/livekit/protocol/livekit\"\n\t\"github.com/livekit/protocol/logger\"\n\t\"github.com/livekit/protocol/utils\"\n)\n\nconst (\n\tbadRtmpUrl1         = \"rtmp://localhost:1936/wrong/stream\"\n\tbadRtmpUrl1Redacted = \"rtmp://localhost:1936/wrong/{st...am}\"\n\tbadRtmpUrl2         = \"rtmp://localhost:1936/live/stream\"\n\tbadRtmpUrl2Redacted = \"rtmp://localhost:1936/live/{st...am}\"\n\tbadSrtUrl1          = \"srt://localhost:8891?streamid=publish:wrongport&pkt_size=1316\"\n\tbadSrtUrl2          = \"srt://localhost:8891?streamid=publish:badstream&pkt_size=1316\"\n)\n\nvar (\n\tstreamKey1          = utils.NewGuid(\"\")\n\tstreamKey2          = utils.NewGuid(\"\")\n\tstreamKey3          = utils.NewGuid(\"\")\n\tstreamKey4          = utils.NewGuid(\"\")\n\trtmpUrl1            = fmt.Sprintf(\"rtmp://localhost:1935/live/%s\", streamKey1)\n\trtmpUrl2            = fmt.Sprintf(\"rtmp://localhost:1935/live/%s\", streamKey2)\n\trtmpUrl3            = fmt.Sprintf(\"rtmp://localhost:1935/live/%s\", 
streamKey3)\n\trtmpUrl4            = fmt.Sprintf(\"rtmp://localhost:1935/live/%s\", streamKey4)\n\trtmpUrl1Redacted, _ = utils.RedactStreamKey(rtmpUrl1)\n\trtmpUrl2Redacted, _ = utils.RedactStreamKey(rtmpUrl2)\n\trtmpUrl3Redacted, _ = utils.RedactStreamKey(rtmpUrl3)\n\trtmpUrl4Redacted, _ = utils.RedactStreamKey(rtmpUrl4)\n\tsrtPublishUrl1      = fmt.Sprintf(\"srt://localhost:8890?streamid=publish:%s&pkt_size=1316\", streamKey1)\n\tsrtReadUrl1         = fmt.Sprintf(\"srt://localhost:8890?streamid=read:%s\", streamKey1)\n\tsrtPublishUrl2      = fmt.Sprintf(\"srt://localhost:8890?streamid=publish:%s&pkt_size=1316\", streamKey2)\n\tsrtReadUrl2         = fmt.Sprintf(\"srt://localhost:8890?streamid=read:%s\", streamKey2)\n)\n\n// [[publish, redacted, verification]]\nvar streamUrls = map[types.OutputType][][]string{\n\ttypes.OutputTypeRTMP: {\n\t\t{rtmpUrl1, rtmpUrl1Redacted, rtmpUrl1},\n\t\t{badRtmpUrl1, badRtmpUrl1Redacted, \"\"},\n\t\t{rtmpUrl2, rtmpUrl2Redacted, rtmpUrl2},\n\t\t{badRtmpUrl2, badRtmpUrl2Redacted, \"\"},\n\t},\n\ttypes.OutputTypeSRT: {\n\t\t{srtPublishUrl1, srtPublishUrl1, srtReadUrl1},\n\t\t{badSrtUrl1, badSrtUrl1, \"\"},\n\t\t{srtPublishUrl2, srtPublishUrl2, srtReadUrl2},\n\t\t{badSrtUrl2, badSrtUrl2, \"\"},\n\t},\n}\n\nfunc (r *Runner) testStream(t *testing.T) {\n\tif !r.should(runStream) {\n\t\treturn\n\t}\n\n\tt.Run(\"Stream\", func(t *testing.T) {\n\t\tfor _, test := range []*testCase{\n\n\t\t\t// ---- Room Composite -----\n\n\t\t\t{\n\t\t\t\tname:        \"RoomComposite\",\n\t\t\t\trequestType: types.RequestTypeRoomComposite,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\tvideoCodec: types.MimeTypeVP8,\n\t\t\t\t\tlayout:     \"speaker\",\n\t\t\t\t},\n\t\t\t\tstreamOptions: &streamOptions{\n\t\t\t\t\tstreamUrls: []string{rtmpUrl1, badRtmpUrl1},\n\t\t\t\t\toutputType: types.OutputTypeRTMP,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tname:        
\"RoomCompositeFixedKeyframeInterval\",\n\t\t\t\trequestType: types.RequestTypeRoomComposite,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\tvideoCodec: types.MimeTypeVP8,\n\t\t\t\t\tlayout:     \"speaker\",\n\t\t\t\t},\n\t\t\t\tstreamOptions: &streamOptions{\n\t\t\t\t\tstreamUrls: []string{rtmpUrl1, badRtmpUrl1},\n\t\t\t\t\toutputType: types.OutputTypeRTMP,\n\t\t\t\t},\n\t\t\t\tencodingOptions: &livekit.EncodingOptions{\n\t\t\t\t\tKeyFrameInterval: 2,\n\t\t\t\t},\n\t\t\t\tcontentCheck: r.streamKeyframeContentCheck(2),\n\t\t\t},\n\n\t\t\t// ---------- Web ----------\n\n\t\t\t{\n\t\t\t\tname:        \"Web\",\n\t\t\t\trequestType: types.RequestTypeWeb,\n\t\t\t\tstreamOptions: &streamOptions{\n\t\t\t\t\tstreamUrls: []string{srtPublishUrl1, badSrtUrl1},\n\t\t\t\t\toutputType: types.OutputTypeSRT,\n\t\t\t\t},\n\t\t\t\tencodingOptions: &livekit.EncodingOptions{\n\t\t\t\t\tKeyFrameInterval: 2,\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t// ------ Participant ------\n\n\t\t\t{\n\t\t\t\tname:        \"ParticipantComposite\",\n\t\t\t\trequestType: types.RequestTypeParticipant,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\taudioDelay: time.Second * 8,\n\t\t\t\t\tvideoCodec: types.MimeTypeVP8,\n\t\t\t\t},\n\t\t\t\tstreamOptions: &streamOptions{\n\t\t\t\t\tstreamUrls: []string{rtmpUrl1, badRtmpUrl1},\n\t\t\t\t\toutputType: types.OutputTypeRTMP,\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t// ---- Track Composite ----\n\n\t\t\t{\n\t\t\t\tname:        \"TrackComposite\",\n\t\t\t\trequestType: types.RequestTypeTrackComposite,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\tvideoCodec: types.MimeTypeVP8,\n\t\t\t\t},\n\t\t\t\tstreamOptions: &streamOptions{\n\t\t\t\t\tstreamUrls: []string{rtmpUrl1, badRtmpUrl1},\n\t\t\t\t\toutputType: types.OutputTypeRTMP,\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t// --------- Track ---------\n\n\t\t\t{\n\t\t\t\tname:        
\"Track\",\n\t\t\t\trequestType: types.RequestTypeTrack,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\taudioOnly:  true,\n\t\t\t\t},\n\t\t\t\tstreamOptions: &streamOptions{\n\t\t\t\t\trawFileName: fmt.Sprintf(\"track-ws-%v.raw\", time.Now().Unix()),\n\t\t\t\t\toutputType:  types.OutputTypeRaw,\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t// -------- Template --------\n\n\t\t\t{\n\t\t\t\tname:        \"Template\",\n\t\t\t\trequestType: types.RequestTypeTemplate,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\tvideoCodec: types.MimeTypeVP8,\n\t\t\t\t\tlayout:     \"speaker\",\n\t\t\t\t},\n\t\t\t\tstreamOptions: &streamOptions{\n\t\t\t\t\tstreamUrls: []string{rtmpUrl1, badRtmpUrl1},\n\t\t\t\t\toutputType: types.OutputTypeRTMP,\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t// -------- Media ----------\n\n\t\t\t{\n\t\t\t\tname:        \"Media/ParticipantVideoStream\",\n\t\t\t\trequestType: types.RequestTypeMedia,\n\t\t\t\tpublishOptions: publishOptions{\n\t\t\t\t\taudioCodec: types.MimeTypeOpus,\n\t\t\t\t\tvideoCodec: types.MimeTypeVP8,\n\t\t\t\t\tmediaParticipantVideo: &livekit.ParticipantVideo{\n\t\t\t\t\t\tIdentity: \"set-at-runtime\",\n\t\t\t\t\t},\n\t\t\t\t\taudioRoutes: []*livekit.AudioRoute{{\n\t\t\t\t\t\tMatch: &livekit.AudioRoute_TrackId{TrackId: \"set-at-runtime\"},\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t\tstreamOptions: &streamOptions{\n\t\t\t\t\tstreamUrls: []string{rtmpUrl1, badRtmpUrl1},\n\t\t\t\t\toutputType: types.OutputTypeRTMP,\n\t\t\t\t},\n\t\t\t},\n\t\t} {\n\t\t\tif !r.run(t, test, r.runStreamTest) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc (r *Runner) runStreamTest(t *testing.T, test *testCase) {\n\tif test.requestType == types.RequestTypeTrack {\n\t\tr.runWebsocketTest(t, test)\n\t\treturn\n\t}\n\n\treq := r.buildRequest(test)\n\n\tctx := context.Background()\n\turls := streamUrls[test.streamOptions.outputType]\n\tegressID := r.startEgress(t, req)\n\n\tp, err := 
config.GetValidatedPipelineConfig(r.ServiceConfig, req)\n\trequire.NoError(t, err)\n\n\tif !test.audioOnly {\n\t\trequire.True(t, p.VideoEncoding)\n\t}\n\n\t// verify\n\ttime.Sleep(time.Second * 5)\n\tr.verifyStreams(t, test, p, urls[0][2])\n\tr.checkStreamUpdate(t, egressID, map[string]livekit.StreamInfo_Status{\n\t\turls[0][1]: livekit.StreamInfo_ACTIVE,\n\t\turls[1][1]: livekit.StreamInfo_FAILED,\n\t})\n\n\t// add one good stream url and one bad\n\t_, err = r.client.UpdateStream(ctx, egressID, &livekit.UpdateStreamRequest{\n\t\tEgressId:      egressID,\n\t\tAddOutputUrls: []string{urls[2][0], urls[3][0]},\n\t})\n\trequire.NoError(t, err)\n\ttime.Sleep(time.Second * 5)\n\n\t// verify\n\tr.verifyStreams(t, test, p, urls[0][2], urls[2][2])\n\tr.checkStreamUpdate(t, egressID, map[string]livekit.StreamInfo_Status{\n\t\turls[0][1]: livekit.StreamInfo_ACTIVE,\n\t\turls[1][1]: livekit.StreamInfo_FAILED,\n\t\turls[2][1]: livekit.StreamInfo_ACTIVE,\n\t\turls[3][1]: livekit.StreamInfo_FAILED,\n\t})\n\n\t// remove one of the stream urls\n\t_, err = r.client.UpdateStream(ctx, egressID, &livekit.UpdateStreamRequest{\n\t\tEgressId:         egressID,\n\t\tRemoveOutputUrls: []string{urls[0][0]},\n\t})\n\trequire.NoError(t, err)\n\n\ttime.Sleep(time.Second * 5)\n\tif r.Dotfiles {\n\t\tr.createDotFile(t, egressID)\n\t}\n\n\t// verify the remaining stream\n\tr.verifyStreams(t, test, p, urls[2][2])\n\tr.checkStreamUpdate(t, egressID, map[string]livekit.StreamInfo_Status{\n\t\turls[0][1]: livekit.StreamInfo_FINISHED,\n\t\turls[1][1]: livekit.StreamInfo_FAILED,\n\t\turls[2][1]: livekit.StreamInfo_ACTIVE,\n\t\turls[3][1]: livekit.StreamInfo_FAILED,\n\t})\n\n\t// stop\n\ttime.Sleep(time.Second * 5)\n\tres := r.stopEgress(t, egressID)\n\n\t// verify egress info\n\trequire.Empty(t, res.Error)\n\trequire.NotZero(t, res.StartedAt)\n\trequire.NotZero(t, res.EndedAt)\n\n\t// check stream info\n\trequire.Len(t, res.StreamResults, 4)\n\tfor _, info := range res.StreamResults 
{\n\t\trequire.NotZero(t, info.StartedAt)\n\t\trequire.NotZero(t, info.EndedAt)\n\n\t\tswitch info.Url {\n\t\tcase urls[0][1]:\n\t\t\trequire.Equal(t, livekit.StreamInfo_FINISHED.String(), info.Status.String())\n\t\t\trequire.Greater(t, float64(info.Duration)/1e9, 15.0)\n\n\t\tcase urls[2][1]:\n\t\t\trequire.Equal(t, livekit.StreamInfo_FINISHED.String(), info.Status.String())\n\t\t\trequire.Greater(t, float64(info.Duration)/1e9, 10.0)\n\n\t\tdefault:\n\t\t\trequire.Equal(t, livekit.StreamInfo_FAILED.String(), info.Status.String())\n\t\t}\n\t}\n}\n\nfunc (r *Runner) verifyStreams(t *testing.T, tc *testCase, p *config.PipelineConfig, urls ...string) {\n\tfor _, url := range urls {\n\t\tinfo := verify(t, url, p, nil, types.EgressTypeStream, false, r.sourceFramerate, false)\n\t\tif tc != nil && tc.contentCheck != nil && info != nil {\n\t\t\ttc.contentCheck(t, url, info)\n\t\t}\n\t}\n}\n\nfunc (r *Runner) runWebsocketTest(t *testing.T, test *testCase) {\n\tfilepath := path.Join(r.FilePrefix, test.rawFileName)\n\twss := newTestWebsocketServer(filepath)\n\ts := httptest.NewServer(http.HandlerFunc(wss.handleWebsocket))\n\ttest.websocketUrl = \"ws\" + strings.TrimPrefix(s.URL, \"http\")\n\tdefer func() {\n\t\twss.close()\n\t\ts.Close()\n\t}()\n\n\treq := r.build(test)\n\n\tegressID := r.startEgress(t, req)\n\n\tp, err := config.GetValidatedPipelineConfig(r.ServiceConfig, req)\n\trequire.NoError(t, err)\n\n\ttime.Sleep(time.Second * 30)\n\n\tres := r.stopEgress(t, egressID)\n\tverify(t, filepath, p, res, types.EgressTypeWebsocket, r.Muting, r.sourceFramerate, false)\n}\n\ntype websocketTestServer struct {\n\tpath string\n\tfile *os.File\n\tconn *websocket.Conn\n\tdone chan struct{}\n}\n\nfunc newTestWebsocketServer(filepath string) *websocketTestServer {\n\treturn &websocketTestServer{\n\t\tpath: filepath,\n\t\tdone: make(chan struct{}),\n\t}\n}\n\nfunc (s *websocketTestServer) handleWebsocket(w http.ResponseWriter, r *http.Request) {\n\tvar err error\n\n\ts.file, err = 
os.Create(s.path)\n\tif err != nil {\n\t\tlogger.Errorw(\"could not create file\", err)\n\t\treturn\n\t}\n\n\t// accept ws connection\n\tupgrader := websocket.Upgrader{}\n\ts.conn, err = upgrader.Upgrade(w, r, nil)\n\tif err != nil {\n\t\tlogger.Errorw(\"could not accept ws connection\", err)\n\t\treturn\n\t}\n\n\tgo func() {\n\t\tdefer func() {\n\t\t\t_ = s.file.Close()\n\n\t\t\t// close the connection only if it's not closed already\n\t\t\tif !websocket.IsUnexpectedCloseError(err) {\n\t\t\t\t_ = s.conn.Close()\n\t\t\t}\n\t\t}()\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-s.done:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tmt, msg, err := s.conn.ReadMessage()\n\t\t\t\tif err != nil {\n\t\t\t\t\tif !websocket.IsUnexpectedCloseError(err) {\n\t\t\t\t\t\tlogger.Errorw(\"unexpected ws close\", err)\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tswitch mt {\n\t\t\t\tcase websocket.BinaryMessage:\n\t\t\t\t\t_, err = s.file.Write(msg)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlogger.Errorw(\"could not write to file\", err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (s *websocketTestServer) close() {\n\tclose(s.done)\n}\n"
  },
  {
    "path": "test/test_content.go",
    "content": "// Copyright 2025 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//\thttp://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage test\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\n//----------------------------------------------------------------------\n// Utilities for checking AV sync for the given audio/video test sample\n// https://github.com/livekit/media-samples/avsync_minmotion_livekit*\n//----------------------------------------------------------------------\n\nconst (\n\ttestSampleSilenceLevel = -38\n\ttestSampleBeepLevel    = -30.0\n)\n\nvar (\n\trePTS  = regexp.MustCompile(`pts_time:([0-9.]+)`)\n\treYAVG = regexp.MustCompile(`lavfi\\.signalstats\\.YAVG[=:]\\s*([0-9.]+)`)\n\treRMS  = regexp.MustCompile(`RMS_level[=:](-?[0-9.infINFNaN]+)`)\n)\n\nfunc ffmpegVideoStats(videoPath, statsFile string) error {\n\tctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)\n\tdefer cancel()\n\n\tcmd := exec.CommandContext(ctx, \"ffmpeg\",\n\t\t\"-hide_banner\", \"-nostats\", \"-loglevel\", \"repeat+info\",\n\t\t\"-i\", videoPath,\n\t\t\"-map\", \"0:v:0\",\n\t\t\"-vf\", fmt.Sprintf(\"crop=w=iw:h=8:x=0:y=0,signalstats,metadata=print:file=%s\", statsFile),\n\t\t\"-f\", \"null\", \"-\")\n\n\tvar outBuf, errBuf bytes.Buffer\n\tcmd.Stdout = &outBuf\n\tcmd.Stderr = 
&errBuf\n\n\tif err := cmd.Run(); err != nil {\n\t\tif errors.Is(ctx.Err(), context.DeadlineExceeded) {\n\t\t\treturn fmt.Errorf(\"ffmpeg video stats timeout after 15s\")\n\t\t}\n\t\treturn fmt.Errorf(\"ffmpeg video stats extraction failed: %w\\nstdout:\\n%s\\nstderr:\\n%s\",\n\t\t\terr, outBuf.String(), errBuf.String())\n\t}\n\treturn nil\n}\n\nfunc ffmpegAudioStats(audioPath, statsFile string) error {\n\tctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)\n\tdefer cancel()\n\n\tcmd := exec.CommandContext(ctx, \"ffmpeg\",\n\t\t\"-hide_banner\", \"-nostats\", \"-loglevel\", \"repeat+info\",\n\t\t\"-i\", audioPath,\n\t\t\"-af\", fmt.Sprintf(\"pan=mono|c0=0.5*c0+0.5*c1,astats=metadata=1:reset=1,ametadata=print:key=lavfi.astats.Overall.RMS_level:file=%s\", statsFile),\n\t\t\"-f\", \"null\", \"-\")\n\n\tvar outBuf, errBuf bytes.Buffer\n\tcmd.Stdout = &outBuf\n\tcmd.Stderr = &errBuf\n\n\tif err := cmd.Run(); err != nil {\n\t\tif errors.Is(ctx.Err(), context.DeadlineExceeded) {\n\t\t\treturn fmt.Errorf(\"ffmpeg audio stats timeout after 15s\")\n\t\t}\n\t\treturn fmt.Errorf(\"ffmpeg audio stats extraction failed: %w\\nstdout:\\n%s\\nstderr:\\n%s\",\n\t\t\terr, outBuf.String(), errBuf.String())\n\t}\n\treturn nil\n}\n\nfunc ffmpegSilenceStats(audioPath string, noiseLevel int, minDuration float64) (*bytes.Buffer, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)\n\tdefer cancel()\n\n\tcmd := exec.CommandContext(ctx, \"ffmpeg\",\n\t\t\"-hide_banner\", \"-nostats\", \"-loglevel\", \"info\",\n\t\t\"-i\", audioPath,\n\t\t\"-af\", \"silencedetect=noise=\"+fmt.Sprintf(\"%d\", noiseLevel)+\"dB:d=\"+strconv.FormatFloat(minDuration, 'f', -1, 64),\n\t\t\"-f\", \"null\", \"-\")\n\n\tvar stderr bytes.Buffer\n\tcmd.Stderr = &stderr\n\n\tif err := cmd.Run(); err != nil {\n\t\tif errors.Is(ctx.Err(), context.DeadlineExceeded) {\n\t\t\treturn nil, fmt.Errorf(\"ffmpeg silence stats timeout after 15s\")\n\t\t}\n\t\treturn nil, 
fmt.Errorf(\"ffmpeg silence stats extraction failed: %w\\nstderr:\\n%s\",\n\t\t\terr, stderr.String())\n\t}\n\treturn &stderr, nil\n}\n\n// extractFlashTimestamps runs ffmpeg + signalstats on the top stripe\n// and returns one timestamp per flash event (YAVG >= 130, spaced ≥0.2s).\nfunc extractFlashTimestamps(videoPath, outPath string) ([]time.Duration, error) {\n\tlogFile := filepath.Join(outPath, \"video_flash.log\")\n\n\terr := ffmpegVideoStats(videoPath, logFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfile, err := os.Open(logFile)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"ffmpeg video stats failed to open log file: %w\", err)\n\t}\n\tdefer file.Close()\n\n\tconst flashThreshold = 130.0\n\tconst minGap = 200 * time.Millisecond\n\n\tvar (\n\t\tflashes   []time.Duration\n\t\tlastFlash = -999 * time.Second\n\t\tcurPTS    time.Duration\n\t)\n\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\n\t\tif m := rePTS.FindStringSubmatch(line); len(m) == 2 {\n\t\t\t// PTS is logged as seconds (float); convert to duration\n\t\t\tif d, perr := parsePTSSecondsToDuration(m[1]); perr == nil {\n\t\t\t\tcurPTS = d\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif m := reYAVG.FindStringSubmatch(line); len(m) == 2 {\n\t\t\ty, _ := strconv.ParseFloat(m[1], 64)\n\t\t\tif y >= flashThreshold && curPTS-lastFlash > minGap {\n\t\t\t\tflashes = append(flashes, curPTS)\n\t\t\t\tlastFlash = curPTS\n\t\t\t}\n\t\t}\n\t}\n\treturn flashes, scanner.Err()\n}\n\n// extractBeepTimestamps runs ffmpeg + astats to find beeps.\n// A beep is when RMS_level > beepThreshold, debounced by 0.2s.\nfunc extractBeepTimestamps(audioPath string, beepThreshold float64, outPath string) ([]time.Duration, error) {\n\tlogFile := filepath.Join(outPath, \"audio_beep.log\")\n\n\terr := ffmpegAudioStats(audioPath, logFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfile, err := os.Open(logFile)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"ffmpeg audio stats 
failed to open log file: %w\", err)\n\t}\n\tdefer file.Close()\n\n\tconst minGap = 200 * time.Millisecond\n\n\tvar (\n\t\tbeeps  []time.Duration\n\t\tlast   = -999 * time.Second\n\t\tcurPTS time.Duration\n\t)\n\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\n\t\tif m := rePTS.FindStringSubmatch(line); len(m) == 2 {\n\t\t\tif d, perr := parsePTSSecondsToDuration(m[1]); perr == nil {\n\t\t\t\tcurPTS = d\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif m := reRMS.FindStringSubmatch(line); len(m) == 2 {\n\t\t\tval := m[1]\n\t\t\tif strings.Contains(val, \"inf\") || strings.Contains(val, \"nan\") {\n\t\t\t\tcontinue // skip silence or invalid\n\t\t\t}\n\t\t\tlvl, _ := strconv.ParseFloat(val, 64)\n\t\t\tif lvl > beepThreshold && curPTS-last > minGap {\n\t\t\t\tbeeps = append(beeps, curPTS)\n\t\t\t\tlast = curPTS\n\t\t\t}\n\t\t}\n\t}\n\treturn beeps, scanner.Err()\n}\n\n// silenceRange represents one silence segment in durations.\ntype silenceRange struct {\n\tstart    time.Duration\n\tend      time.Duration\n\tduration time.Duration\n}\n\n// detectSilence runs ffmpeg silencedetect and returns all silence ranges.\nfunc detectSilence(audioPath string, noiseLevel int, minDuration time.Duration) ([]silenceRange, error) {\n\tstderr, err := ffmpegSilenceStats(audioPath, noiseLevel, minDuration.Seconds())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar ranges []silenceRange\n\tvar current silenceRange\n\tinSilence := false\n\n\treStart := regexp.MustCompile(`silence_start:\\s*([0-9.]+)`)\n\treEnd := regexp.MustCompile(`silence_end:\\s*([0-9.]+)\\s*\\|\\s*silence_duration:\\s*([0-9.]+)`)\n\n\tscanner := bufio.NewScanner(stderr)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\n\t\tif m := reStart.FindStringSubmatch(line); len(m) == 2 {\n\t\t\tif start, perr := strconv.ParseFloat(m[1], 64); perr == nil {\n\t\t\t\tcurrent = silenceRange{start: secondsToDuration(start)}\n\t\t\t\tinSilence = true\n\t\t\t}\n\t\t}\n\n\t\tif m := 
reEnd.FindStringSubmatch(line); len(m) == 3 {\n\t\t\tif inSilence {\n\t\t\t\tend, _ := strconv.ParseFloat(m[1], 64)\n\t\t\t\tdur, _ := strconv.ParseFloat(m[2], 64)\n\t\t\t\tcurrent.end = secondsToDuration(end)\n\t\t\t\tcurrent.duration = secondsToDuration(dur)\n\t\t\t\tranges = append(ranges, current)\n\t\t\t\tinSilence = false\n\t\t\t}\n\t\t}\n\t}\n\n\treturn ranges, scanner.Err()\n}\n\nfunc secondsToDuration(f float64) time.Duration {\n\treturn time.Duration(f * float64(time.Second))\n}\n\nfunc parsePTSSecondsToDuration(s string) (time.Duration, error) {\n\tf, err := strconv.ParseFloat(strings.TrimSpace(s), 64)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn time.Duration(f * float64(time.Second)), nil\n}\n\nfunc averageSpacing(ts []time.Duration) (time.Duration, error) {\n\tif len(ts) < 2 {\n\t\treturn 0, fmt.Errorf(\"need at least 2 timestamps (got %d)\", len(ts))\n\t}\n\n\tvar sum time.Duration\n\tvar gaps int\n\tfor i := 1; i < len(ts); i++ {\n\t\td := ts[i] - ts[i-1]\n\t\tif d <= 0 {\n\t\t\t// skip non-positive gaps (duplicates or out-of-order anomalies)\n\t\t\tcontinue\n\t\t}\n\t\tsum += d\n\t\tgaps++\n\t}\n\tif gaps == 0 {\n\t\treturn 0, fmt.Errorf(\"no positive gaps to compute spacing\")\n\t}\n\treturn time.Duration(int64(sum) / int64(gaps)), nil\n}\n\nfunc requireDurationInDelta(t *testing.T, expected, actual, delta time.Duration, msgAndArgs ...interface{}) {\n\trequire.InDelta(t,\n\t\texpected.Nanoseconds(),\n\t\tactual.Nanoseconds(),\n\t\tfloat64(delta.Nanoseconds()),\n\t\tmsgAndArgs...)\n}\n"
  },
  {
    "path": "version/version.go",
    "content": "// Copyright 2023 LiveKit, Inc.\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage version\n\nconst (\n\tVersion         = \"1.12.0\"\n\tTemplateVersion = \"sha-594b3b1\"\n)\n"
  }
]