Repository: colinmarc/magic-mirror Branch: main Commit: f2be8a13bdac Files: 155 Total size: 1.2 MB Directory structure: gitextract_32ud61vu/ ├── .github/ │ ├── actions/ │ │ └── install-slang/ │ │ └── action.yaml │ └── workflows/ │ ├── bump-version.yaml │ ├── cliff.toml │ ├── docs.yaml │ ├── release-mmclient.yaml │ ├── release-mmserver.yaml │ └── tests.yaml ├── .gitignore ├── .gitmodules ├── .rustfmt.toml ├── BUILD.md ├── CHANGELOG.md ├── LICENSES/ │ ├── BUSL-1.1.txt │ └── MIT.txt ├── README.md ├── auto-release.sh ├── docs/ │ ├── .gitignore │ ├── config.toml │ ├── content/ │ │ ├── _index.md │ │ └── setup/ │ │ ├── client.md │ │ └── server.md │ └── templates/ │ └── footer.html ├── mm-client/ │ ├── Cargo.toml │ ├── build.rs │ └── src/ │ ├── audio/ │ │ └── buffer.rs │ ├── audio.rs │ ├── bin/ │ │ ├── latency-test.rs │ │ └── mmclient.rs │ ├── cursor.rs │ ├── delegate.rs │ ├── flash.rs │ ├── font.rs │ ├── gamepad.rs │ ├── keys.rs │ ├── lib.rs │ ├── overlay.rs │ ├── render.rs │ ├── render.slang │ ├── stats.rs │ ├── video.rs │ └── vulkan.rs ├── mm-client-common/ │ ├── Cargo.toml │ ├── bin/ │ │ └── uniffi-bindgen.rs │ └── src/ │ ├── attachment.rs │ ├── codec.rs │ ├── conn/ │ │ └── hostport.rs │ ├── conn.rs │ ├── display_params.rs │ ├── input.rs │ ├── lib.rs │ ├── logging.rs │ ├── packet/ │ │ └── ring.rs │ ├── packet.rs │ ├── pixel_scale.rs │ ├── session.rs │ ├── stats.rs │ └── validation.rs ├── mm-docgen/ │ ├── Cargo.toml │ └── src/ │ └── bin/ │ ├── config-docgen.rs │ └── protocol-docgen.rs ├── mm-protocol/ │ ├── Cargo.toml │ ├── build.rs │ └── src/ │ ├── lib.rs │ ├── messages.proto │ └── timestamp.rs ├── mm-server/ │ ├── Cargo.toml │ ├── build.rs │ ├── deny.toml │ └── src/ │ ├── codec.rs │ ├── color.rs │ ├── config.rs │ ├── container/ │ │ ├── ipc.rs │ │ └── runtime.rs │ ├── container.rs │ ├── encoder/ │ │ ├── dpb.rs │ │ ├── gop_structure.rs │ │ ├── h264.rs │ │ ├── h265.rs │ │ ├── rate_control.rs │ │ └── stats.rs │ ├── encoder.rs │ ├── main.rs │ ├── pixel_scale.rs │ ├── 
server/ │ │ ├── handlers/ │ │ │ ├── attachment/ │ │ │ │ └── stats.rs │ │ │ ├── attachment.rs │ │ │ └── validation.rs │ │ ├── handlers.rs │ │ ├── mdns.rs │ │ ├── sendmmsg.rs │ │ └── stream.rs │ ├── server.rs │ ├── session/ │ │ ├── audio/ │ │ │ ├── buffer.rs │ │ │ └── pulse.rs │ │ ├── audio.rs │ │ ├── compositor/ │ │ │ ├── buffers/ │ │ │ │ ├── modifiers.rs │ │ │ │ └── syncobj_timeline.rs │ │ │ ├── buffers.rs │ │ │ ├── dispatch/ │ │ │ │ ├── shm.rs │ │ │ │ ├── wl_buffer.rs │ │ │ │ ├── wl_compositor.rs │ │ │ │ ├── wl_data_device_manager.rs │ │ │ │ ├── wl_drm.rs │ │ │ │ ├── wl_output.rs │ │ │ │ ├── wl_seat.rs │ │ │ │ ├── wl_shm.rs │ │ │ │ ├── wp_fractional_scale.rs │ │ │ │ ├── wp_linux_dmabuf.rs │ │ │ │ ├── wp_linux_drm_syncobj.rs │ │ │ │ ├── wp_pointer_constraints.rs │ │ │ │ ├── wp_presentation.rs │ │ │ │ ├── wp_relative_pointer.rs │ │ │ │ ├── wp_text_input.rs │ │ │ │ ├── xdg_shell.rs │ │ │ │ └── xwayland_shell.rs │ │ │ ├── dispatch.rs │ │ │ ├── oneshot_render.rs │ │ │ ├── output.rs │ │ │ ├── protocols/ │ │ │ │ ├── wayland-drm.xml │ │ │ │ └── wl_drm.rs │ │ │ ├── protocols.rs │ │ │ ├── sealed.rs │ │ │ ├── seat.rs │ │ │ ├── serial.rs │ │ │ ├── shm.rs │ │ │ ├── stack.rs │ │ │ ├── surface.rs │ │ │ ├── xwayland/ │ │ │ │ └── xwm.rs │ │ │ └── xwayland.rs │ │ ├── compositor.rs │ │ ├── control.rs │ │ ├── handle.rs │ │ ├── input/ │ │ │ └── udevfs.rs │ │ ├── input.rs │ │ ├── reactor.rs │ │ ├── video/ │ │ │ ├── composite.rs │ │ │ ├── composite.slang │ │ │ ├── convert.rs │ │ │ └── convert.slang │ │ └── video.rs │ ├── session.rs │ ├── state.rs │ ├── vulkan/ │ │ ├── chain.rs │ │ ├── drm.rs │ │ ├── timeline.rs │ │ └── video.rs │ ├── vulkan.rs │ └── waking_sender.rs ├── mmserver.default.toml ├── shader-common/ │ └── color.slang └── test-apps/ ├── Cargo.toml ├── bin/ │ ├── color.rs │ ├── cursorlock.rs │ └── latency.rs ├── build.rs └── src/ └── color-test.slang ================================================ FILE CONTENTS ================================================ 
================================================ FILE: .github/actions/install-slang/action.yaml ================================================ name: Install slang inputs: version: required: true target: required: true token: required: true runs: using: "composite" steps: - name: install slang shell: bash run: | mkdir $RUNNER_TEMP/slang slang_url=$( gh api https://api.github.com/repos/shader-slang/slang/releases/tags/${{ inputs.version }} |\ jq -r '.assets[].browser_download_url' | grep ${{ inputs.target }}.tar.gz | head -1 ) (cd $RUNNER_TEMP/slang && curl -o - -fsSL "$slang_url" | tar zxv) echo "SLANG_DIR=$RUNNER_TEMP/slang" >> "$GITHUB_ENV" echo "LD_LIBRARY_PATH=$RUNNER_TEMP/slang/lib" >> "$GITHUB_ENV" echo "DYLD_LIBRARY_PATH=$RUNNER_TEMP/slang/lib" >> "$GITHUB_ENV" env: GH_TOKEN: ${{ inputs.token }} ================================================ FILE: .github/workflows/bump-version.yaml ================================================ on: push: branches: [main] name: Open a PR to bump the version jobs: open_pr: strategy: matrix: component: ["server", "client"] name: Open PR runs-on: ubuntu-24.04 permissions: pull-requests: write contents: write steps: - uses: dtolnay/rust-toolchain@stable with: targets: x86_64-unknown-linux-gnu - uses: actions/checkout@v4 with: fetch-depth: 0 fetch-tags: true - uses: swatinem/rust-cache@v2 - run: cargo install git-cliff@^2.6 cargo-edit@^0.12 - name: determine version run: | echo "COMPONENT=${{ matrix.component }}" | tee -a "$GITHUB_ENV" echo "CURRENT_VERSION=$( git tag | grep "${{ matrix.component }}" | tail -1 )" | tee -a "$GITHUB_ENV" mm_component="mm-${{ matrix.component }}" echo "MM_COMPONENT=$mm_component" | tee -a "$GITHUB_ENV" version=$( git cliff -c .github/workflows/cliff.toml \ --bumped-version \ --include-path "$MM_COMPONENT*/**/*" \ --tag-pattern "${{ matrix.component }}" ) echo "BUMPED_VERSION=$version" | tee -a "$GITHUB_ENV" echo "BUMPED_VERSION_SHORT=$( echo $version | sed -E 's/^[a-z]+-v(.*)/\1/' )" | tee -a 
"$GITHUB_ENV" - name: replace version in files if: ${{ env.BUMPED_VERSION != env.CURRENT_VERSION }} run: | git grep --cached -l '' | grep -v CHANGELOG |\ xargs sed -i -E "s/mm$COMPONENT-v[0-9]+\.[0-9]+\.[0-9]+/$BUMPED_VERSION/g" - name: replace version in Cargo.toml if: ${{ env.BUMPED_VERSION != env.CURRENT_VERSION }} run: (cd $MM_COMPONENT && cargo set-version --offline $BUMPED_VERSION_SHORT) - name: cargo update if: ${{ env.BUMPED_VERSION != env.CURRENT_VERSION }} run: (cd $MM_COMPONENT && cargo update $MM_COMPONENT) - name: update BUSL change date if: ${{ env.BUMPED_VERSION != env.CURRENT_VERSION && matrix.component == 'server' }} run: | change_date=$(date -d "4 years hence" +%Y-%m-01) # Round down to the 1st of the month sed -i -E "/Change/s/[0-9]{4}-[0-9]{2}-[0-9]{2}/$change_date/" LICENSES/BUSL-1.1.txt - name: update CHANGELOG.md if: ${{ env.BUMPED_VERSION != env.CURRENT_VERSION }} run: | git cliff -c .github/workflows/cliff.toml \ --include-path "$MM_COMPONENT*/**/*" \ --tag-pattern "$COMPONENT" \ -t "$BUMPED_VERSION" -u \ -p CHANGELOG.md - name: generate PR body if: ${{ env.BUMPED_VERSION != env.CURRENT_VERSION }} run: | git cliff -c .github/workflows/cliff.toml \ --include-path "$MM_COMPONENT*/**/*" \ --tag-pattern "$COMPONENT" \ -t "$BUMPED_VERSION" -u > "$RUNNER_TEMP/pr-body.txt" - name: open PR if: ${{ env.BUMPED_VERSION != env.CURRENT_VERSION }} id: cpr uses: peter-evans/create-pull-request@6d6857d36972b65feb161a90e484f2984215f83e with: draft: true branch: "auto-bump-${{ matrix.component }}" title: ":robot: bump mm${{ matrix.component }} to ${{ env.BUMPED_VERSION }}" commit-message: "chore: release ${{ env.BUMPED_VERSION }}" body-path: "${{ runner.temp }}/pr-body.txt" ================================================ FILE: .github/workflows/cliff.toml ================================================ [changelog] render_always = true body = """ {% if version %}\ ## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }} 
{% else %}\ ## [unreleased] {% endif %}\ {% for group, commits in commits | group_by(attribute="group") %} ### {{ group | striptags | trim | upper_first }} {% for commit in commits %} - {{ commit.message | upper_first }} \ ({{ commit.id }})\ {% endfor %} {% endfor %}\n """ [git] commit_parsers = [ { message = "^feat", group = "New Features" }, { message = "^fix", group = "Bugfixes" }, { message = "^doc", skip = true }, { message = "^perf", skip = true }, { message = "^refactor", skip = true }, { message = "^style", skip = true }, { message = "^test", skip = true }, { message = "^chore|^ci", skip = true }, { message = "build", skip = true }, { body = ".*security", skip = true }, { message = "^revert", skip = true }, ] [bump] features_always_bump_minor = false breaking_always_bump_major = false ================================================ FILE: .github/workflows/docs.yaml ================================================ on: push: branches: [main, docs] name: Build documentation site jobs: build: name: Build runs-on: ubuntu-24.04 steps: - uses: dtolnay/rust-toolchain@stable - uses: actions/checkout@v4 with: submodules: true - uses: swatinem/rust-cache@v2 with: workspaces: | mm-protocol mm-client-common - name: install protoc run: | sudo apt update sudo apt install protobuf-compiler - name: install zola uses: taiki-e/install-action@v2 with: tool: zola@0.19.2 - name: generate config reference run: | mkdir -p docs/content/reference cargo run --manifest-path mm-docgen/Cargo.toml --bin config-docgen \ mmserver.default.toml > docs/content/reference/config.md - name: generate protocol reference run: | cargo run --manifest-path mm-docgen/Cargo.toml --bin protocol-docgen \ mm-protocol/src/messages.proto > docs/content/reference/protocol.md - name: zola build run: zola -r docs build -o docs/build - name: generate rustdoc for mm-protocol run: | cargo doc --manifest-path mm-protocol/Cargo.toml \ --no-deps --target-dir docs/build - name: generate rustdoc for mm-client-common 
run: | cargo doc --manifest-path mm-client-common/Cargo.toml \ --no-deps --target-dir docs/build - name: Upload static files id: deployment uses: actions/upload-pages-artifact@v3 with: path: docs/build deploy: name: Deploy runs-on: ubuntu-latest needs: build permissions: pages: write id-token: write environment: name: github-pages url: ${{ steps.deployment.outputs.page_url }} steps: - name: Deploy to GitHub Pages id: deployment uses: actions/deploy-pages@v4 ================================================ FILE: .github/workflows/release-mmclient.yaml ================================================ on: push: tags: - 'mmclient-v*.*.*' name: Release mmclient jobs: create_tarball_linux: name: Build mmclient (linux) runs-on: ubuntu-24.04 steps: - uses: dtolnay/rust-toolchain@stable with: targets: x86_64-unknown-linux-gnu - name: install deps run: | sudo apt update sudo apt install \ nasm cmake protobuf-compiler libxkbcommon-dev libwayland-dev libasound2-dev \ ffmpeg libavutil-dev libavformat-dev libavdevice-dev libavfilter-dev \ libfontconfig-dev libfreetype-dev libudev-dev - uses: actions/checkout@v4 - uses: ./.github/actions/install-slang with: token: ${{ secrets.GITHUB_TOKEN }} target: linux-x86_64 version: v2025.5 - uses: swatinem/rust-cache@v2 with: workspaces: | mm-client mm-protocol - name: cargo build run: (cd mm-client && cargo build --bin mmclient --release --target x86_64-unknown-linux-gnu) - name: create release tarball run: |- mkdir "${RUNNER_TEMP}/${GITHUB_REF_NAME}" cp -r mm-client/target/x86_64-unknown-linux-gnu/release/mmclient \ README.md CHANGELOG.md \ "${RUNNER_TEMP}/${GITHUB_REF_NAME}" cp LICENSES/MIT.txt "${RUNNER_TEMP}/${GITHUB_REF_NAME}/LICENSE.txt" tar -C "${RUNNER_TEMP}" --numeric-owner -cvzf "${GITHUB_REF_NAME}-linux-amd64.tar.gz" "$GITHUB_REF_NAME" - name: upload tarball uses: actions/upload-artifact@v4 with: name: mmclient-linux path: mmclient-*.tar.gz create_tarball_macos: name: Build mmclient (macos arm) runs-on: macos-latest steps: - 
uses: dtolnay/rust-toolchain@stable with: targets: aarch64-apple-darwin - name: install deps run: | brew install ffmpeg@6 protobuf brew link ffmpeg@6 - uses: actions/checkout@v4 - uses: ./.github/actions/install-slang with: token: ${{ secrets.GITHUB_TOKEN }} target: macos-aarch64 version: v2024.15.2 - uses: swatinem/rust-cache@v2 with: workspaces: | mm-client mm-protocol - name: cargo build run: (cd mm-client && cargo build --bin mmclient --release --features moltenvk_static --target aarch64-apple-darwin) - name: create release tarball run: |- mkdir "${RUNNER_TEMP}/${GITHUB_REF_NAME}" cp -r mm-client/target/aarch64-apple-darwin/release/mmclient \ README.md CHANGELOG.md \ "${RUNNER_TEMP}/${GITHUB_REF_NAME}" cp LICENSES/MIT.txt "${RUNNER_TEMP}/${GITHUB_REF_NAME}/LICENSE.txt" gtar -C "${RUNNER_TEMP}" --numeric-owner -cvzf "${GITHUB_REF_NAME}-darwin-arm64.tar.gz" "$GITHUB_REF_NAME" - name: upload tarball uses: actions/upload-artifact@v4 with: name: mmclient-mac path: mmclient-*.tar.gz create_tarball_macos_intel: name: Build mmclient (macos intel) runs-on: macos-13 steps: - uses: dtolnay/rust-toolchain@stable with: targets: x86_64-apple-darwin - name: install deps run: | brew install ffmpeg@6 protobuf brew link ffmpeg@6 - uses: actions/checkout@v4 - uses: ./.github/actions/install-slang with: token: ${{ secrets.GITHUB_TOKEN }} target: macos-x86_64 version: v2024.15.2 - uses: swatinem/rust-cache@v2 with: workspaces: | mm-client mm-protocol - name: cargo build run: (cd mm-client && cargo build --bin mmclient --release --features moltenvk_static --target x86_64-apple-darwin) - name: create release tarball run: |- mkdir "${RUNNER_TEMP}/${GITHUB_REF_NAME}" cp -r mm-client/target/x86_64-apple-darwin/release/mmclient \ README.md CHANGELOG.md \ "${RUNNER_TEMP}/${GITHUB_REF_NAME}" cp LICENSES/MIT.txt "${RUNNER_TEMP}/${GITHUB_REF_NAME}/LICENSE.txt" gtar -C "${RUNNER_TEMP}" --numeric-owner -cvzf "${GITHUB_REF_NAME}-darwin-amd64.tar.gz" "$GITHUB_REF_NAME" - name: upload tarball 
uses: actions/upload-artifact@v4 with: name: mmclient-mac-intel path: mmclient-*.tar.gz create_release: name: Create release needs: [create_tarball_linux, create_tarball_macos, create_tarball_macos_intel] runs-on: ubuntu-24.04 permissions: contents: write steps: - uses: actions/checkout@v4 with: fetch-depth: 0 fetch-tags: true - uses: dtolnay/rust-toolchain@stable - name: install git-cliff run: cargo install git-cliff - name: generate release notes run: |- echo "# Client version ${GITHUB_REF_NAME/mmclient-v/}" >> release-notes.txt git cliff -c .github/workflows/cliff.toml \ --include-path "mm-client/**/*" \ --include-path "mm-client-common/**/*" \ --tag-pattern "client" \ --latest | tail -n +2 | tee -a release-notes.txt - name: download artifacts uses: actions/download-artifact@v4 with: merge-multiple: true - name: create release uses: softprops/action-gh-release@v2 with: body_path: release-notes.txt files: "mmclient-*.tar.gz" ================================================ FILE: .github/workflows/release-mmserver.yaml ================================================ on: push: tags: - 'mmserver-v*.*.*' name: Release mmserver jobs: create_release: name: Create mmserver release runs-on: ubuntu-24.04 permissions: contents: write steps: - uses: dtolnay/rust-toolchain@stable with: targets: x86_64-unknown-linux-gnu - name: install deps run: | sudo apt update sudo apt install nasm cmake protobuf-compiler libxkbcommon-dev - uses: actions/checkout@v4 with: fetch-depth: 0 fetch-tags: true - uses: ./.github/actions/install-slang with: token: ${{ secrets.GITHUB_TOKEN }} target: linux-x86_64 version: v2025.5 - uses: swatinem/rust-cache@v2 with: workspaces: | mm-server mm-client mm-protocol - name: cargo build run: (cd mm-server && cargo build --bin mmserver --release --target x86_64-unknown-linux-gnu) - name: create release tarball run: |- mkdir "${RUNNER_TEMP}/${GITHUB_REF_NAME}" cp -r mm-server/target/x86_64-unknown-linux-gnu/release/mmserver \ README.md CHANGELOG.md 
mmserver.default.toml \ "${RUNNER_TEMP}/${GITHUB_REF_NAME}" cp LICENSES/BUSL-1.1.txt "${RUNNER_TEMP}/${GITHUB_REF_NAME}/LICENSE.txt" tar -C "${RUNNER_TEMP}" --numeric-owner -cvzf "${GITHUB_REF_NAME}-linux-amd64.tar.gz" "$GITHUB_REF_NAME" - name: install git-cliff run: cargo install git-cliff - name: generate release notes run: |- echo "# Server version ${GITHUB_REF_NAME/mmserver-v/}" >> release-notes.txt git cliff -c .github/workflows/cliff.toml \ --include-path "mm-server/**/*" \ --tag-pattern "server" \ --latest | tail -n +2 | tee -a release-notes.txt - name: create release uses: softprops/action-gh-release@v2 with: body_path: release-notes.txt files: "mmserver-*.tar.gz" ================================================ FILE: .github/workflows/tests.yaml ================================================ on: push: branches: [main, test-ci] pull_request: branches: [main] name: Tests jobs: tests: name: Tests runs-on: ubuntu-24.04 steps: - uses: dtolnay/rust-toolchain@stable - name: install deps run: | sudo apt update sudo apt install \ nasm cmake protobuf-compiler libxkbcommon-dev libwayland-dev libasound2-dev \ ffmpeg libavutil-dev libavformat-dev libavdevice-dev libavfilter-dev \ libfontconfig-dev libfreetype-dev libudev-dev - uses: actions/checkout@v4 - uses: ./.github/actions/install-slang with: token: ${{ secrets.GITHUB_TOKEN }} target: linux-x86_64 version: v2025.5 - uses: swatinem/rust-cache@v2 with: workspaces: | mm-server mm-client mm-protocol - name: install deny run: cargo install cargo-deny - name: server deny run: (cd mm-server && cargo deny check) - name: server tests run: | export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUNNER='sudo -E' (cd mm-server && cargo test -- --test-threads=1) - name: protocol tests run: (cd mm-protocol && cargo test) - name: client tests run: (cd mm-client && cargo test) - name: server cargo clippy run: (cd mm-server && cargo clippy) - name: protocol cargo clippy run: (cd mm-protocol && cargo clippy) - name: client cargo clippy 
run: (cd mm-client && cargo clippy) ================================================ FILE: .gitignore ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT target .vscode .reuse *.log mm-protocol/Cargo.lock mm-client-common/Cargo.lock ================================================ FILE: .gitmodules ================================================ [submodule "docs/themes/anemone"] path = docs/themes/anemone url = https://github.com/Speyll/anemone ================================================ FILE: .rustfmt.toml ================================================ use_field_init_shorthand = true use_try_shorthand = true unstable_features = true format_code_in_doc_comments = true format_macro_bodies = true format_macro_matchers = true format_strings = true group_imports = "StdExternalCrate" normalize_doc_attributes = true wrap_comments = true ================================================ FILE: BUILD.md ================================================ ## Building `mmserver` The following are required to build the server and its dependencies: ``` rust (MSRV 1.77.2) nasm cmake protoc libxkbcommon ``` Besides rust, the following command will install everything on ubuntu: ``` apt install nasm cmake protobuf-compiler libxkbcommon-dev ``` Then you should be good to go: ``` cd mm-server cargo build --bin mmserver [--release] ``` ### Feature flags The following feature flags are available: - `vulkan_encode` (on by default) - enables hardware encode - `svt_encode` (on by default) - enables svt-av1 and svt-hevc for CPU encode - `ffmpeg_encode` - allows using system-installed ffmpeg to do CPU encode Note that `ffmpeg_encode` takes precedence over `svt_encode` if enabled, but the server will always choose hardware encode if available on your platform. 
## Building `mmclient` The following are required to build the client and its dependencies: ``` rust (MSRV 1.77.2) nasm cmake protoc libxkbcommon (only linux) libwayland-client (only linux) alsa (only linux) ffmpeg 6.x ``` Besides rust, the following command will install everything on ubuntu: ``` apt install \ nasm cmake protobuf-compiler libxkbcommon-dev libwayland-dev libasound2-dev \ ffmpeg libavutil-dev libavformat-dev libavdevice-dev libavfilter-dev ``` Or using homebrew on macOS: ``` brew install nasm cmake ffmpeg@6 protobuf ``` ================================================ FILE: CHANGELOG.md ================================================ ## [mmserver-v0.8.4] - 2025-05-21 ### Bugfixes - Make missing hardware encode support a hard error (b16dccb01902b854a2c345406f4df416d3024811) ## [mmserver-v0.8.3] - 2025-03-12 ### Bugfixes - Try to avoid colliding with the system x11 socket (3af95ba9ab012e723d21415baf0b6f4679ba1534) - Follow symlinks when calling move_mount (a7505aed296cab4648e2f3752d5901e5d95ded45) ## [mmserver-v0.8.2] - 2025-02-20 ### Bugfixes - Drop application frames if the application is too slow (d73d78dfc37a17fd011c1d3ef1dbfe12c85ed856) ## [mmclient-v0.7.0] - 2025-02-12 ### New Features - Deprecate KeepAlive in favor of connection keepalives (ad3cdca8faf089b85902977e3e48b4a35d5f89e3) - Send hierarchical_layer as video frame metadata (ddbe84346fa03f55ebed7289b005b2e36ec23d36) - Expose hierarchical_layer (b4bd4c66b62439c71c6a3ed52c26046b3a2f0b6f) - Allow clients to configure their connection timeout (452ef70eb118280df9170bed382b8539813e7802) ### Bugfixes - Remove a useless warning (c98bbe5382914dec22c92f4f160b2f276fb811ef) ## [mmserver-v0.8.0] - 2025-02-12 ### New Features - Make the session timeout configurable (39fa20cadfe7a780088c86f78ef2eae87e0c1222) - Send hierarchical_layer as video frame metadata (ddbe84346fa03f55ebed7289b005b2e36ec23d36) ### Bugfixes - Increment stream_seq when a refresh packet is sent 
(0fe282ae0fb71b476929ddf61bf71e4041ac0323) - Send headers with H265 keyframes (dc7084412c3c4eec661a9887f8c0d031f8dc8a19) - Add a warning if users are about to hit #29 (d5591bb4d59a635ffb9796b9c1f1cba9eba22b36) ## [mmserver-v0.7.2] - 2025-02-05 ### New Features - Deprecate KeepAlive in favor of connection keepalives (ad3cdca8faf089b85902977e3e48b4a35d5f89e3) ### Bugfixes - Remove noexec from /tmp in the container (0c534f6677e07cda77e0384854dded47dd8a949b) - Support resampling app audio output (897a053abc568255040a66c356703e3e6c3c9070) - Support downmixing by throwing away extra channels (17d81d866efc94ed2c2839589541362be3d5aae1) - Allow subtitles in application names (2fcac04765ce4af02923314667289ed88094f824) - Use aligned width and height for DPB images (d6f3bf713373bbadde0590f44659f8146e44c28d) - Relax the app startup timeout (a840c2b27b7adda073820a62d72fd64dc90e752b) - Use QUIC PING frames to keep clients alive (95ddb3d6bdc7e761ff596e249cc7be83b3d14cfb) - Don't flood the client with pings (b3b3194c042b8d56fda1f8b08f230042bf4461f7) - Turn down the heat on udevfs logging (53f448e45110ef722a60f927edd7c5fe58455a19) ## [mmserver-v0.7.1] - 2025-01-30 ### New Features - Let the encoder swap frames (b519680e3e8c552874f53cd88e98859e90698ac8) ### Bugfixes - Update quiche (c3d1e0080c1040151ecdc08e85584ff267f6eed6) - Remove an extra copy on the dgram path (0d204fa2549f4fd9abc804f4996f8fe11162e67b) - Fix clippy warning (7b041bbff7c908e57928d621ad74751bb7b76355) - Respect layer limits in VkVideoEncodeCapabilities (ce515b4d85b8af4da83a3fed0281f907e28253b1) - Print child logs correctly (e8cd88fb344ec74398eae09e757589700de3bff3) - Print an error when dmabuf importing fails (112d48d706fc19ce8882e2767c33672e7a044527) - Change target for vulkan error logs (641f51675a572402710fe3ee2ff0721857228ab9) - Add context (7572ac6a5131486f6d4cf6951742eea9c0f24d25) - Use the default congestion control algorithm (7245e624f785b2b01e0b5da507380a88121de542) - Get explicit sync working on NVIDIA 
(8d5786445e56629c67338258b1bc8cc7debb410e) - Remove unused import (df03d5d38236b325e826b998cef26ea2d9008e75) - Fix explicit sync on nvidia proprietary (8f806b233f537395d72de6e06d3861e73963bec2) - Check for the right nvidia version (08b6462a320ee76eabaf4387c354a1a6634ec8df) - Fix explicit sync on nvidia proprietary (3c70a79cda9cc545acf665ccacc495ed30f3440a) ## [mmclient-v0.6.0] - 2025-01-26 ### New Features - Support ffmpeg vulkan decode (5c76b29273d3c0b29edb9e34e33096af76814398) - Explicit video refresh (60dffc04f4f338c3fce6d791211c12d7471a187a) - Implement forward error correction (729e652a001d155345c80b7f5fef397a884a1a98) ### Bugfixes - Enforce non-fractional scales from the client (2a25ca95db01ff8460328f8f258faadf55d948bb) - Take application names with path included (100d51e8f44129a23b1df944a897a3123ef12d1c) ## [mmserver-v0.7.0] - 2025-01-26 ### New Features - Enable hierarchical coding (90d636ffba8379da420e09c6f228fb65c334a7f2) - Explicit video refresh (60dffc04f4f338c3fce6d791211c12d7471a187a) - Implement forward error correction (729e652a001d155345c80b7f5fef397a884a1a98) - Remove support for CPU encoders (a5d069cb7bde15931748e41ae3d9e12a6f917445) - Log basic attachment stats (b42cb40cb3d5fbedd2a17d37e09da8984029998c) ### Bugfixes - Pass correct flags to move_mount (af519eebc5a8f251624b3d063a7241910cddf2cc) - Pass correct flags to move_mount (take 2) (2e6053675a229dba4fc012b5de4afb723e9a0aca) - Enforce non-fractional scales from the client (2a25ca95db01ff8460328f8f258faadf55d948bb) - Disable explicit sync if the syncobj_surface is destroyed (e6017dec6bb9daadbbe50898f7cd9cf7c14b19aa) - Reduce the verbosity of some logging (b0abe2a76466e98ba8d4f844e88fd4ad4ce6c7ee) - Print frame duration from encoder (4a9af4f712ce723306364bece359c9bf18515554) - Add overall encode timing to tracy (334d5b37fe394e652bd27224c1c8e905e9c8a794) ## [mmserver-v0.6.1] - 2024-12-17 ### New Features - Save vulkaninfo for --bug-report (6deae3feb5a72a7e0099edd4983814d7fc873f15) ### Bugfixes - 
Avoid an endless loop when printing child output (7d700dfa4b9ef6d02e58c4a32151e69055fa3929) ## [mmserver-v0.6.0] - 2024-12-11 ### New Features - Support wp_linux_drm_syncobj_timeline (54f311653d800cf5a7aefe1b54edd27010f219ce) - Officially support nvidia proprietary (204126cdfcce09f4971de2e1bb9c86a4adf04d97) ### Bugfixes - Bind-mount nvidia devices, if present (4bb63d3c1e85f297c5d169219943694f133bbcfa) ## [mmclient-v0.5.0] - 2024-12-11 ### New Features - Add 'app_path' for organizing apps (b417559625c97e182dc074a5732ea35617332f36) - Add header images to the application list (756bfa866020da57be18d383367e0a2b189051aa) ### Bugfixes - Use Error::ServerError to communicate server errors (a857e0f186b9514cd3e1dc9b0f60df04b4abe3fe) - Correctly represent cursor images as optional (b08c76c9c65441fa92156f5282e9b02e98fa3ed9) - Be more resilient sending messages on closed streams (8e3eea65ccff2b6448dd9993b9afef9996c6650d) - Ensure attachment_ended is called on delegate (fd4d1c41e7da5ec949e26c91cc6171db1a41b1ce) - Always send pointer_left events (06010c9cf336d637526dcc308d1ee842e3a21cc1) - Handle ipv6 addresses correctly (9d442d2c8ad4c8cbfef96cb378289e1699d17e02) - Log connection errors (0ecc6ef05a5470991f1df8d0feaf18ace99b8de8) - Remove zone identifiers from hostnames (f9cee190718dc71aad8e9a0372b581a611551289) ## [mmserver-v0.5.6] - 2024-12-08 ### New Features - Warn if the client is using shm buffers (461e8913d9645c240d30a2ce1d269f8ba8aa0e39) - Support wp_fractional_scale (2a267e102add6fb72504652375d9ea48ec2c6484) ### Bugfixes - Handle invalid executables more comprehensively (f51174eb1509cecc73c10ab57cf991ee12a5cce7) - Throw an error if the app exe would be shadowed (cc6ee7e3df086bba443bb41471d671a2bd1b191b) - Reset staging query state each frame (982afb811ec062ddb6cc498a9cb92e6a4b5472ef) - Handle stride of shm buffers correctly (e8e1ee5eeba71d767c543ae83c4fa09b381beba1) - Log when container setup fails (83ea7b46fb95e1f1811cf516c55343622f9d9d35) - Put the XWayland socket in a 
canonical location (76056acbdc084307c6d71a66d2c7a343adea9b77) - Never discard surface content (f28e947201bc53be91ed13a53ad0221c27f931fb) - Handle xdg_popups more gracefully (03e392506a52349a4fdb075f4a4e53008a237958) - Translate surface coordinates correctly (9107636d2cb835409df3f604c47eed2d7397e819) - Shadow /run/user but not /run (c810f24305a169d896cbe92b57d53fd732bdef09) ## [mmserver-v0.5.5] - 2024-12-05 ### New Features - MDNS service discovery (152d82ca7595063aa77db7470e1dfdace9ae7ac2) - Add 'app_path' for organizing apps (b417559625c97e182dc074a5732ea35617332f36) - Make the mDNS instance name configurable (17e632ccbee15132e2420a5fc162c94171d4a34a) - Add header images to the application list (756bfa866020da57be18d383367e0a2b189051aa) ### Bugfixes - Align h265 bitstreams correctly (fc0543889b70eb0a151084d6a117e464cbeaaca0) - Improve error message when using self-signed certs (211dbcded77dc6fd0d97f19a415ca4b286327fb9) - Handle differing width/height in encode granularity (6b4b2dac3473d3631da6daa31fd09dc1bd3e2059) - Update the maximum message size to reflect the protocol docs (c517624d3683b7ad1e37fc7ea6a18d86c09ccb75) - Remove unecessary casts (d28b0b4335eb3e220b004421395e1f7d1d874939) - Warn when no hardware encoder is available (bef772948bbb7ff04788016fe74a84eefa7dee8c) - Bail early on mesa 24.2 (17758e3269ba661541ee2e94616606f2d935c626) ## [mmserver-v0.5.4] - 2024-11-18 ### Bugfixes - Handle missing /sys/devices/virtual/input (8f316fe41c41101ae18156a41abe2e9ba1e3497f) - Lock pointer based on pointer focus (4ce202d3bd9cb764c0586cdc83e890843c3c04d7) - Correctly handle an edge case with pointer locks (7c3428932651a372c69b25d1f77dc973746273a9) ## [mmserver-v0.5.3] - 2024-10-24 ### Bugfixes - Be consistent in xwayland socket naming (f6f6db3ab8b61e7af7684f14202d2b203b7e7760) - Never use a 0 audio stream_seq (632bcb1f7c79d35701f31a29d2dbe659ab411e3c) - Use the attachment coordinate space (57a59f478a6e4e248490b04b8c1ab42d2b1ae115) - Don't close streams while partial 
writes are pending (0add85078734a27e121dda97293f0e48d8ebd214) ## [mmclient-v0.4.1] - 2024-10-24 ### Bugfixes - Handle video/audio stream seq more intelligently (4bab3902d1e7d88c7222ed6ef404190c512b1940) - Make the overlay work again (0b1579bf68b2cd31611ca10a735061ef58e64604) - Use the attachment coordinate space (57a59f478a6e4e248490b04b8c1ab42d2b1ae115) - Don't close streams while partial writes are pending (0add85078734a27e121dda97293f0e48d8ebd214) - Send relative pointer motion again (7fced702ebe37de5b2f96e46091c6b862806f757) ## [mmserver-v0.5.2] - 2024-10-19 ### Bugfixes - Use getgid if we want the group ID (6a9c71d25d58ff6b5bc4564b99230d76a6599f0e) - Use _exit instead of exit or abort (c33a7b8989121706e0286af5efcdd8b5cf1291f1) - Pass locale environment variables through to child apps (8022fd1bdb8e64918e15f38b2b4197361841f9d5) ## [mmserver-v0.5.1] - 2024-10-18 ### Bugfixes - Correctly emulate input nodes in udevfs (3fec928dcb5d7d5054d6ca7821864bae74559b9b) - Increase the ready timeout (df5ba10642c5ec18064a67f8279d40d3b12baa76) - Stub wl_data_device_manager (af1853aaf34c373617b78ddbfbde2d37a977d3df) - Don't discard buffers when resending frame callbacks (3b9ce4164bb617ce7e0fd0840bad74fd281fda99) - Organize bug report files slightly better (1806d3eea0e33c124f58d413fc3843e288cc0b0e) ## [mmclient-v0.4.0] - 2024-10-18 ### New Features - Plumb controller input through to the server (990f48cdac4181e69ac3cb5dd1473fe16fca3390) - Allow specifying 'permanent' gamepads for a session (1d5b7f0a38017e0589c928a9acb6a10075bfac52) - Refactor out most of mmclient into a UniFFI rust lib (e8097e594b72a336ace6ef5fe7247304a18dd364) - List applications the server can launch (5d042be0f51095e06bbf68cdc3d3e40523c3e5ad) - Add a logging interface (b961041ce28b7da961f193b17cd03f4e36c14ea7) ### Bugfixes - Remove unecessary clone (87c95e63f6c6ce2f63207f96da839408f4617785) - Rename Gamepad* enums to reduce the possibility of collision (5fd2241beff203c5c09089456e9326102213c2c2) - Prevent a 
reattaching doom loop (dfa5d75e8daefa3dc15468145f55a5d06e7cd6e1) - Correctly invert joystick direction (a60eb398b5f1dd13e1ac660f856a03857decad5b) - Round off window height (d4227e772a7d6c8d30919b1e08876ee4a2e55802) - Handle gamepad connected events correctly (aed00821a8ce3add26ef3ff2226b26e0752c1971) - Increase the ready timeout (df5ba10642c5ec18064a67f8279d40d3b12baa76) ## [mmserver-v0.5.0] - 2024-10-15 ### New Features - Plumb controller input through to the server (990f48cdac4181e69ac3cb5dd1473fe16fca3390) - Allow specifying 'permanent' gamepads for a session (1d5b7f0a38017e0589c928a9acb6a10075bfac52) - Add support for native linux containerization (a37b0db8c5006e4c7b02cc98e506cd68a6ac2aa1) - Basic gamepad support (f0eceab777fd38cb085e0f5120fe54ab2a71d362) - List applications the server can launch (5d042be0f51095e06bbf68cdc3d3e40523c3e5ad) ### Bugfixes - Remove a bunch of dead code (b5e88bbe9e472866d9ddd5316a7a8187d7676778) - Add description field to application configs (d786828a87ce2c5ed18f373e3be06a1808ad5c42) - Include more context when reading config files (d39aaf46c09d2c6d4525dfb3b452374cd1476b9d) - Require app names to start with a letter (4182a506ea3a15809c42010ef88da1aeac12278d) - Handle unknown message types more gracefully (2978f9b2d41e4916f7a18905586466bb66e92c35) - Add application name to session spans (eccca93fd50530d7d658e8a69bb22ef1b689b5a4) - Sleep the compositor if no client is attached (e03d8f2914867cc733fa4b44f78f00f7f89ea361) - Make reattaching slightly more robust (10cfede5b4ef625f9961b3582ac7dab33cba6dd7) - If using layers > 0, pass that many rate control layers (3a201510794deaebf262a81e8b02e8a3d9359cfd) - Get hierarchical coding working on H265 (7b63cc694b28eb7fd1e9155a182e5446b80ef998) - Add some preflight checks at startup (91e00002073a1c07af73fb5a7f1e27a5779d66b3) - Improve shutdown behavior (5e77d7719313c2c6d53fa3335aec06840a9fe92a) - Use putenv instead of Command::env (0a832c0f606a9d130eeca0bcb334dc6c5d65e169) - Remove unshare as a 
dependency (e5c4575e3cacc9d00656cda7af114a0eb471777c) ## [mmserver-v0.4.1] - 2024-08-16 ### Bugfixes - Time out if the compositor doesn't accept an attachment in a reasonable timeframe (c1d6c6ca82fe3ff5ffcbf204c7f90e149b82f0ae) - Explicitly close QUIC streams when a worker finishes (a4b0c18e4af7455dcde689b241e4fe2737e50f57) - Never use 0 as a stream_seq (8fc95e4ef0d4a01d9c1809860a633c7417913115) - Raise the attachment timeout to account for slow clients (6b60df3e7625da72157b5a6ae8479e9e05469c71) - Set a default for video_profile (b4f2e01548ad0d374b4fc816f6a2a5c7c11f1751) - Correctly send vertical scroll events (6a25863b00f049d354dda5f598a3f507db653285) - Change order of press/release when simulating repeat (6df3f5cea5f8e6b2e2634f1307b2c4ee054ed638) ## [mmserver-v0.4.0] - 2024-08-02 ### New Features - Rewrite compositor from scratch (945a7793abbbc377f8c9ad1a852715203a16b097) - Allow attachments to be configured for HDR10 output (0c4b85af422378881f550f61882439b1a4abade1) - Support streaming in HDR (713dbbdce931e0ba98cc51bf144a2fe26dd9e2a1) ### Bugfixes - Improve compositor error messages with s/client/app (e5b24afe2ccd8ce77f74a5732a2e02f723256cda) ## [mmclient-v0.3.0] - 2024-08-02 ### New Features - Allow attachments to be configured for HDR10 output (0c4b85af422378881f550f61882439b1a4abade1) - Support playing HDR streams (12ef76930f729af0331bb83c3ceadb110bf22a6f) ### Bugfixes - Make --detach the default (7ca5ee3ea03bcc19f754c1542675be360e3216af) - Take name or id for --kill (7a1f8c1483bd43c292e5ec8189535b0e59fc453c) - Move the cursor before locking it (2a5cc571f868c7ade0c9798b41e96ee21209de4d) - Calculate RTT correctly (4762c1ab0594897949e4ce81a7897fab30d9c7fe) - Make sure session width/height are even (5a344ade0e3cd62c1c8e0f4b99d6be8dee7b513f) - Handle ConnectionClosed (953b9d4398ccca75b4108da0c31589c56747ff70) - Ensure --ui-scale overrides environment scaling (776b4dc2c5462a05c8520e769361f3136d5bcc6a) - Swap order of lock/warp when locking cursor on not-mac 
(525622b29d46fc8e659d0e3c37cf920faf587866) ## [mmclient-v0.2.0] - 2024-05-08 ### New Features - Cursor locking and relative motion (e11dfec7e42802a528ac8c8b4629044e6d6b1c3f) - Add --preset, for setting quality/bandwidth usage dynamically (6c590efaab02e31aae8413b683e8f8d228256b3b) ### Bugfixes - Don't sync every frame (5a7f1cfe11e6684e11bd618e2f1adf4d043640f5) ## [mmserver-v0.3.0] - 2024-05-08 ### New Features - Cursor locking and relative motion (e11dfec7e42802a528ac8c8b4629044e6d6b1c3f) - Add --preset, for setting quality/bandwidth usage dynamically (6c590efaab02e31aae8413b683e8f8d228256b3b) ### Bugfixes - Remove debugging code (152a1714ca950256f136757f47b7b2cf587d6880) - Un-transpose min and max QP (0570a6470b934e62dd4c9dcc42467a6db1a311e4) - Correctly set max QP on lower presets (b3f73533bb896c93d4a1d4e5c8efc336e329042c) - Prevent a segfault on nvidia (8b331b5de98a50dd3c59671a2dbfe37b966b95b9) - Re-send cursor status when reattaching (eba4a368c33a5bcd1cdf27a8b791f31ff466bb29) ## [mmclient-v0.1.2] - 2024-05-05 ### Bugfixes - Actually sync video and audio (4822bda39b4a5f07ed74e4fd76d5b080ea1c2078) - Tune verbosity of conn message (e9f0d18da517e1c7f1ab34d9c154b8ba70573f2e) - Fix typo in conn init (d8dd70b25952e1d1155bf8e6930d2304ca51c79e) ## [mmserver-v0.2.0] - 2024-05-05 ### New Features - Add enable_datagrams, off by default (e1dc976ee3228b006b874e077cd2c6cf7f784927) - Add glxinfo and eglinfo output to --bug-report (696464d9b980f1664e2b9dcce9e6f6dde83407f2) ### Bugfixes - Don't panic on dmabuf cursors (9f87ce7d99289ba31ad11b5d1796b992fd21c796) - Print version after initializing logging (f708ad2d8e5ddc9fb17ac023fef8f81706c31be7) - Handle full send queues more gracefully (face8776acea8c22e4d83b62c54ece5682f95cee) - Manually enable radv encode (26ba3f93f3da29921f9754181738f2087284a164) - Correctly expose a vulkan fn (2c627c94569050d0b53429204e8153119d268560) - Write xwayland logs to the bug report dir (0ba97f5f3bd72caf7df815e341c4c4f0a807b094) - Support older 
versions of xwayland with wl_drm (54c9724a476d023547fb1c2ccc5d74bc6eadc6a3) - Kill hung clients (5179e6688a2bc8fcceded03c0d92e2a00c38fb99) - Implement basic rate control (781c97e3efde247ef437ad2e19e8cdf57b6d216e) - Log entire config (b588f198d13122869936b52c0690e980586a7f88) - Garbage-collect partial writes (a095994de28ec31bd49a54c2d757493f41fc0c06) ## [mmclient-v0.1.1] - 2024-05-05 ### Bugfixes - Increase the default timeout when waiting for frames (a8aefcb295803d087349625a37e1fdef3f2ec9d7) - Handle video frames sent over the attachment stream (c0ecfba8fd5f06a64ab2e3c5d02731938a41170b) - Handle VideoChunk messages on the attachment stream (75f409d1b2c0685bf6e4413a44535798a7a53a71) - Handle AudioChunk messages on the attachment stream (3a63b07149fd36308d72378c66b53c41574abb1e) - Be more robust in the face of bad stream data (7c920b66451e615205cea7a8d229c068c340324c) - Respect hidden cursors (003fe97034cbbd71a8845841cf9d26e592c27696) ================================================ FILE: LICENSES/BUSL-1.1.txt ================================================ Business Source License 1.1 Parameters ---------- Licensor: Colin Marc Licensed Work: Magic Mirror Additional Use Grant: You may make use of the Licensed Work, provided that you may not use the Licensed Work for a Game Streaming or Remote Desktop service. A "Game Streaming or Remote Desktop service” is a commercial offering that allows third parties (other than your employees and contractors) to access the functionality of the Licensed Work, thereby utilizing graphics processing hardware owned or operated by you. Change Date: 2029-05-01 Change License: MIT License For information about alternative licensing arrangements for the Software, please contact the Licensor at hi@colinmarc.com. Notice The Business Source License (this document, or the “License”) is not an Open Source license. However, the Licensed Work will eventually be made available under an Open Source License, as stated in this License. 
License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. “Business Source License” is a trademark of MariaDB Corporation Ab. ----------------------------------------------------------------------------- Business Source License 1.1 Terms The Licensor hereby grants you the right to copy, modify, create derivative works, redistribute, and make non-production use of the Licensed Work. The Licensor may make an Additional Use Grant, above, permitting limited production use. Effective on the Change Date, or the fourth anniversary of the first publicly available distribution of a specific version of the Licensed Work under this License, whichever comes first, the Licensor hereby grants you rights under the terms of the Change License, and the rights granted in the paragraph above terminate. If your use of the Licensed Work does not comply with the requirements currently in effect as described in this License, you must purchase a commercial license from the Licensor, its affiliated entities, or authorized resellers, or you must refrain from using the Licensed Work. All copies of the original and modified Licensed Work, and derivative works of the Licensed Work, are subject to this License. This License applies separately for each version of the Licensed Work and the Change Date may vary for each version of the Licensed Work released by Licensor. You must conspicuously display this License on each original or modified copy of the Licensed Work. If you receive the Licensed Work in original or modified form from a third party, the terms and conditions set forth in this License apply to your use of that work. Any use of the Licensed Work in violation of this License will automatically terminate your rights under this License for the current and all other versions of the Licensed Work. 
This License does not grant you any right in any trademark or logo of Licensor or its affiliates (provided that you may use a trademark or logo of Licensor as expressly required by this License). TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND TITLE. MariaDB hereby grants you permission to use this License’s text to license your works, and to refer to it using the trademark “Business Source License”, as long as you comply with the Covenants of Licensor below. Covenants of Licensor In consideration of the right to use this License’s text and the “Business Source License” name and trademark, Licensor covenants to MariaDB, and to all other recipients of the licensed work to be provided by Licensor: 1. To specify as the Change License the GPL Version 2.0 or any later version, or a license that is compatible with GPL Version 2.0 or a later version, where “compatible” means that software provided under the Change License can be included in a program with software provided under GPL Version 2.0 or a later version. Licensor may specify additional Change Licenses without limitation. 2. To either: (a) specify an additional grant of rights to use that does not impose any additional restriction on the right granted in this License, as the Additional Use Grant; or (b) insert the text “None”. 3. To specify a Change Date. 4. Not to modify this License in any other way. 
================================================ FILE: LICENSES/MIT.txt ================================================ MIT License Copyright (c) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: README.md ================================================ # Magic Mirror 🪞✨ [![GitHub Actions Workflow Status](https://img.shields.io/github/actions/workflow/status/colinmarc/magic-mirror/tests.yaml)](https://github.com/colinmarc/magic-mirror/actions/workflows/tests.yaml) [![Discord](https://img.shields.io/discord/1284975819222945802?style=flat&label=discord&color=7289DA)](https://discord.gg/v22G644DzS) This is a game streaming and remote desktop tool for Linux hosts, featuring: - **Headless multitenant rendering:** Streamed applications are run offscreen, isolated from the rest of the system and any display hardware. - **No system dependencies:** The server is a single static binary, and there's no dependency on docker, pipewire, or any other systemwide setup. 
- **Native linux containerization:** apps are isolated in rootless containers with the equivalent of unshare(1), using new Linux namespace features - **High quality, tunable, 4k streaming:** See the [list of supported codecs](https://colinmarc.github.io/magic-mirror/setup/server/#hardware-software-encoding). 10-bit HDR support is in progress. - **Very low latency:** No extra CPU-GPU copy when using hardware encode. Total latency is less than one frame. - **Local cursor rendering:** Use the client-side cursor for minimal input lag. - **Client support for macOS and Linux:** A [SwiftUI client](https://github.com/colinmarc/magic-mirror-swiftui/releases/latest) is available for macOS, with tvOS/iOS support coming soon. > [!WARNING] > Alpha software! Please submit any issues you encounter. Run the server with `--bug-report` to generate detailed logs and record videos to attach to your report. ### Quick Links - [Documentation Book](https://colinmarc.github.io/magic-mirror) - [Latest Server Release [mmserver-v0.8.4]](https://github.com/colinmarc/magic-mirror/releases/tag/mmserver-v0.8.4) - [Latest CLI Client Release [mmclient-v0.7.0]](https://github.com/colinmarc/magic-mirror/releases/tag/mmclient-v0.7.0) - [Latest macOS Client Release](https://github.com/colinmarc/magic-mirror-swiftui/releases/latest) - [Discord](https://discord.gg/v22G644DzS) ================================================ FILE: auto-release.sh ================================================ #!/bin/sh -e die() { RED="\033[31m" RESET="\033[0m" echo -e "${RED}$1${RESET}" exit 1 } case $1 in "client" | "server") component=$1 ;; *) die "invalid component: $1" exit 1 ;; esac if [ -n "$(git status --untracked-files=no --porcelain)" ]; then die "working directory not clean; exiting" exit 1 fi branch="auto-bump-${component}" git fetch -q origin "${branch}" tag="$(git show -s --format=%s origin/${branch} | awk '{print $NF}')" if [ -n "$(git tag | grep ${tag})" ]; then die "tag exists" fi echo "bumping 
mm${component} to ${tag}..." git cherry-pick -S "origin/${branch}" echo "generating release notes..." release_notes="$(git cliff -v -c .github/workflows/cliff.toml \ --tag-pattern "${component}" \ --include-path "mm-${component}*/**/*" \ --unreleased --tag ${tag})" git tag ${tag} -a -m "${release_notes}" --cleanup=verbatim git show ${tag} ================================================ FILE: docs/.gitignore ================================================ # autogenerated content/reference build/ public/ ================================================ FILE: docs/config.toml ================================================ base_url = "https://colinmarc.github.io/magic-mirror" theme = "anemone" compile_sass = false build_search_index = false [markdown] highlight_code = true [extra] twitter_card = false header_nav = [ { url = "https://colinmarc.github.io/magic-mirror", name_en = "/home/"}, { url = "https://github.com/colinmarc/magic-mirror", name_en = "/github/"}, { url = "https://discord.gg/v22G644DzS", name_en = "/discord/"}, ] ================================================ FILE: docs/content/_index.md ================================================ +++ +++ # Magic Mirror 🪞✨ This page contains documentation for [Magic Mirror](https://github.com/colinmarc/magic-mirror), an open-source game streaming and remote desktop tool for linux hosts. ### Download These links always point to the latest release. - 💾 [Server [mmserver-v0.8.4]](https://github.com/colinmarc/magic-mirror/releases/tag/mmserver-v0.8.4) - 💾 [Command-Line Client [mmclient-v0.7.0]](https://github.com/colinmarc/magic-mirror/releases/tag/mmclient-v0.7.0) - 💾 [macOS GUI Client](https://github.com/colinmarc/magic-mirror-swiftui/releases/latest) ### Setup Guides Start here to get things up and running. - ⚙️ [Server Setup](@/setup/server.md) - ⚙️ [Client Setup](@/setup/client.md) ### Reference Autogenerated from the code. 
- 📖 [Configuration Reference](@/reference/config.md) - 📖 [Protocol Reference](@/reference/protocol.md) - 📖 [Rustdoc for `mm-protocol`](./doc/mm_protocol) - 📖 [Rustdoc for `mm-client-common`](./doc/mm_client_common) ### Contact Get help, report issues, make friends. - ⁉️ [Issue Tracker](https://github.com/colinmarc/magic-mirror/issues) - 💬 [Discord Chat](https://discord.gg/v22G644DzS) ================================================ FILE: docs/content/setup/client.md ================================================ +++ title = "Client Setup" [extra] toc = true +++ ## macOS GUI Client The native macOS client can be downloaded from [the releases page](https://github.com/colinmarc/magic-mirror-swiftui/releases/latest). It should work out of the box on ARM and Intel Macs running macOS 10.14 or later. ## Installing the commandline client There is also a cross-platform commandline client, `mmclient`. You can download it [here](https://github.com/colinmarc/magic-mirror/releases/tag/mmclient-v0.7.0). The commandline client requires `ffmpeg` 6.0 or later to be installed on the system. It also requires up-to-date Vulkan drivers. 
## Building mmclient The following are required to build the client and its dependencies: ``` rust (MSRV 1.77.2) nasm cmake protoc libxkbcommon (linux only) libwayland-client (linux only) alsa (linux only) ffmpeg 6.x ``` Besides Rust itself, the following command will install everything on ubuntu: ``` apt install \ nasm cmake protobuf-compiler libxkbcommon-dev libwayland-dev libasound2-dev \ ffmpeg libavutil-dev libavformat-dev libavdevice-dev libavfilter-dev ``` Or using homebrew on macOS: ``` brew install nasm cmake ffmpeg@6 protobuf ``` ================================================ FILE: docs/content/setup/server.md ================================================ +++ title = "Server Setup" [extra] toc = true +++ ## Quickstart First, grab [the latest server release](https://github.com/colinmarc/magic-mirror/releases/tag/mmserver-v0.8.4) and untar it somewhere: ```sh curl -fsSL "https://github.com/colinmarc/magic-mirror/releases/download/mmserver-v0.8.4/mmserver-v0.8.4-linux-amd64.tar.gz" \ | tar zxv cd mmserver-v0.8.4 ``` Then, create a [configuration file](@/reference/config.md) with at least one application definition: ```toml # mmserver.toml [apps.steam-gamepadui] command = ["steam", "-gamepadui"] xwayland = true ``` Then you can start the server like so: ``` $ ./mmserver -C config.toml 2024-12-09T16:57:30.989261Z INFO mmserver: listening on [::1]:9599 ``` You can also create a configuration directory, and add a file (json or toml) for each application: ```sh mkdir apps.d echo 'command = ["steam", "-gamepadui"]' > apps.d/steam.toml ./mmserver -i apps.d ``` ## Connectivity By default, mmserver only listens on `localhost`, which is not terribly useful. There are a few different options to configure which socket address the server listens for connections on. 
The easiest is to bind to a local IP, or use a VPN like wireguard or tailscale: ```toml # config.toml [server] bind = "192.168.1.37:9599" ``` Or from the command line: ```sh mmserver --bind $(tailscale ip -4):9599 ``` If you'd like to stream on a public IP, or on all interfaces (with `0.0.0.0`), mmserver requires that you set up a TLS certificate and key: ```toml # config.toml [server] tls_cert = "/path/to/tls.key" tls_key = "/path/to/tls.cert" ``` Generating such certificates and adding them to the client is out of scope for this guide. Note that while all Magic Mirror traffic is encrypted with TLS (whether you supply certificates or not), no _authentication_ is performed on incoming connections. Finally, you can also use `--bind-systemd` or `bind_systemd = true` to bind to a [systemd socket](https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html). ## System Requirements The following is required to run the server: - Linux 6.x (for Ubuntu, this means Mantic or Noble) - (For AMD/Intel cards) Mesa 24.3.x or later - (For NVIDIA cards) [Vulkan drivers](https://developer.nvidia.com/vulkan-driver) version 550 or later - XWayland (for X11 apps) ## Hardware encoding Magic Mirror uses hardware-based video compression codecs to stream the game over the wire. 
To see if your GPU supports video encoding, see the following matrix for your vendor: - [AMD](https://en.wikipedia.org/wiki/Unified_Video_Decoder#Format_support) - [NVIDIA](https://developer.nvidia.com/video-encode-and-decode-gpu-support-matrix-new) | Codec | AMD | NVIDIA | Intel | | ----- | :-: | :----: | :---: | | H.264 | ✅ | ✅ | ❔ | | H.265 | ✅ | ✅ | ❔ | | AV1 | ❌ | ❌ | ❌ | ## Building `mmserver` from source The following are required to build the server and its dependencies: ``` rust (MSRV 1.77.2) nasm cmake protoc libxkbcommon ``` Besides Rust itself, the following command will install everything on ubuntu: ``` apt install nasm cmake protobuf-compiler libxkbcommon-dev ``` Then you should be good to go: ``` cd mm-server cargo build --bin mmserver [--release] ``` ================================================ FILE: docs/templates/footer.html ================================================ ================================================ FILE: mm-client/Cargo.toml ================================================ # Copyright 2024 Colin Marc # # SPDX-License-Identifier: MIT [package] name = "mm-client" version = "0.7.0" edition = "2021" [[bin]] name = "mmclient" path = "src/bin/mmclient.rs" [[bin]] name = "latency-test" path = "src/bin/latency-test.rs" [dependencies] anyhow = "1" ash = "0.38" ash-window = "0.13.0" bytes = "1" clap = { version = "4", features = ["derive"] } cpal = "0.15" crossbeam-channel = "0.5" cstr = "0.2" ffmpeg-next = "7" ffmpeg-sys-next = "7" font-kit = "0.11" gilrs = "0.10" glam = "0.26" histo = "1" humantime = "2" image = { version = "0.25", default-features = false, features = ["png"] } imgui = { version = "0.12.0", features = ["tables-api"] } imgui-sys = "0.12.0" imgui-winit-support = "0.13.0" imgui-rs-vulkan-renderer = { version = "1.16.0", features = ["dynamic-rendering"] } lazy_static = "1" oneshot = { version = "0.1", default-features = false, features = ["std"] } opus = "0.3" pollster = "0.3" rand = "0.8" raw-window-handle = "0.5" 
simple_moving_average = "1" tabwriter = "1" tracing = "0.1" tracing-subscriber = { version = "0.3", features = ["time", "env-filter"] } tracy-client = { version = "0.17", default-features = false } tracing-tracy = { version = "0.11", default-features = false } [dependencies.mm-protocol] path = "../mm-protocol" [dependencies.mm-client-common] path = "../mm-client-common" [dependencies.dasp] version = "0.11" features = ["slice", "signal", "interpolate", "interpolate-linear"] [dependencies.winit] version = "0.30" default-features = false features = ["wayland", "x11", "rwh_06"] [target.'cfg(target_os = "macos")'.dependencies] ash-molten = { version = "0.18", optional = true } [build-dependencies.slang] git = "https://github.com/colinmarc/slang-rs" rev = "075daa4faa8d1ab6d7bfbb5293812b087a527207" # Uses SLANG_DIR if set, otherwise builds slang from source features = ["from-source"] [features] default = [] moltenvk_static = ["dep:ash-molten"] tracy = ["tracy-client/enable"] ================================================ FILE: mm-client/build.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT // extern crate shaderc; use std::path::PathBuf; extern crate slang; fn main() { let mut session = slang::GlobalSession::new(); let out_dir = std::env::var("OUT_DIR").map(PathBuf::from).unwrap(); compile_shader( &mut session, "src/render.slang", out_dir.join("shaders/frag.spv").to_str().unwrap(), "frag", slang::Stage::Fragment, ); compile_shader( &mut session, "src/render.slang", out_dir.join("shaders/vert.spv").to_str().unwrap(), "vert", slang::Stage::Vertex, ); } fn compile_shader( session: &mut slang::GlobalSession, in_path: &str, out_path: &str, entry_point: &str, stage: slang::Stage, ) { std::fs::create_dir_all(PathBuf::from(out_path).parent().unwrap()) .expect("failed to create output directory"); let mut compile_request = session.create_compile_request(); compile_request .add_search_path("../shader-common") 
.set_codegen_target(slang::CompileTarget::Spirv) .set_optimization_level(slang::OptimizationLevel::Maximal) .set_target_profile(session.find_profile("glsl_460")); let entry_point = compile_request .add_translation_unit(slang::SourceLanguage::Slang, None) .add_source_file(in_path) .add_entry_point(entry_point, stage); let shader_bytecode = compile_request .compile() .expect("Shader compilation failed."); std::fs::write(out_path, shader_bytecode.get_entry_point_code(entry_point)) .expect("failed to write shader bytecode to file"); println!("cargo::rerun-if-changed={}", in_path); } ================================================ FILE: mm-client/src/audio/buffer.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT use std::collections::VecDeque; pub struct PlaybackBuffer where F: dasp::Frame, { /// A queue of audio frames. samples: VecDeque, /// The PTS and packet length (in frames) for each packet. Kept in sync with /// `samples`. pts: VecDeque<(u64, usize)>, } impl PlaybackBuffer where F: dasp::Frame, { pub fn new() -> Self { PlaybackBuffer { samples: VecDeque::new(), pts: VecDeque::new(), } } /// Returns the number of frames in the buffer. pub fn len(&self) -> usize { self.samples.len() } /// Adds frames to the back of the buffer. pub fn buffer(&mut self, pts: u64, frames: &[F]) { self.pts.push_back((pts, frames.len())); self.samples.extend(frames.iter()); } /// Returns the PTS of the head packet in the audio buffer. pub fn current_pts(&self) -> u64 { self.pts .front() .expect("current_pts called before buffer") .0 } /// Returns an iterator that pops frames from the front of the buffer. pub fn drain(&mut self) -> Draining { Draining { buffer: self } } /// Discards the first N frames from the buffer. 
pub fn skip(&mut self, frames: usize) { self.samples.drain(..frames); let mut remaining = frames; loop { let (_, len) = self.pts.front_mut().expect("skip called before buffer"); if *len <= remaining { remaining -= *len; self.pts.pop_front(); } else { *len -= remaining; break; } } } } pub struct Draining<'a, F> where F: dasp::Frame, { buffer: &'a mut PlaybackBuffer, } impl Iterator for Draining<'_, F> where F: dasp::Frame, { type Item = F; fn next(&mut self) -> Option { let frame = self.buffer.samples.pop_front()?; if let Some((_, remaining)) = self.buffer.pts.front_mut() { *remaining -= 1; if *remaining == 0 { self.buffer.pts.pop_front(); } } Some(frame) } } ================================================ FILE: mm-client/src/audio.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT mod buffer; use std::{ sync::{Arc, Mutex}, time, }; use anyhow::{bail, Context as _}; use buffer::PlaybackBuffer; use cpal::traits::{DeviceTrait as _, HostTrait as _, StreamTrait}; use crossbeam_channel as crossbeam; use dasp::Signal; use mm_client_common as client; use tracing::{debug, error, info, trace}; trait DecodePacket { fn decode(&mut self, input: &[u8], output: &mut [T]) -> anyhow::Result; } impl DecodePacket for opus::Decoder { fn decode(&mut self, packet: &[u8], output: &mut [f32]) -> anyhow::Result { let len = self.decode_float(packet, output, false)?; Ok(len) } } impl DecodePacket for opus::Decoder { fn decode(&mut self, packet: &[u8], output: &mut [i16]) -> anyhow::Result { let len = self.decode(packet, output, false)?; Ok(len) } } // This is a trait object so we can erase the sample/frame generic type. 
trait StreamWrapper { #[allow(clippy::new_ret_no_self)] fn new( device: &cpal::Device, conf: cpal::StreamConfig, ) -> anyhow::Result<(Box, cpal::Stream)> where Self: Sized; fn sync(&mut self, pts: u64); fn send_packet(&mut self, packet: Arc) -> anyhow::Result<()>; } struct StreamInner { sync_point: Arc>>, _buffer: Arc>>, thread_handle: Option>>, undecoded_tx: Option>>, } impl StreamWrapper for StreamInner where F: dasp::Frame + Send + 'static, F::Sample: cpal::SizedSample + dasp::sample::Duplex + Default, opus::Decoder: DecodePacket, for<'a> &'a [F::Sample]: dasp::slice::ToFrameSlice<'a, F>, { fn new( device: &cpal::Device, conf: cpal::StreamConfig, ) -> anyhow::Result<(Box, cpal::Stream)> { let sample_rate = conf.sample_rate.0; let mut decoder = { let ch = match F::CHANNELS { 1 => opus::Channels::Mono, 2 => opus::Channels::Stereo, _ => bail!("unsupported number of channels: {}", F::CHANNELS), }; opus::Decoder::new(sample_rate, ch)? }; let buffer = Arc::new(Mutex::new(PlaybackBuffer::new())); let (undecoded_tx, undecoded_recv) = crossbeam::unbounded::>(); // Spawn a thread to eagerly decode packets. let buffer_clone = buffer.clone(); let thread_handle = std::thread::Builder::new() .name("audio decode".into()) .spawn(move || { // Handles up to 100ms of decoded audio. 
let mut output = vec![Default::default(); (sample_rate * F::CHANNELS as u32 / 10) as usize]; loop { let packet = match undecoded_recv.recv() { Ok(packet) => packet, Err(crossbeam::RecvError) => return Ok(()), }; let pts = packet.pts(); let packet = packet.data(); match DecodePacket::decode(&mut decoder, &packet, &mut output) { Ok(len) => { if len == 0 { continue; } let frames = dasp::slice::to_frame_slice(&output[..(len * F::CHANNELS)]) .expect("invalid sample count"); let mut guard = buffer_clone.lock().unwrap(); guard.buffer(pts, frames); #[cfg(feature = "tracy")] { let len_us = guard.len() as f64 / sample_rate as f64 * 1_000_000.0; tracy_client::plot!("audio buffer (μs)", len_us); } } Err(e) => { error!("opus decode error: {}", e); continue; } }; } })?; // The current PTS of the video stream, which we want to sync to. let sync_point = Arc::new(Mutex::new(None)); let sync_point_clone = sync_point.clone(); let buffer_clone = buffer.clone(); let stream = device.build_output_stream( &conf, move |out, _info| { let mut buffer = buffer_clone.lock().unwrap(); let frames_needed = out.len() / F::CHANNELS; let frames_remaining = buffer.len(); // In frames. let frames_per_ms = sample_rate / 1000; if frames_remaining < frames_needed { out.fill(Default::default()); trace!("audio buffer underrun"); return; } let sync_point: Option<(u64, time::Instant)> = sync_point_clone.lock().unwrap().as_ref().copied(); if let Some((pts, ts)) = sync_point { let target_pts = pts + ts.elapsed().as_millis() as u64; let pts = buffer.current_pts(); let delay = target_pts as i64 - pts as i64; #[cfg(feature = "tracy")] tracy_client::plot!("audio drift (ms)", delay as f64); // Outside these bounds, skip or play silence in order to sync. const TOO_EARLY: i64 = 20; const TOO_LATE: i64 = 60; if delay < TOO_EARLY { // Play silence until the video catches up. out.fill(Default::default()); return; } if delay > TOO_LATE { // Skip ahead. 
let skip = std::cmp::min( (delay * frames_per_ms as i64) as usize, frames_remaining.saturating_sub(frames_needed * 2), ); buffer.skip(skip); } } let mut signal = dasp::signal::from_iter(buffer.drain()).into_interleaved_samples(); for sample in out.iter_mut() { *sample = signal.next_sample(); } #[cfg(feature = "tracy")] { let len_us = buffer.len() as f64 / sample_rate as f64 * 1_000_000.0; tracy_client::plot!("audio buffer (μs)", len_us); } }, move |err| { error!("audio playback error: {}", err); }, None, )?; Ok(( Box::new(Self { // decoded_packets, _buffer: buffer, sync_point, thread_handle: Some(thread_handle), undecoded_tx: Some(undecoded_tx), }), stream, )) } fn sync(&mut self, pts: u64) { *self.sync_point.lock().unwrap() = Some((pts, time::Instant::now())); } fn send_packet(&mut self, packet: Arc) -> anyhow::Result<()> { self.undecoded_tx .as_ref() .unwrap() .send(packet) .map_err(|_| anyhow::anyhow!("audio decode thread died"))?; Ok(()) } } impl Drop for StreamInner { fn drop(&mut self) { let _ = self.undecoded_tx.take(); if let Some(handle) = self.thread_handle.take() { match handle.join() { Ok(Ok(())) => (), Ok(Err(e)) => { error!("audio decode thread error: {}", e); } Err(_) => { error!("audio decode thread panicked"); } } } } } pub struct AudioStream { device: cpal::Device, stream: Option, inner: Option>, stream_waiting: bool, stream_seq: u64, packet_count: u64, } impl AudioStream { pub fn new() -> anyhow::Result { let device = cpal::default_host() .default_output_device() .context("unable to find default audio output device")?; info!("using audio output device: {}", device.name()?); Ok(Self { device, stream: None, inner: None, stream_waiting: true, packet_count: 0, stream_seq: 0, }) } pub fn sync(&mut self, pts: u64) { if let Some(inner) = &mut self.inner { inner.sync(pts); } } pub fn reset( &mut self, stream_seq: u64, sample_rate: u32, channels: u32, ) -> anyhow::Result<()> { debug!( stream_seq, sample_rate, channels, "starting or restarting audio 
stream" ); let (format, conf) = select_conf(&self.device, sample_rate, channels)?; let (inner, stream) = match (format, channels) { (cpal::SampleFormat::F32, 1) => StreamInner::<[f32; 1]>::new(&self.device, conf), (cpal::SampleFormat::F32, 2) => StreamInner::<[f32; 2]>::new(&self.device, conf), (cpal::SampleFormat::I16, 1) => StreamInner::<[i16; 1]>::new(&self.device, conf), (cpal::SampleFormat::I16, 2) => StreamInner::<[i16; 2]>::new(&self.device, conf), _ => bail!("unsupported sample rate / format"), }?; self.stream_seq = stream_seq; self.stream = Some(stream); self.inner = Some(inner); self.stream_waiting = true; self.packet_count = 0; Ok(()) } pub fn recv_packet(&mut self, packet: Arc) -> anyhow::Result<()> { if let Some(inner) = &mut self.inner { trace!( stream_seq = packet.stream_seq(), seq = packet.seq(), pts = packet.pts(), len = packet.len(), "received full audio packet" ); self.packet_count += 1; inner.send_packet(packet)?; } if self.stream.is_some() && self.stream_waiting && self.packet_count > 2 { self.stream_waiting = false; self.stream.as_ref().unwrap().play()?; } Ok(()) } } fn select_conf( device: &cpal::Device, sample_rate: u32, channels: u32, ) -> anyhow::Result<(cpal::SampleFormat, cpal::StreamConfig)> { let mut confs = device .supported_output_configs() .context("unable to query supported audio playback formats")?; let valid = |format: cpal::SampleFormat| { move |conf: &cpal::SupportedStreamConfigRange| { conf.sample_format() == format && conf.min_sample_rate() <= cpal::SampleRate(sample_rate) && conf.max_sample_rate() >= cpal::SampleRate(sample_rate) && conf.channels() == channels as u16 } }; if let Some(conf_range) = confs .find(valid(cpal::SampleFormat::F32)) .or_else(|| confs.find(valid(cpal::SampleFormat::I16))) { let sample_format = conf_range.sample_format(); let buffer_size = match conf_range.buffer_size() { cpal::SupportedBufferSize::Unknown => cpal::BufferSize::Default, cpal::SupportedBufferSize::Range { min, .. 
} => { cpal::BufferSize::Fixed(std::cmp::max(*min, sample_rate / 100)) } }; let mut conf = cpal::StreamConfig::from(conf_range.with_sample_rate(cpal::SampleRate(sample_rate))); conf.buffer_size = buffer_size; return Ok((sample_format, conf)); } bail!("no valid audio output configuration found"); } ================================================ FILE: mm-client/src/bin/latency-test.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT use std::{sync::Arc, time}; use anyhow::{bail, Context as _}; use ash::vk; use clap::Parser; use mm_client::{ delegate::{AttachmentEvent, AttachmentProxy}, video::*, vulkan::*, }; use mm_client_common as client; use mm_protocol as protocol; use pollster::FutureExt as _; use tracing::{debug, error, warn}; use winit::event_loop::EventLoop; const APP_DIMENSION: u32 = 256; const DEFAULT_TIMEOUT: time::Duration = time::Duration::from_secs(1); #[derive(Debug, Parser)] #[command(name = "mmclient")] #[command(about = "The Magic Mirror reference client", long_about = None)] struct Cli { /// The server to connect to. #[arg(value_name = "HOST[:PORT]")] host: String, /// The codec to use. Defaults to h265. #[arg(long)] codec: Option, /// The framerate to use. Defaults to 60. #[arg(long)] framerate: Option, /// The number of tests to run. Defaults to 256. 
#[arg(short('n'), long)] samples: Option, } pub enum AppEvent { VideoStreamReady(Arc, VideoStreamParams), VideoFrameAvailable, AttachmentEvent(AttachmentEvent), } impl std::fmt::Debug for AppEvent { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { use AppEvent::*; match self { VideoStreamReady(_, params) => write!(f, "VideoStreamReady({params:?})"), VideoFrameAvailable => write!(f, "VideoFrameAvailable"), AttachmentEvent(ev) => std::fmt::Debug::fmt(ev, f), } } } impl From for AppEvent { fn from(event: AttachmentEvent) -> Self { Self::AttachmentEvent(event) } } impl From for AppEvent { fn from(event: VideoStreamEvent) -> Self { use VideoStreamEvent::*; match event { VideoStreamReady(tex, params) => AppEvent::VideoStreamReady(tex, params), VideoFrameAvailable => AppEvent::VideoFrameAvailable, } } } struct App { client: client::Client, args: Cli, proxy: winit::event_loop::EventLoopProxy, win: Option, } struct LatencyTest { attachment: client::Attachment, session_id: u64, stream: VideoStream, video_texture: Option>, frames_recvd: usize, copy_cb: vk::CommandBuffer, copy_fence: vk::Fence, copy_buffer: VkHostBuffer, next_block: usize, block_started: time::Instant, num_tests: usize, histogram: histo::Histogram, first_frame_recvd: Option, total_video_bytes: usize, vk: Arc, } fn main() -> anyhow::Result<()> { init_logging()?; let args = Cli::parse(); // Invisible window. 
let event_loop: EventLoop = EventLoop::with_user_event().build()?; let proxy = event_loop.create_proxy(); let client = client::Client::new(&args.host, "latency-test", time::Duration::from_secs(1)) .block_on() .context("failed to connect")?; let mut app = App { client, args, proxy, win: None, }; event_loop.run_app(&mut app)?; if let Some(win) = app.win.take() { drop(win.stream); unsafe { win.vk .device .free_command_buffers(win.vk.present_queue.command_pool, &[win.copy_cb]); win.vk.device.destroy_fence(win.copy_fence, None); destroy_host_buffer(&win.vk.device, &win.copy_buffer); } println!("{}", win.histogram); if let Some(first_frame_recvd) = win.first_frame_recvd { println!( "transfer rate: {:.2} mpbs ({:.2}kb per frame)", win.total_video_bytes as f64 * 8.0 / 1_000_000.0 / first_frame_recvd.elapsed().as_secs_f64(), win.total_video_bytes as f64 / 1_000.0 / win.frames_recvd as f64 ); } } Ok(()) } impl winit::application::ApplicationHandler for App { fn resumed(&mut self, event_loop: &winit::event_loop::ActiveEventLoop) { if self.win.is_some() { return; } match start_test(&self.args, &self.client, event_loop, self.proxy.clone()) { Ok(w) => { self.win = Some(w); } Err(e) => { error!("failed to start test: {:#}", e); event_loop.exit(); } } } fn window_event( &mut self, _event_loop: &winit::event_loop::ActiveEventLoop, _window_id: winit::window::WindowId, _event: winit::event::WindowEvent, ) { } fn about_to_wait(&mut self, event_loop: &winit::event_loop::ActiveEventLoop) { let Some(win) = &self.win else { return; }; if win.block_started.elapsed() > time::Duration::from_secs(3) { error!("timed out waiting for block"); event_loop.exit(); } } fn user_event(&mut self, event_loop: &winit::event_loop::ActiveEventLoop, event: AppEvent) { let Some(win) = &mut self.win else { return; }; match win.event(event) { Ok(true) => (), Ok(false) => event_loop.exit(), Err(e) => { error!("error: {}", e); event_loop.exit(); } } } fn exiting(&mut self, _event_loop: 
&winit::event_loop::ActiveEventLoop) { let Some(win) = &self.win else { return; }; let _ = win.attachment.detach().block_on(); let _ = self .client .end_session(win.session_id, DEFAULT_TIMEOUT) .block_on(); } } impl LatencyTest { fn event(&mut self, event: AppEvent) -> anyhow::Result { match event { AppEvent::AttachmentEvent(AttachmentEvent::VideoStreamStart(stream_seq, params)) => { assert_eq!(params.width, APP_DIMENSION); assert_eq!(params.height, APP_DIMENSION); self.stream .reset(stream_seq, APP_DIMENSION, APP_DIMENSION, params.codec)?; } AppEvent::AttachmentEvent(AttachmentEvent::VideoPacket(packet)) => { if self.first_frame_recvd.is_none() { self.first_frame_recvd = Some(time::Instant::now()); } self.total_video_bytes += packet.len(); self.stream.recv_packet(packet)?; } AppEvent::AttachmentEvent(AttachmentEvent::AttachmentEnded) => { bail!("server closed connection"); } AppEvent::AttachmentEvent(_) => (), AppEvent::VideoStreamReady(tex, params) => { assert_eq!(params.width, APP_DIMENSION); assert_eq!(params.height, APP_DIMENSION); self.video_texture = Some(tex); } AppEvent::VideoFrameAvailable => { if self.stream.prepare_frame()?.is_some() { self.frames_recvd += 1; match self.frames_recvd.cmp(&100) { std::cmp::Ordering::Less => (), std::cmp::Ordering::Equal => { debug!("starting test..."); self.send_space(); self.block_started = time::Instant::now(); self.next_block = 0; } std::cmp::Ordering::Greater => { self.check_frame()?; if self.next_block >= self.num_tests { return Ok(false); } } } } } } Ok(true) } fn send_space(&mut self) { debug!("sending space"); self.attachment.keyboard_input( client::input::Key::Space, client::input::KeyState::Pressed, 0, ); self.attachment.keyboard_input( client::input::Key::Space, client::input::KeyState::Released, 0, ); } fn check_frame(&mut self) -> anyhow::Result<()> { unsafe { self.submit_copy()?; } // Check the current block. if self.check_block(self.next_block.wrapping_sub(1)) { // Waiting... 
} else if self.check_block(self.next_block) { // Success! let elapsed = self.block_started.elapsed(); debug!("block {} took {}ms", self.next_block, elapsed.as_millis()); self.histogram.add(elapsed.as_millis() as u64); // Start the next one. // Sleep 10-100ms. use rand::Rng; let ms = (rand::thread_rng().gen::() % 90) + 10; std::thread::sleep(time::Duration::from_millis(ms)); self.next_block += 1; self.block_started = time::Instant::now(); self.send_space(); } else if self.next_block > 0 { warn!("neither current or next block are highlighted"); } if self.block_started.elapsed() > time::Duration::from_secs(3) { bail!("timed out waiting for block {}", self.next_block); } Ok(()) } fn check_block(&mut self, idx: usize) -> bool { let data = unsafe { std::slice::from_raw_parts(self.copy_buffer.access as *mut u8, 256 * 256) }; // Blocks are arranged in an 8x8 grid, and are 32x32 pixels. let idx = idx % 64; let y = (idx / 8) * 32 + 16; let x = (idx % 8) * 32 + 16; data[y * 256 + x] > 20 } unsafe fn submit_copy(&mut self) -> anyhow::Result<()> { let device = &self.vk.device; let texture = self.video_texture.as_ref().unwrap(); // Reset the command buffer. device.reset_command_buffer(self.copy_cb, vk::CommandBufferResetFlags::empty())?; // Begin the command buffer. { let begin_info = vk::CommandBufferBeginInfo::default() .flags(vk::CommandBufferUsageFlags::SIMULTANEOUS_USE); device.begin_command_buffer(self.copy_cb, &begin_info)?; } // Transfer the image to be readable. cmd_image_barrier( device, self.copy_cb, texture.image, vk::PipelineStageFlags::TOP_OF_PIPE, vk::AccessFlags::empty(), vk::PipelineStageFlags::TRANSFER, vk::AccessFlags::TRANSFER_READ, vk::ImageLayout::UNDEFINED, vk::ImageLayout::TRANSFER_SRC_OPTIMAL, ); // Copy the texture to the staging buffer. 
{ let region = vk::BufferImageCopy::default() .buffer_row_length(256) .buffer_image_height(256) .image_subresource(vk::ImageSubresourceLayers { aspect_mask: vk::ImageAspectFlags::PLANE_0, mip_level: 0, base_array_layer: 0, layer_count: 1, }) .image_extent(vk::Extent3D { width: 256, height: 256, depth: 1, }); let regions = [region]; device.cmd_copy_image_to_buffer( self.copy_cb, texture.image, vk::ImageLayout::TRANSFER_SRC_OPTIMAL, self.copy_buffer.buffer, ®ions, ) } device.end_command_buffer(self.copy_cb)?; device.reset_fences(&[self.copy_fence])?; device.queue_submit( self.vk.present_queue.queue, &[vk::SubmitInfo::default().command_buffers(&[self.copy_cb])], self.copy_fence, )?; device.wait_for_fences(&[self.copy_fence], true, u64::MAX)?; Ok(()) } } fn start_test( args: &Cli, client: &client::Client, event_loop: &winit::event_loop::ActiveEventLoop, proxy: winit::event_loop::EventLoopProxy, ) -> anyhow::Result { let attr = winit::window::Window::default_attributes().with_visible(false); let window = Arc::new(event_loop.create_window(attr)?); let vk = unsafe { Arc::new(VkContext::new(window.clone(), cfg!(debug_assertions))?) 
}; let codec = match args.codec.as_deref() { Some("h264") => protocol::VideoCodec::H264, Some("h265") | None => protocol::VideoCodec::H265, Some("av1") => protocol::VideoCodec::Av1, Some(v) => bail!("invalid codec: {:?}", v), }; // Create session, attach let sess = client .launch_session( "latency-test".to_string(), client::display_params::DisplayParams { width: APP_DIMENSION, height: APP_DIMENSION, framerate: args.framerate.unwrap_or(60), ui_scale: client::pixel_scale::PixelScale::ONE, }, vec![], DEFAULT_TIMEOUT, ) .block_on() .context("failed to create session")?; let config = client::AttachmentConfig { width: APP_DIMENSION, height: APP_DIMENSION, video_codec: codec.into(), video_profile: None, quality_preset: Some(6), audio_codec: None, sample_rate: None, channels: vec![], video_stream_seq_offset: 0, audio_stream_seq_offset: 0, }; let delegate = Arc::new(AttachmentProxy::new(proxy.clone())); let attachment = client .attach_session(sess.id, config, delegate, DEFAULT_TIMEOUT) .block_on() .context("failed to attach")?; // Just big enough for the Y plane. 
let copy_buffer = create_host_buffer( &vk.device, vk.device_info.host_visible_mem_type_index, vk::BufferUsageFlags::TRANSFER_DST, (APP_DIMENSION * APP_DIMENSION) as usize, )?; let copy_cb = create_command_buffer(&vk.device, vk.present_queue.command_pool)?; let copy_fence = create_fence(&vk.device, false)?; Ok(LatencyTest { attachment, session_id: sess.id, stream: VideoStream::new(vk.clone(), proxy.clone()), video_texture: None, frames_recvd: 0, copy_cb, copy_fence, copy_buffer, next_block: 0, block_started: time::Instant::now(), num_tests: args.samples.unwrap_or(256), histogram: histo::Histogram::with_buckets(10), first_frame_recvd: None, total_video_bytes: 0, vk: vk.clone(), }) } fn init_logging() -> anyhow::Result<()> { if let Ok(env_filter) = tracing_subscriber::EnvFilter::try_from_default_env() { tracing_subscriber::fmt().with_env_filter(env_filter).init(); } else { let filter = tracing_subscriber::EnvFilter::builder() .with_default_directive(tracing::level_filters::LevelFilter::INFO.into()) .from_env()? .add_directive("mm_client=info".parse()?) 
.add_directive("mm_client_common=info".parse()?); tracing_subscriber::fmt().with_env_filter(filter).init(); } Ok(()) } ================================================ FILE: mm-client/src/bin/mmclient.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT use std::{sync::Arc, time}; use anyhow::{anyhow, bail}; use clap::Parser; use ffmpeg_sys_next as ffmpeg_sys; use mm_client::{ audio, cursor::{cursor_icon_from_proto, load_cursor_image}, delegate::{AttachmentEvent, AttachmentProxy}, flash::Flash, gamepad::{spawn_gamepad_monitor, GamepadEvent}, keys::winit_key_to_proto, overlay::Overlay, render::Renderer, stats::STATS, video::{self, VideoStreamEvent}, vulkan, }; use mm_client_common as client; use mm_protocol as protocol; use pollster::FutureExt as _; use tracing::{debug, error, info, trace, warn}; use tracing_subscriber::Layer as _; use winit::{event_loop::ControlFlow, window}; const DEFAULT_CONNECT_TIMEOUT: time::Duration = time::Duration::from_secs(1); const DEFAULT_REQUEST_TIMEOUT: time::Duration = time::Duration::from_secs(30); const MAX_FRAME_TIME: time::Duration = time::Duration::from_nanos(1_000_000_000 / 24); const RESIZE_COOLDOWN: time::Duration = time::Duration::from_millis(500); #[derive(Debug, Default, Copy, Clone, PartialEq, Eq)] enum Resolution { #[default] Auto, Height(u32), Custom(u32, u32), } impl From<&str> for Resolution { fn from(s: &str) -> Self { if s == "auto" { Resolution::Auto } else if let Some((w, h)) = s.split_once('x') { Resolution::Custom( w.parse().expect("invalid resolution width"), h.parse().expect("invalid resolution height"), ) } else { Resolution::Height(s.parse().expect("invalid resolution height")) } } } #[derive(Debug, Parser)] #[command(name = "mmclient")] #[command(about = "The Magic Mirror reference client", long_about = None)] struct Cli { /// The server to connect to. 
#[arg(value_name = "HOST[:PORT]")] host: String, /// The id of the app, or the ID of an existing session. app: Option, /// Print a list of launchable applications and exit. #[arg(long)] list_apps: bool, /// Print a list of matching sessions and exit. #[arg(short = 'L', long)] list: bool, /// End a session (which may be specified by name or ID) and exit. #[arg(short = 'K', long)] kill: bool, /// Always resume an existing session, and error if none match. #[arg(short, long)] resume: bool, /// Always launch a new session, even if one exists that matches. #[arg(short, long)] launch: bool, /// On exit, automatically kill the session. #[arg(short = 'x', long)] kill_on_exit: bool, /// The streaming resolution to use. If not specified, this will be tied to /// the client resolution, and automatically change when the client window /// resizes. #[arg(long, required = false, default_value = "auto")] resolution: Resolution, /// Request 10-bit video output from the server. This will only work if /// both your display and the application in question support rendering /// HDR color. #[arg(long, required = false)] hdr: bool, /// The UI scale to communicate to the server. If not specified, this will /// be determined from the client-side window scale factor. #[arg(long, required = false)] ui_scale: Option, /// Video codec to use. #[arg(long, default_value = "h265")] codec: Option, /// Framerate to render at on the server side. #[arg(long, default_value = "30")] framerate: u32, /// The quality preset to use, from 0-9. #[arg(short, long, default_value = "6")] preset: u32, /// Open in fullscreen mode. #[arg(long)] fullscreen: bool, /// Enable the overlay, which shows various stats. 
#[arg(long)] overlay: bool, } struct AttachmentWindow { configured_resolution: Resolution, configured_ui_scale: Option, configured_framerate: u32, window: Arc, attachment: client::Attachment, attachment_config: client::AttachmentConfig, delegate: Arc>, session: client::Session, video_stream: video::VideoStream, audio_stream: audio::AudioStream, renderer: Renderer, window_width: u32, window_height: u32, window_ui_scale: f64, minimized: bool, next_frame: time::Instant, last_frame_received: time::Instant, resize_cooldown: Option, needs_refresh: Option, refresh_cooldown: Option, cursor_modifiers: winit::keyboard::ModifiersState, cursor_pos: Option<(f64, f64)>, flash: Flash, overlay: Option, stats_timer: time::Instant, _vk: Arc, } struct App { client: client::Client, args: Cli, attachment_window: Option, proxy: winit::event_loop::EventLoopProxy, end_session_on_exit: bool, } pub enum AppEvent { VideoStreamReady(Arc, video::VideoStreamParams), VideoFrameAvailable, AttachmentEvent(AttachmentEvent), GamepadEvent(GamepadEvent), } impl From for AppEvent { fn from(event: VideoStreamEvent) -> Self { use VideoStreamEvent::*; match event { VideoStreamReady(tex, params) => AppEvent::VideoStreamReady(tex, params), VideoFrameAvailable => AppEvent::VideoFrameAvailable, } } } impl From for AppEvent { fn from(value: AttachmentEvent) -> Self { Self::AttachmentEvent(value) } } impl From for AppEvent { fn from(event: GamepadEvent) -> Self { Self::GamepadEvent(event) } } impl std::fmt::Debug for AppEvent { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { AppEvent::VideoStreamReady(_, params) => write!(f, "VideoStreamReady({params:?})"), AppEvent::VideoFrameAvailable => write!(f, "VideoFrameAvailable"), AppEvent::AttachmentEvent(ev) => std::fmt::Debug::fmt(ev, f), AppEvent::GamepadEvent(ev) => std::fmt::Debug::fmt(ev, f), } } } impl winit::application::ApplicationHandler for App { fn resumed(&mut self, event_loop: &winit::event_loop::ActiveEventLoop) { if 
self.attachment_window.is_none() { let window = match init_window(&self.args, &self.client, event_loop, &self.proxy) { Ok(w) => w, Err(e) => { error!("failed to attach to session: {:#}", e); event_loop.exit(); return; } }; self.attachment_window = Some(window); } } fn window_event( &mut self, event_loop: &winit::event_loop::ActiveEventLoop, window_id: winit::window::WindowId, event: winit::event::WindowEvent, ) { let Some(win) = &mut self.attachment_window else { return; }; if win.window.id() != window_id { return; } if let Err(e) = win.renderer.handle_event(&event) { error!("renderer error: {:#}", e); event_loop.exit(); return; } let res = win.handle_window_event(event); win.schedule_next_frame(event_loop, res); } fn device_event( &mut self, _event_loop: &winit::event_loop::ActiveEventLoop, _device_id: winit::event::DeviceId, event: winit::event::DeviceEvent, ) { let Some(win) = &mut self.attachment_window else { return; }; let winit::event::DeviceEvent::MouseMotion { delta: (x, y) } = event else { return; }; if let Some((x, y)) = win.motion_vector_to_attachment_space(x, y) { win.attachment.relative_pointer_motion(x, y) } } fn user_event(&mut self, event_loop: &winit::event_loop::ActiveEventLoop, event: AppEvent) { let Some(win) = &mut self.attachment_window else { return; }; let res = win.handle_app_event(event_loop, &self.client, event); win.schedule_next_frame(event_loop, res); } fn about_to_wait(&mut self, event_loop: &winit::event_loop::ActiveEventLoop) { let Some(win) = &mut self.attachment_window else { return; }; let res = win.idle(&self.client); win.schedule_next_frame(event_loop, res); } fn exiting(&mut self, _event_loop: &winit::event_loop::ActiveEventLoop) { if let Some(AttachmentWindow { attachment, session, .. 
}) = self.attachment_window.take() { debug!("detaching from session"); match attachment.detach().block_on() { Ok(()) | Err(client::ClientError::Detached) => (), Err(err) => error!(?err, "failed to detach cleanly"), } if self.end_session_on_exit { debug!("ending session"); match self .client .end_session(session.id, DEFAULT_REQUEST_TIMEOUT) .block_on() { Ok(()) => (), Err(client::ClientError::ServerError(err)) if err.err_code() == protocol::error::ErrorCode::ErrorSessionNotFound => {} Err(err) => error!(?err, "failed to end session"), } } } } } impl AttachmentWindow { fn handle_window_event(&mut self, event: winit::event::WindowEvent) -> anyhow::Result { trace!(?event, "handling window event"); use winit::event::*; match event { WindowEvent::RedrawRequested => { self.video_stream.prepare_frame()?; self.video_stream.mark_frame_rendered(); if !self.minimized && self.video_stream.is_ready() { unsafe { self.renderer.render(|ui| { self.flash.build(ui)?; if let Some(ref mut overlay) = self.overlay { overlay.build(ui)?; } Ok(()) })?; }; } self.next_frame = time::Instant::now() + MAX_FRAME_TIME; } WindowEvent::CloseRequested => return Ok(false), WindowEvent::Resized(size) => { if size.width == 0 || size.height == 0 { self.minimized = true; } else { debug!("resize event: {}x{}", size.width, size.height); if size.width != self.window_width || size.height != self.window_height { if let Some(ref mut overlay) = self.overlay { overlay.reposition(); } // Trigger a stream resize, but debounce first. self.resize_cooldown = Some(time::Instant::now() + RESIZE_COOLDOWN); } self.minimized = false; } } WindowEvent::ScaleFactorChanged { scale_factor, .. } => { debug!("window scale factor changed to {}", scale_factor); // Winit sends us a Resized event, immediately after this // one, with the new physical resolution. 
} WindowEvent::ModifiersChanged(modifiers) => { self.cursor_modifiers = modifiers.state(); } WindowEvent::KeyboardInput { event: KeyEvent { physical_key: winit::keyboard::PhysicalKey::Code(code), logical_key, state, repeat, .. }, .. } => { if state == ElementState::Pressed && logical_key == winit::keyboard::Key::Character("d".into()) && self.cursor_modifiers.control_key() { return Ok(false); } else { let char = match logical_key { winit::keyboard::Key::Character(text) => text.chars().next(), _ => None, }; let state = match state { _ if repeat => client::input::KeyState::Repeat, ElementState::Pressed => client::input::KeyState::Pressed, ElementState::Released => client::input::KeyState::Released, }; let key = winit_key_to_proto(code); if key == protocol::keyboard_input::Key::Unknown { debug!("unknown key: {:?}", code); } else { self.attachment .keyboard_input(key, state, char.map_or(0, Into::into)); } } } WindowEvent::CursorMoved { position, .. } => { let new_position = self.renderer.get_texture_aspect().and_then(|aspect| { // Calculate coordinates in [-1.0, 1.0]; let (clip_x, clip_y) = ( (position.x / self.window_width as f64) * 2.0 - 1.0, (position.y / self.window_height as f64) * 2.0 - 1.0, ); // Stretch the space to account for letterboxing. let clip_x = clip_x * aspect.0; let clip_y = clip_y * aspect.1; // In the letterbox. if clip_x.abs() > 1.0 || clip_y.abs() > 1.0 { return None; } // Convert to texture coordinates. let x = (clip_x + 1.0) / 2.0; let y = (clip_y + 1.0) / 2.0; // Convert the position to physical coordinates in the remote display. 
let cursor_x = x * self.attachment_config.width as f64; let cursor_y = y * self.attachment_config.height as f64; Some((cursor_x, cursor_y)) }); if let Some((cursor_x, cursor_y)) = new_position { self.attachment.pointer_motion(cursor_x, cursor_y); if new_position.is_some() && self.cursor_pos.is_none() { self.attachment.pointer_entered(); } else if new_position.is_none() && self.cursor_pos.is_some() { self.attachment.pointer_left(); } self.cursor_pos = new_position; } } WindowEvent::CursorEntered { .. } => { // Handled on the CursorMoved event. } WindowEvent::CursorLeft { .. } => { if self.cursor_pos.take().is_some() { self.attachment.pointer_left() } } WindowEvent::MouseInput { state, button, .. } => { use protocol::pointer_input::*; if self.cursor_pos.is_none() { return Ok(true); } let button = match button { winit::event::MouseButton::Left => Button::Left, winit::event::MouseButton::Right => Button::Right, winit::event::MouseButton::Middle => Button::Middle, winit::event::MouseButton::Back => Button::Back, winit::event::MouseButton::Forward => Button::Forward, winit::event::MouseButton::Other(id) => { debug!("skipping unknown mouse button: {}", id); return Ok(true); } }; let state = match state { ElementState::Pressed => ButtonState::Pressed, ElementState::Released => ButtonState::Released, }; let (cursor_x, cursor_y) = self.cursor_pos.unwrap(); self.attachment .pointer_input(button, state, cursor_x, cursor_y); } WindowEvent::MouseWheel { delta: MouseScrollDelta::LineDelta(x, y), phase: TouchPhase::Moved, .. } => self.attachment.pointer_scroll( client::input::ScrollType::Discrete, x as f64, y as f64, ), WindowEvent::MouseWheel { delta: MouseScrollDelta::PixelDelta(vector), phase: TouchPhase::Moved, .. 
} => { if let Some((x, y)) = self.motion_vector_to_attachment_space(vector.x, vector.y) { self.attachment .pointer_scroll(client::input::ScrollType::Continuous, x, y); } } _ => (), } Ok(true) } fn handle_app_event( &mut self, event_loop: &winit::event_loop::ActiveEventLoop, client: &client::Client, event: AppEvent, ) -> anyhow::Result { trace!(?event, "handling event"); use AttachmentEvent::*; match event { AppEvent::AttachmentEvent(ev) => match ev { VideoStreamStart(stream_seq, params) => { self.attachment_config.video_stream_seq_offset = stream_seq.max(self.attachment_config.video_stream_seq_offset); self.video_stream.reset( stream_seq, params.width, params.height, params.codec, )?; self.needs_refresh = None; } VideoPacket(packet) => { self.last_frame_received = time::Instant::now(); self.video_stream.recv_packet(packet)?; } DroppedVideoPacket(dropped) => { // Only request a keyframe once every ten seconds. if dropped.hierarchical_layer == 0 { self.needs_refresh = Some(dropped.stream_seq); } } AudioStreamStart(stream_seq, params) => { self.attachment_config.audio_stream_seq_offset = stream_seq.max(self.attachment_config.audio_stream_seq_offset); self.audio_stream.reset( stream_seq, params.sample_rate, params.channels.len() as u32, )?; } AudioPacket(packet) => { self.audio_stream.recv_packet(packet)?; } UpdateCursor { icon, image, hotspot_x, hotspot_y, } => { if let Some(image) = image { if let Ok(cursor) = load_cursor_image(&image, hotspot_x, hotspot_y) .map(|src| event_loop.create_custom_cursor(src)) { self.window.set_cursor(cursor); self.window.set_cursor_visible(true); } else { error!(image_len = image.len(), "custom cursor image update failed"); } } else if icon == protocol::update_cursor::CursorIcon::None { self.window.set_cursor_visible(false); } else { self.window.set_cursor(cursor_icon_from_proto(icon)); self.window.set_cursor_visible(true); } } LockPointer(x, y) => { debug!(x, y, "cursor locked"); // On most platforms, we have to lock the cursor before 
we // warp it. On mac, it's the other way around. #[cfg(not(target_vendor = "apple"))] self.window .set_cursor_grab(winit::window::CursorGrabMode::Locked)?; if let Some(aspect) = self.renderer.get_texture_aspect() { let width = self.attachment_config.width; let height = self.attachment_config.height; // Map vector to [-0.5, 0.5]. let x = (x / width as f64) - 0.5; let y = (y / height as f64) - 0.5; // Squish the space to account for letterboxing. let x = x / aspect.0; let y = y / aspect.1; // Map to the screen size. let x = (x + 0.5) * self.window_width as f64; let y = (y + 0.5) * self.window_height as f64; let pos: winit::dpi::PhysicalPosition = (x, y).into(); self.window.set_cursor_position(pos)?; } #[cfg(target_vendor = "apple")] self.window .set_cursor_grab(winit::window::CursorGrabMode::Locked)?; } ReleasePointer => { self.window .set_cursor_grab(winit::window::CursorGrabMode::None)?; } DisplayParamsChanged { params, reattach_required, } => { if reattach_required { self.attachment_config.width = params.width; self.attachment_config.height = params.height; // TODO: this blocks the app, which is not ideal. // We could spawn a thread for this, or reuse one. 
debug!("reattaching to session after resize"); self.attachment = client .attach_session( self.session.id, self.attachment_config.clone(), self.delegate.clone(), DEFAULT_REQUEST_TIMEOUT, ) .block_on()?; } self.session.display_params = params; } AttachmentEnded => { info!("attachment ended by server"); return Ok(false); } }, AppEvent::VideoStreamReady(texture, params) => { self.renderer.bind_video_texture(texture, params)?; } AppEvent::VideoFrameAvailable => { if self.video_stream.prepare_frame()?.is_some() { self.window.request_redraw(); } } AppEvent::GamepadEvent(gev) => match gev { GamepadEvent::Available(pad) => self.attachment.gamepad_available(pad), GamepadEvent::Unavailable(id) => self.attachment.gamepad_unavailable(id), GamepadEvent::Input(id, button, state) => { self.attachment.gamepad_input(id, button, state) } GamepadEvent::Motion(id, axis, value) => { self.attachment.gamepad_motion(id, axis, value) } }, } Ok(true) } fn idle(&mut self, client: &client::Client) -> anyhow::Result { if self.next_frame.elapsed() > time::Duration::ZERO { self.window.request_redraw(); } if self.stats_timer.elapsed() > time::Duration::from_millis(100) { STATS.set_connection_rtt(client.stats().rtt) } let last_frame = self.last_frame_received.elapsed(); if last_frame > time::Duration::from_secs(1) { if last_frame > DEFAULT_REQUEST_TIMEOUT { // TODO: this fires when we've tabbed away. bail!("timed out waiting for video frames"); } else { self.flash.set_message("waiting for server..."); } } // Debounced processing of the resize event. 
if self.resize_cooldown.is_some() && self.resize_cooldown.unwrap().elapsed() > time::Duration::ZERO { let size = self.window.inner_size(); let scale_factor = self.window.scale_factor(); if size.width != self.window_width || size.height != self.window_height || scale_factor != self.window_ui_scale { debug!( width = size.width, height = size.height, scale_factor, "window resized" ); self.window_width = size.width; self.window_height = size.height; self.window_ui_scale = scale_factor; let desired_ui_scale = determine_ui_scale( self.configured_ui_scale .unwrap_or(self.window.scale_factor()), ); let (desired_width, desired_height) = determine_resolution( self.configured_resolution, self.window_width, self.window_height, ); let desired_params = client::display_params::DisplayParams { width: desired_width, height: desired_height, ui_scale: desired_ui_scale, framerate: self.configured_framerate, }; // Update the session to match our desired resolution or // scale. Note that this is skipped if there is no // current attachment (and `current_streaming_res` is // None). if desired_params != self.session.display_params { debug!( "resizing session to {}x{}@{} (scale: {})", desired_width, desired_height, self.configured_framerate, desired_ui_scale, ); self.flash.set_message("resizing..."); // TODO: this blocks the app. client .update_session_display_params( self.session.id, desired_params, DEFAULT_REQUEST_TIMEOUT, ) .block_on()?; } } self.resize_cooldown = None; } // Request a video refresh if we need one, but only every ten seconds. 
if self.needs_refresh.is_some() && self .refresh_cooldown .is_none_or(|t| t.elapsed() > time::Duration::from_secs(10)) { let stream_seq = self.needs_refresh.unwrap(); debug!(stream_seq, "requesting video refresh"); self.attachment.request_video_refresh(stream_seq); self.refresh_cooldown = Some(time::Instant::now()); self.needs_refresh = None; } Ok(true) } fn schedule_next_frame( &mut self, event_loop: &winit::event_loop::ActiveEventLoop, res: anyhow::Result, ) { match res { Ok(true) => { event_loop.set_control_flow(ControlFlow::WaitUntil(self.next_frame)); } Ok(false) => event_loop.exit(), Err(e) => { error!("{:#}", e); event_loop.exit() } } } fn motion_vector_to_attachment_space(&self, x: f64, y: f64) -> Option<(f64, f64)> { let (aspect_x, aspect_y) = self.renderer.get_texture_aspect()?; // Map vector to [0, 1]. (It can also be negative.) let (x, y) = ( (x / self.window_width as f64), (y / self.window_height as f64), ); // Stretch the space to account for letterboxing. For // example, if the video texture only takes up one third // of the screen vertically, and we scroll up one third // of the window height, the resulting vector should be [0, // -1.0]. 
let x = x * aspect_x; let y = y * aspect_y; Some(( x * self.attachment_config.width as f64, y * self.attachment_config.height as f64, )) } } pub fn main() -> anyhow::Result<()> { init_logging()?; let args = Cli::parse(); let cmds: u8 = vec![ args.list_apps, args.list, args.kill, args.launch, args.resume, ] .into_iter() .map(|b| b as u8) .sum(); if cmds > 1 { bail!("only one of --launch, --resume, --list, or --kill may be specified"); } else if !(args.list || args.list_apps) && args.app.is_none() { bail!("an app name or session ID must be specified"); } else if args.list_apps && args.app.is_some() { bail!("an app name or session ID may not be specified alongside --list-apps") } debug!("establishing connection to {:}", &args.host); let client = client::Client::new(&args.host, "mmclient", DEFAULT_CONNECT_TIMEOUT).block_on()?; if args.list_apps { return cmd_list_apps(&client); } else if args.list { return cmd_list_sessions(&args, &client); } else if args.kill { return cmd_kill(&args, &client); } let event_loop = winit::event_loop::EventLoop::with_user_event().build()?; let proxy = event_loop.create_proxy(); let end_session_on_exit = args.kill_on_exit; let mut app = App { client, args, attachment_window: None, proxy, end_session_on_exit, }; event_loop.run_app(&mut app)?; Ok(()) } fn init_window( args: &Cli, client: &client::Client, event_loop: &winit::event_loop::ActiveEventLoop, proxy: &winit::event_loop::EventLoopProxy, ) -> anyhow::Result { let sessions = client.list_sessions(DEFAULT_REQUEST_TIMEOUT).block_on()?; let target = args.app.clone().unwrap(); let matched = filter_sessions(sessions, args.app.as_ref().unwrap()); if !args.launch && matched.len() > 1 { bail!( "multiple sessions found matching {:?}, specify a session ID to attach or use \ --launch to create a new one.", target, ); } else if args.resume && matched.is_empty() { bail!("no session found matching {:?}", target); } let configured_codec = match args.codec.as_deref() { Some("h264") => 
client::codec::VideoCodec::H264, Some("h265") | None => client::codec::VideoCodec::H265, Some("av1") => client::codec::VideoCodec::Av1, Some(v) => bail!("invalid codec: {:?}", v), }; let configured_profile = if args.hdr { protocol::VideoProfile::Hdr10 } else { protocol::VideoProfile::Hd }; let session = if args.launch || matched.is_empty() { None } else { Some(matched[0].clone()) }; let window_attr = if args.fullscreen { window::Window::default_attributes() .with_fullscreen(Some(window::Fullscreen::Borderless(None))) } else { window::Window::default_attributes() }; let window = Arc::new(event_loop.create_window(window_attr)?); let vk = unsafe { Arc::new(vulkan::VkContext::new( window.clone(), cfg!(debug_assertions), )?) }; let renderer = Renderer::new(vk.clone(), window.clone(), args.hdr)?; let window_size = window.inner_size(); let window_ui_scale = window.scale_factor(); let (width, height) = determine_resolution(args.resolution, window_size.width, window_size.height); let desired_params = client::display_params::DisplayParams { width, height, framerate: args.framerate, ui_scale: determine_ui_scale(args.ui_scale.unwrap_or(window_ui_scale)), }; let initial_gamepads = spawn_gamepad_monitor(proxy.clone())?; let session_id = if let Some(session) = session { if session.display_params != desired_params { debug!("updating session params to {:?}", desired_params); client .update_session_display_params(session.id, desired_params, DEFAULT_REQUEST_TIMEOUT) .block_on()?; } session.id } else { let target = args.app.as_ref().unwrap(); let target = target.rsplit("/").next().unwrap(); info!("launching a new session for for app {:?}", target); client .launch_session( target.into(), desired_params.clone(), initial_gamepads.clone(), DEFAULT_REQUEST_TIMEOUT, ) .block_on()? .id }; // Refetch the session params. let session = client .list_sessions(DEFAULT_REQUEST_TIMEOUT) .block_on()? 
.into_iter() .find(|s| s.id == session_id) .ok_or(anyhow!("new session not found in session list"))?; let now = time::Instant::now(); let mut flash = Flash::new(); flash.set_message("connecting..."); let overlay = if args.overlay { Some(Overlay::new(args.framerate)) } else { None }; let delegate = Arc::new(AttachmentProxy::new(proxy.clone())); let audio_stream = audio::AudioStream::new()?; let video_stream = video::VideoStream::new(vk.clone(), proxy.clone()); spawn_gamepad_monitor(proxy.clone())?; let attachment_config = client::AttachmentConfig { width: session.display_params.width, height: session.display_params.height, video_codec: Some(configured_codec), video_profile: Some(configured_profile), quality_preset: Some(args.preset + 1), audio_codec: None, sample_rate: None, channels: Vec::new(), video_stream_seq_offset: 0, audio_stream_seq_offset: 0, }; debug!(session_id = session.id, "attaching to session"); let attachment = client .attach_session( session.id, attachment_config.clone(), delegate.clone(), DEFAULT_REQUEST_TIMEOUT, ) .block_on()?; Ok(AttachmentWindow { configured_resolution: args.resolution, configured_framerate: args.framerate, configured_ui_scale: args.ui_scale, window, attachment, attachment_config, delegate, session, video_stream, audio_stream, renderer, window_width: window_size.width, window_height: window_size.height, window_ui_scale, minimized: false, next_frame: now + MAX_FRAME_TIME, last_frame_received: now, resize_cooldown: None, needs_refresh: None, refresh_cooldown: None, cursor_modifiers: winit::keyboard::ModifiersState::default(), cursor_pos: None, flash, overlay, stats_timer: now, _vk: vk, }) } fn init_logging() -> anyhow::Result<()> { if cfg!(feature = "tracy") { use tracing_subscriber::layer::SubscriberExt; let filter = tracing_subscriber::EnvFilter::builder() .with_default_directive(tracing::level_filters::LevelFilter::INFO.into()) .from_env()? .add_directive("mmclient=trace".parse()?) .add_directive("mm_client=trace".parse()?) 
.add_directive("mm_client_common=trace".parse()?); tracing::subscriber::set_global_default( tracing_subscriber::registry() .with(tracing_tracy::TracyLayer::default().with_filter(filter)), ) .expect("setup tracy layer"); } else if let Ok(env_filter) = tracing_subscriber::EnvFilter::try_from_default_env() { tracing_subscriber::fmt().with_env_filter(env_filter).init(); } else { let filter = tracing_subscriber::EnvFilter::builder() .with_default_directive(tracing::level_filters::LevelFilter::INFO.into()) .from_env()? .add_directive("mmclient=info".parse()?) .add_directive("mm_client=info".parse()?) .add_directive("mm_client_common=info".parse()?); tracing_subscriber::fmt().with_env_filter(filter).init(); } // Squash ffmpeg logs. unsafe { ffmpeg_sys::av_log_set_level(ffmpeg_sys::AV_LOG_QUIET); // TODO: the callback has to be variadic, which means using nightly // rust. // ffmpeg_sys::av_log_set_callback(Some(ffmpeg_log_callback)) } Ok(()) } fn determine_ui_scale(scale_factor: f64) -> client::pixel_scale::PixelScale { let scale = match scale_factor { x if x < 1.0 => client::pixel_scale::PixelScale::ONE, _ => { // Multiplying by 6/6 captures most possible fractional scales. 
let numerator = (scale_factor * 6.0).round() as u32; let denominator = 6; if numerator % denominator == 0 { client::pixel_scale::PixelScale::new(numerator / denominator, 1) } else { client::pixel_scale::PixelScale::new(numerator, denominator) } } }; if scale.is_fractional() { let rounded = scale.round_up(); warn!( requested = %scale, using = %rounded, "fractional scale not supported, rounding up" ); return rounded; } scale } fn determine_resolution(resolution: Resolution, width: u32, height: u32) -> (u32, u32) { match resolution { Resolution::Auto => (width.next_multiple_of(2), height.next_multiple_of(2)), Resolution::Height(h) => { let h = std::cmp::min(h, height).next_multiple_of(2); let w = (h * width / height).next_multiple_of(2); (w, h) } Resolution::Custom(w, h) => (w, h), } } fn filter_sessions(sessions: Vec, app: &str) -> Vec { if let Ok(id) = app.parse::() { return match sessions.into_iter().find(|s| s.id == id) { Some(s) => vec![s], None => vec![], }; } sessions .into_iter() .filter(|s| s.application_id == app) .collect() } fn cmd_list_apps(client: &client::Client) -> anyhow::Result<()> { let apps = client .list_applications(DEFAULT_REQUEST_TIMEOUT) .block_on()?; if apps.is_empty() { println!("No launchable applications found."); return Ok(()); } let mut apps = apps .into_iter() .map(|app| { let mut name = String::new(); for dir in &app.folder { name.push_str(dir); name.push('/'); } name.push_str(&app.id); (name, app.description) }) .collect::>(); apps.sort(); let mut tw = tabwriter::TabWriter::new(std::io::stdout()).padding(4); use std::io::Write as _; writeln!(&mut tw, "Name\tDescription")?; writeln!(&mut tw, "----\t-----------")?; for (name, desc) in apps { if desc.len() <= 80 { writeln!(&mut tw, "{}\t{}", name, desc)?; } else { writeln!(&mut tw, "{}\t{}...", name, &desc[..77])?; } } tw.flush()?; Ok(()) } fn cmd_list_sessions(args: &Cli, client: &client::Client) -> anyhow::Result<()> { let sessions = 
client.list_sessions(DEFAULT_REQUEST_TIMEOUT).block_on()?; let sessions = if let Some(target) = args.app.as_ref() { filter_sessions(sessions, target) } else { sessions }; if sessions.is_empty() { println!("No (matching) sessions found."); return Ok(()); } let now = time::SystemTime::now(); let mut tw = tabwriter::TabWriter::new(std::io::stdout()).padding(4); use std::io::Write as _; writeln!(&mut tw, "Session ID\tApplication Name\tRuntime")?; writeln!(&mut tw, "----------\t----------------\t-------")?; for session in sessions { let runtime = { // Round to seconds. let secs = now.duration_since(session.start)?.as_secs(); humantime::format_duration(time::Duration::from_secs(secs)).to_string() }; writeln!( &mut tw, "{}\t{}\t{}", session.id, session.application_id, runtime, )?; } tw.flush()?; Ok(()) } fn cmd_kill(args: &Cli, client: &client::Client) -> anyhow::Result<()> { let target = args.app.as_ref().unwrap(); let sessions = filter_sessions( client.list_sessions(DEFAULT_REQUEST_TIMEOUT).block_on()?, target, ); if sessions.is_empty() { println!("No (matching) sessions found."); return Ok(()); } else if sessions.len() > 1 { bail!("Multiple sessions matched!"); } client .end_session(sessions[0].id, DEFAULT_REQUEST_TIMEOUT) .block_on()?; Ok(()) } ================================================ FILE: mm-client/src/cursor.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT use mm_protocol as protocol; use winit::window::{CursorIcon, CustomCursor, CustomCursorSource}; pub fn load_cursor_image(image: &[u8], hs_x: u32, hs_y: u32) -> anyhow::Result { let cursor = image::load_from_memory_with_format(image, image::ImageFormat::Png)?; let w = cursor.width().try_into()?; let h = cursor.height().try_into()?; let hs_x = hs_x.try_into()?; let hs_y = hs_y.try_into()?; Ok(CustomCursor::from_rgba( cursor.to_rgba8().into_raw(), w, h, hs_x, hs_y, )?) 
} pub fn cursor_icon_from_proto(icon: protocol::update_cursor::CursorIcon) -> CursorIcon { match icon { protocol::update_cursor::CursorIcon::ContextMenu => CursorIcon::ContextMenu, protocol::update_cursor::CursorIcon::Help => CursorIcon::Help, protocol::update_cursor::CursorIcon::Pointer => CursorIcon::Pointer, protocol::update_cursor::CursorIcon::Progress => CursorIcon::Progress, protocol::update_cursor::CursorIcon::Wait => CursorIcon::Wait, protocol::update_cursor::CursorIcon::Cell => CursorIcon::Cell, protocol::update_cursor::CursorIcon::Crosshair => CursorIcon::Crosshair, protocol::update_cursor::CursorIcon::Text => CursorIcon::Text, protocol::update_cursor::CursorIcon::VerticalText => CursorIcon::VerticalText, protocol::update_cursor::CursorIcon::Alias => CursorIcon::Alias, protocol::update_cursor::CursorIcon::Copy => CursorIcon::Copy, protocol::update_cursor::CursorIcon::Move => CursorIcon::Move, protocol::update_cursor::CursorIcon::NoDrop => CursorIcon::NoDrop, protocol::update_cursor::CursorIcon::NotAllowed => CursorIcon::NotAllowed, protocol::update_cursor::CursorIcon::Grab => CursorIcon::Grab, protocol::update_cursor::CursorIcon::Grabbing => CursorIcon::Grabbing, protocol::update_cursor::CursorIcon::EResize => CursorIcon::EResize, protocol::update_cursor::CursorIcon::NResize => CursorIcon::NResize, protocol::update_cursor::CursorIcon::NeResize => CursorIcon::NeResize, protocol::update_cursor::CursorIcon::NwResize => CursorIcon::NwResize, protocol::update_cursor::CursorIcon::SResize => CursorIcon::SResize, protocol::update_cursor::CursorIcon::SeResize => CursorIcon::SeResize, protocol::update_cursor::CursorIcon::SwResize => CursorIcon::SwResize, protocol::update_cursor::CursorIcon::WResize => CursorIcon::WResize, protocol::update_cursor::CursorIcon::EwResize => CursorIcon::EwResize, protocol::update_cursor::CursorIcon::NsResize => CursorIcon::NsResize, protocol::update_cursor::CursorIcon::NeswResize => CursorIcon::NeswResize, 
protocol::update_cursor::CursorIcon::NwseResize => CursorIcon::NwseResize, protocol::update_cursor::CursorIcon::ColResize => CursorIcon::ColResize, protocol::update_cursor::CursorIcon::RowResize => CursorIcon::RowResize, protocol::update_cursor::CursorIcon::AllScroll => CursorIcon::AllScroll, protocol::update_cursor::CursorIcon::ZoomIn => CursorIcon::ZoomIn, protocol::update_cursor::CursorIcon::ZoomOut => CursorIcon::ZoomOut, _ => CursorIcon::Default, } } ================================================ FILE: mm-client/src/delegate.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT use std::sync::Arc; use mm_client_common as client; use tracing::error; // An implementation of client-common's AttachmentDelegate that converts // callbacks into winit events. #[derive(Debug)] pub struct AttachmentProxy + std::fmt::Debug + Send + 'static>( winit::event_loop::EventLoopProxy, ); impl + std::fmt::Debug + Send + 'static> AttachmentProxy { pub fn new(proxy: winit::event_loop::EventLoopProxy) -> Self { Self(proxy) } fn proxy(&self, ev: AttachmentEvent) { let _ = self.0.send_event(ev.into()); } } pub enum AttachmentEvent { VideoStreamStart(u64, client::VideoStreamParams), VideoPacket(Arc), DroppedVideoPacket(client::DroppedPacket), AudioStreamStart(u64, client::AudioStreamParams), AudioPacket(Arc), UpdateCursor { icon: client::input::CursorIcon, image: Option>, hotspot_x: u32, hotspot_y: u32, }, LockPointer(f64, f64), ReleasePointer, DisplayParamsChanged { params: client::display_params::DisplayParams, reattach_required: bool, }, AttachmentEnded, } impl std::fmt::Debug for AttachmentEvent { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { AttachmentEvent::VideoStreamStart(stream_seq, _) => { write!(f, "VideoStreamStart({})", stream_seq) } AttachmentEvent::VideoPacket(packet) => { write!(f, "VideoPacket({}, {})", packet.stream_seq(), packet.seq()) } 
AttachmentEvent::DroppedVideoPacket(dropped) => { write!( f, "DroppedVideoPacket({}, {}, layer={})", dropped.stream_seq, dropped.seq, dropped.hierarchical_layer ) } AttachmentEvent::AudioStreamStart(stream_seq, _) => { write!(f, "AudioStreamStart({})", stream_seq) } AttachmentEvent::AudioPacket(packet) => { write!(f, "AudioPacket({}, {})", packet.stream_seq(), packet.seq()) } AttachmentEvent::UpdateCursor { icon, image, .. } => { let len = image.as_ref().map(|img| img.len()).unwrap_or_default(); write!(f, "UpdateCursor({icon:?} image_len={len})",) } AttachmentEvent::LockPointer(x, y) => { write!(f, "LockPointer({}, {})", x, y) } AttachmentEvent::ReleasePointer => { write!(f, "ReleasePointer()") } AttachmentEvent::DisplayParamsChanged { reattach_required, .. } => { write!(f, "DisplayParamsChanged(reattach={})", reattach_required) } AttachmentEvent::AttachmentEnded => { write!(f, "AttachmentEnded") } } } } impl + std::fmt::Debug + Send + 'static> client::AttachmentDelegate for AttachmentProxy { fn video_stream_start(&self, stream_seq: u64, params: client::VideoStreamParams) { self.proxy(AttachmentEvent::VideoStreamStart(stream_seq, params)) } fn video_packet(&self, packet: Arc) { self.proxy(AttachmentEvent::VideoPacket(packet)) } fn dropped_video_packet(&self, dropped: client::DroppedPacket) { self.proxy(AttachmentEvent::DroppedVideoPacket(dropped)) } fn audio_stream_start(&self, stream_seq: u64, params: client::AudioStreamParams) { self.proxy(AttachmentEvent::AudioStreamStart(stream_seq, params)) } fn audio_packet(&self, packet: Arc) { self.proxy(AttachmentEvent::AudioPacket(packet)) } fn update_cursor( &self, icon: client::input::CursorIcon, image: Option>, hotspot_x: u32, hotspot_y: u32, ) { self.proxy(AttachmentEvent::UpdateCursor { icon, image, hotspot_x, hotspot_y, }) } fn lock_pointer(&self, x: f64, y: f64) { self.proxy(AttachmentEvent::LockPointer(x, y)) } fn release_pointer(&self) { self.proxy(AttachmentEvent::ReleasePointer) } fn display_params_changed( 
&self, params: client::display_params::DisplayParams, reattach_required: bool, ) { self.proxy(AttachmentEvent::DisplayParamsChanged { params, reattach_required, }) } fn error(&self, err: client::ClientError) { error!("error: {err:?}"); self.proxy(AttachmentEvent::AttachmentEnded) } fn attachment_ended(&self) { self.proxy(AttachmentEvent::AttachmentEnded) } } ================================================ FILE: mm-client/src/flash.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT use std::time; const FLASH_DURATION: time::Duration = time::Duration::from_millis(1350); const FADE_OUT_AFTER: time::Duration = time::Duration::from_millis(1000); pub struct Flash { message: Option<(String, time::Instant)>, } impl Flash { pub fn new() -> Self { Self { message: None } } pub fn set_message(&mut self, s: &str) { self.message = Some((s.to_owned(), time::Instant::now())); } pub fn build(&mut self, ui: &imgui::Ui) -> anyhow::Result<()> { if self.message.is_none() { return Ok(()); } let start = self.message.as_ref().unwrap().1; if start.elapsed() > FLASH_DURATION { self.message = None; return Ok(()); } let alpha = if start.elapsed() > FADE_OUT_AFTER { let remaining = FLASH_DURATION - start.elapsed(); remaining.as_secs_f32() / (FLASH_DURATION - FADE_OUT_AFTER).as_secs_f32() } else { 1.0 }; // Exponentially ease the alpha. 
        let alpha = alpha * alpha;

        let _style_alpha = ui.push_style_var(imgui::StyleVar::Alpha(alpha));
        let _style_border = ui.push_style_var(imgui::StyleVar::WindowBorderSize(0.0));

        let [_width, height] = ui.io().display_size;
        // Pin the flash window to the bottom-left corner of the display.
        if let Some(_window) = ui
            .window("flash")
            .position([0.0, height], imgui::Condition::Always)
            .position_pivot([0.0, 1.0])
            .no_decoration()
            .no_nav()
            .movable(false)
            .always_auto_resize(true)
            .bg_alpha(0.5 * alpha)
            .begin()
        {
            ui.set_window_font_scale(2.0);
            // `message` is Some here; `build` returns early at the top when
            // it's None or expired.
            ui.text(&self.message.as_ref().unwrap().0);
        }

        Ok(())
    }
}

impl Default for Flash {
    fn default() -> Self {
        Self::new()
    }
}

================================================ FILE: mm-client/src/font.rs ================================================

// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: MIT

use font_kit::{
    family_name::FamilyName,
    font::Font,
    properties::{Properties, Weight},
    source::SystemSource,
};
use tracing::debug;

// Selects a thin-weight monospace system font (falling back to sans-serif)
// for the UI overlay.
pub fn load_ui_font() -> anyhow::Result {
    let font = SystemSource::new()
        .select_best_match(
            &[FamilyName::Monospace, FamilyName::SansSerif],
            Properties::new().weight(Weight::THIN),
        )?
.load()?; debug!("font: {:?}", font); Ok(font) } // #[cfg(target_os = "macos")] // pub fn load_ui_font() -> anyhow::Result { // let ctf = core_text::font::new_ui_font_for_language(); // let font = unsafe { Font::from_native_font(ctf) }; // Ok(font) // } ================================================ FILE: mm-client/src/gamepad.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT use std::{collections::HashMap, time}; use anyhow::{anyhow, bail}; use gilrs::{Event, EventType}; use mm_client_common::input::{ Gamepad, GamepadAxis, GamepadButton, GamepadButtonState, GamepadLayout, }; use tracing::{debug, error, trace}; #[derive(Debug, Clone)] pub enum GamepadEvent { Available(Gamepad), Unavailable(u64), Input(u64, GamepadButton, GamepadButtonState), Motion(u64, GamepadAxis, f64), } #[derive(Debug, Default, Clone, Copy)] struct RemoteGamepad { id: u64, dpad: DpadState, } // Some gamepads treat the dpad as an axis, but we treat it as a // bunch of buttons. Therefore, it requires a bit of special handling. 
// Last-seen pressed state of each d-pad direction, used to emit only
// press/release *transitions* from continuous axis updates.
#[derive(Debug, Default, Clone, Copy)]
struct DpadState {
    up: bool,
    down: bool,
    left: bool,
    right: bool,
}

impl RemoteGamepad {
    /// Translates a d-pad axis update into button press/release events,
    /// deduplicating repeats against the stored `DpadState`.
    ///
    /// BUG FIX: the generic parameters (`<T>` on the function and winit
    /// types, `From<GamepadEvent>` in the bound) were missing, which does
    /// not compile. Additionally, a NaN axis value previously matched no
    /// guard and hit `unreachable!()`; it is now treated as centered.
    fn update_dpad<T>(
        &mut self,
        axis: gilrs::Axis,
        value: f32,
        proxy: &winit::event_loop::EventLoopProxy<T>,
    ) -> Result<(), winit::event_loop::EventLoopClosed<T>>
    where
        T: From<GamepadEvent> + Send,
    {
        // Only emit an event when the stored state actually changes.
        let set_pressed = |state: &mut bool, button| {
            if !*state {
                proxy.send_event(
                    GamepadEvent::Input(self.id, button, GamepadButtonState::Pressed).into(),
                )?;
            }

            *state = true;
            Ok(())
        };

        let set_released = |state: &mut bool, button| {
            if *state {
                proxy.send_event(
                    GamepadEvent::Input(self.id, button, GamepadButtonState::Released).into(),
                )?;
            }

            *state = false;
            Ok(())
        };

        match axis {
            gilrs::Axis::DPadX => {
                if value < 0.0 {
                    set_pressed(&mut self.dpad.left, GamepadButton::DpadLeft)?;
                    set_released(&mut self.dpad.right, GamepadButton::DpadRight)?;
                } else if value > 0.0 {
                    set_released(&mut self.dpad.left, GamepadButton::DpadLeft)?;
                    set_pressed(&mut self.dpad.right, GamepadButton::DpadRight)?;
                } else {
                    // Zero (or NaN) means centered: release both directions.
                    set_released(&mut self.dpad.left, GamepadButton::DpadLeft)?;
                    set_released(&mut self.dpad.right, GamepadButton::DpadRight)?;
                }
            }
            gilrs::Axis::DPadY => {
                if value < 0.0 {
                    set_pressed(&mut self.dpad.up, GamepadButton::DpadUp)?;
                    set_released(&mut self.dpad.down, GamepadButton::DpadDown)?;
                } else if value > 0.0 {
                    set_released(&mut self.dpad.up, GamepadButton::DpadUp)?;
                    set_pressed(&mut self.dpad.down, GamepadButton::DpadDown)?;
                } else {
                    set_released(&mut self.dpad.up, GamepadButton::DpadUp)?;
                    set_released(&mut self.dpad.down, GamepadButton::DpadDown)?;
                }
            }
            // The caller only forwards DPadX/DPadY here.
            _ => unreachable!(),
        }

        Ok(())
    }
}

/// Spawns a thread to watch for gamepad events. Returns the initial list of
/// available gamepads.
pub fn spawn_gamepad_monitor( proxy: winit::event_loop::EventLoopProxy, ) -> anyhow::Result> where T: From + Send, { let mut gilrs = gilrs::Gilrs::new().map_err(|e| anyhow!("failed to create gilrs context: {e:?}"))?; let (initial_tx, initial_rx) = oneshot::channel(); std::thread::spawn(move || { let mut remote_gamepads = HashMap::new(); let mut initial = Vec::new(); for (id, pad) in gilrs.gamepads() { let protocol_id = gamepad_id(pad.uuid()); let layout = layout(pad); remote_gamepads.insert( id, RemoteGamepad { id: protocol_id, ..Default::default() }, ); initial.push(Gamepad { id: protocol_id, layout, }); } if initial_tx.send(initial).is_err() { return; } loop { let Some(Event { id, event: ev, .. }) = gilrs.next_event_blocking(None) else { continue; }; trace!(?id, ?ev, "gamepad event"); if let EventType::Disconnected = ev { if let Some(pad) = remote_gamepads.remove(&id) { if proxy .send_event(GamepadEvent::Unavailable(pad.id).into()) .is_err() { break; } } continue; }; if let EventType::Connected = ev { let Some(pad) = gilrs.connected_gamepad(id) else { error!(?ev, "no gamepad matching event"); continue; }; let protocol_id = gamepad_id(pad.uuid()); remote_gamepads.insert( id, RemoteGamepad { id: protocol_id, ..Default::default() }, ); if proxy .send_event( GamepadEvent::Available(Gamepad { id: protocol_id, layout: layout(pad), }) .into(), ) .is_err() { break; } continue; } let pad = remote_gamepads.get_mut(&id).unwrap(); if handle_gilrs_event(&proxy, pad, ev).is_err() { break; }; } }); match initial_rx.recv_timeout(time::Duration::from_secs(1)) { Ok(initial) => Ok(initial), Err(_) => bail!("gamepad monitor thread panicked"), } } fn handle_gilrs_event( proxy: &winit::event_loop::EventLoopProxy, pad: &mut RemoteGamepad, ev: gilrs::EventType, ) -> Result<(), winit::event_loop::EventLoopClosed> where T: From + Send, { let gev = match ev { EventType::ButtonPressed(button, _) => { input_event(pad.id, button, GamepadButtonState::Pressed) } 
EventType::ButtonReleased(button, _) => { input_event(pad.id, button, GamepadButtonState::Released) } EventType::AxisChanged(axis, mut value, _) => { // Some gamepads treat the dpad as an axis. The protocol // treats it as a bunch of buttons. if matches!(axis, gilrs::Axis::DPadX | gilrs::Axis::DPadY) { pad.update_dpad(axis, value, proxy)?; return Ok(()); } let Some(axis) = girls_axis_to_proto(axis) else { debug!(?ev, "skipping unknown axis event"); return Ok(()); }; // Gilrs treats 1.0 as up. if matches!(axis, GamepadAxis::LeftY | GamepadAxis::RightY) { value *= -1.0; } Some(GamepadEvent::Motion(pad.id, axis, value as _)) } EventType::ButtonChanged(button, value, _) => { // Not sure why gilrs doesn't consider this an axis. match button { gilrs::Button::LeftTrigger2 => Some(GamepadEvent::Motion( pad.id, GamepadAxis::LeftTrigger, value.max(0.0) as _, )), gilrs::Button::RightTrigger2 => Some(GamepadEvent::Motion( pad.id, GamepadAxis::RightTrigger, value.max(0.0) as _, )), _ => None, } } EventType::Dropped => None, // TODO: do we need these? EventType::ButtonRepeated(_, _) => None, // Handled above. EventType::Connected | EventType::Disconnected => unreachable!(), }; if let Some(ev) = gev { proxy.send_event(ev.into())?; } else { debug!(?ev, "ignoring gamepad event") } Ok(()) } fn input_event( protocol_id: u64, button: gilrs::Button, state: GamepadButtonState, ) -> Option { gilrs_button_to_proto(button).map(|button| GamepadEvent::Input(protocol_id, button, state)) } fn gamepad_id(uuid: [u8; 16]) -> u64 { // Truncating a UUID is squicky, but serves our purposes fine. 
let (_, last_64) = uuid.split_at(8); let last_64: [u8; 8] = last_64.try_into().unwrap(); u64::from_ne_bytes(last_64) } fn layout(pad: gilrs::Gamepad) -> GamepadLayout { match pad.vendor_id() { Some(0x54c) => GamepadLayout::SonyDualshock, _ => GamepadLayout::GenericDualStick, } } fn girls_axis_to_proto(axis: gilrs::Axis) -> Option { let axis = match axis { gilrs::Axis::LeftStickX => GamepadAxis::LeftX, gilrs::Axis::LeftStickY => GamepadAxis::LeftY, gilrs::Axis::RightStickX => GamepadAxis::RightX, gilrs::Axis::RightStickY => GamepadAxis::RightY, gilrs::Axis::LeftZ => GamepadAxis::RightTrigger, gilrs::Axis::RightZ => GamepadAxis::RightTrigger, _ => return None, }; Some(axis) } fn gilrs_button_to_proto(button: gilrs::Button) -> Option { let button = match button { gilrs::Button::South => GamepadButton::South, gilrs::Button::East => GamepadButton::East, gilrs::Button::North => GamepadButton::North, gilrs::Button::West => GamepadButton::West, gilrs::Button::C => GamepadButton::C, gilrs::Button::Z => GamepadButton::Z, gilrs::Button::LeftTrigger => GamepadButton::ShoulderLeft, gilrs::Button::LeftTrigger2 => GamepadButton::TriggerLeft, gilrs::Button::RightTrigger => GamepadButton::ShoulderRight, gilrs::Button::RightTrigger2 => GamepadButton::TriggerRight, gilrs::Button::Select => GamepadButton::Select, gilrs::Button::Start => GamepadButton::Start, gilrs::Button::Mode => GamepadButton::Logo, gilrs::Button::LeftThumb => GamepadButton::JoystickLeft, gilrs::Button::RightThumb => GamepadButton::JoystickRight, gilrs::Button::DPadUp => GamepadButton::DpadUp, gilrs::Button::DPadDown => GamepadButton::DpadDown, gilrs::Button::DPadLeft => GamepadButton::DpadLeft, gilrs::Button::DPadRight => GamepadButton::DpadRight, _ => return None, }; Some(button) } ================================================ FILE: mm-client/src/keys.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT use mm_protocol::keyboard_input::Key; use 
winit::keyboard::KeyCode; pub fn winit_key_to_proto(key: KeyCode) -> Key { match key { KeyCode::Backquote => Key::Backquote, KeyCode::Backslash => Key::Backslash, KeyCode::BracketLeft => Key::BracketLeft, KeyCode::BracketRight => Key::BracketRight, KeyCode::Comma => Key::Comma, KeyCode::Digit0 => Key::Digit0, KeyCode::Digit1 => Key::Digit1, KeyCode::Digit2 => Key::Digit2, KeyCode::Digit3 => Key::Digit3, KeyCode::Digit4 => Key::Digit4, KeyCode::Digit5 => Key::Digit5, KeyCode::Digit6 => Key::Digit6, KeyCode::Digit7 => Key::Digit7, KeyCode::Digit8 => Key::Digit8, KeyCode::Digit9 => Key::Digit9, KeyCode::Equal => Key::Equal, KeyCode::IntlBackslash => Key::IntlBackslash, KeyCode::IntlRo => Key::IntlRo, KeyCode::IntlYen => Key::IntlYen, KeyCode::KeyA => Key::A, KeyCode::KeyB => Key::B, KeyCode::KeyC => Key::C, KeyCode::KeyD => Key::D, KeyCode::KeyE => Key::E, KeyCode::KeyF => Key::F, KeyCode::KeyG => Key::G, KeyCode::KeyH => Key::H, KeyCode::KeyI => Key::I, KeyCode::KeyJ => Key::J, KeyCode::KeyK => Key::K, KeyCode::KeyL => Key::L, KeyCode::KeyM => Key::M, KeyCode::KeyN => Key::N, KeyCode::KeyO => Key::O, KeyCode::KeyP => Key::P, KeyCode::KeyQ => Key::Q, KeyCode::KeyR => Key::R, KeyCode::KeyS => Key::S, KeyCode::KeyT => Key::T, KeyCode::KeyU => Key::U, KeyCode::KeyV => Key::V, KeyCode::KeyW => Key::W, KeyCode::KeyX => Key::X, KeyCode::KeyY => Key::Y, KeyCode::KeyZ => Key::Z, KeyCode::Minus => Key::Minus, KeyCode::Period => Key::Period, KeyCode::Quote => Key::Quote, KeyCode::Semicolon => Key::Semicolon, KeyCode::Slash => Key::Slash, KeyCode::AltLeft => Key::AltLeft, KeyCode::AltRight => Key::AltRight, KeyCode::Backspace => Key::Backspace, KeyCode::CapsLock => Key::CapsLock, KeyCode::ContextMenu => Key::ContextMenu, KeyCode::ControlLeft => Key::ControlLeft, KeyCode::ControlRight => Key::ControlRight, KeyCode::Enter => Key::Enter, KeyCode::SuperLeft => Key::MetaLeft, KeyCode::SuperRight => Key::MetaRight, KeyCode::ShiftLeft => Key::ShiftLeft, KeyCode::ShiftRight => 
Key::ShiftRight, KeyCode::Space => Key::Space, KeyCode::Tab => Key::Tab, KeyCode::Convert => Key::Convert, KeyCode::KanaMode => Key::KanaMode, KeyCode::Lang1 => Key::Lang1, KeyCode::Lang2 => Key::Lang2, KeyCode::Lang3 => Key::Lang3, KeyCode::Lang4 => Key::Lang4, KeyCode::Lang5 => Key::Lang5, KeyCode::NonConvert => Key::NonConvert, KeyCode::Delete => Key::Delete, KeyCode::End => Key::End, KeyCode::Help => Key::Help, KeyCode::Home => Key::Home, KeyCode::Insert => Key::Insert, KeyCode::PageDown => Key::PageDown, KeyCode::PageUp => Key::PageUp, KeyCode::ArrowDown => Key::ArrowDown, KeyCode::ArrowLeft => Key::ArrowLeft, KeyCode::ArrowRight => Key::ArrowRight, KeyCode::ArrowUp => Key::ArrowUp, KeyCode::NumLock => Key::NumLock, KeyCode::Numpad0 => Key::Numpad0, KeyCode::Numpad1 => Key::Numpad1, KeyCode::Numpad2 => Key::Numpad2, KeyCode::Numpad3 => Key::Numpad3, KeyCode::Numpad4 => Key::Numpad4, KeyCode::Numpad5 => Key::Numpad5, KeyCode::Numpad6 => Key::Numpad6, KeyCode::Numpad7 => Key::Numpad7, KeyCode::Numpad8 => Key::Numpad8, KeyCode::Numpad9 => Key::Numpad9, KeyCode::NumpadAdd => Key::NumpadAdd, KeyCode::NumpadBackspace => Key::NumpadBackspace, KeyCode::NumpadClear => Key::NumpadClear, KeyCode::NumpadClearEntry => Key::NumpadClearEntry, KeyCode::NumpadComma => Key::NumpadComma, KeyCode::NumpadDecimal => Key::NumpadDecimal, KeyCode::NumpadDivide => Key::NumpadDivide, KeyCode::NumpadEnter => Key::NumpadEnter, KeyCode::NumpadEqual => Key::NumpadEqual, KeyCode::NumpadHash => Key::NumpadHash, KeyCode::NumpadMemoryAdd => Key::NumpadMemoryAdd, KeyCode::NumpadMemoryClear => Key::NumpadMemoryClear, KeyCode::NumpadMemoryRecall => Key::NumpadMemoryRecall, KeyCode::NumpadMemoryStore => Key::NumpadMemoryStore, KeyCode::NumpadMultiply => Key::NumpadMultiply, KeyCode::NumpadParenLeft => Key::NumpadParenLeft, KeyCode::NumpadParenRight => Key::NumpadParenRight, KeyCode::NumpadSubtract => Key::NumpadSubtract, KeyCode::Escape => Key::Escape, KeyCode::F1 => Key::F1, KeyCode::F2 => 
Key::F2, KeyCode::F3 => Key::F3, KeyCode::F4 => Key::F4, KeyCode::F5 => Key::F5, KeyCode::F6 => Key::F6, KeyCode::F7 => Key::F7, KeyCode::F8 => Key::F8, KeyCode::F9 => Key::F9, KeyCode::F10 => Key::F10, KeyCode::F11 => Key::F11, KeyCode::F12 => Key::F12, KeyCode::Fn => Key::Fn, KeyCode::FnLock => Key::FnLock, KeyCode::PrintScreen => Key::PrintScreen, KeyCode::ScrollLock => Key::ScrollLock, KeyCode::Pause => Key::Pause, KeyCode::Hiragana => Key::Hiragana, KeyCode::Katakana => Key::Katakana, _ => Key::Unknown, } } ================================================ FILE: mm-client/src/lib.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT pub mod audio; pub mod cursor; pub mod delegate; pub mod flash; pub mod font; pub mod gamepad; pub mod keys; pub mod overlay; pub mod render; pub mod stats; pub mod video; pub mod vulkan; ================================================ FILE: mm-client/src/overlay.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT use std::{collections::VecDeque, vec}; use mm_protocol as protocol; use crate::stats::STATS; pub struct Overlay { streaming_width: u32, streaming_height: u32, codec: protocol::VideoCodec, video_latency_measurements: VecDeque, reposition: bool, } impl Overlay { pub fn new(fps: u32) -> Self { Self { streaming_width: 0, streaming_height: 0, codec: protocol::VideoCodec::H264, video_latency_measurements: VecDeque::from(vec![0.0; 10 * fps as usize]), reposition: true, } } pub fn reposition(&mut self) { self.reposition = true; } pub fn update_params(&mut self, params: &protocol::Attached) { self.streaming_width = params.streaming_resolution.as_ref().unwrap().width; self.streaming_height = params.streaming_resolution.as_ref().unwrap().height; self.codec = params.video_codec(); } pub fn build(&mut self, ui: &imgui::Ui) -> anyhow::Result<()> { // Record a latency measurement. 
// Record a latency measurement: push the newest sample into the fixed-size
// ring of measurements (the oldest sample at the front is overwritten).
        let latency = STATS.video_latency();
        self.video_latency_measurements.rotate_left(1);
        *self.video_latency_measurements.back_mut().unwrap() = latency;

        let [width, height] = ui.io().display_size;
        let [scale_x, scale_y] = ui.io().display_framebuffer_scale;

        // `Always` snaps the window back to its anchor position once after
        // `reposition()` was called; otherwise imgui keeps the user's placement.
        let condition = if self.reposition {
            self.reposition = false;
            imgui::Condition::Always
        } else {
            imgui::Condition::Once
        };

        let _padding = ui.push_style_var(imgui::StyleVar::WindowPadding([8.0, 8.0]));
        let _rounding = ui.push_style_var(imgui::StyleVar::WindowRounding(4.0));
        let _frame_rounding = ui.push_style_var(imgui::StyleVar::FrameRounding(4.0));

        // Anchored to the top-right corner (pivot (1.0, 0.0)), inset 16px.
        if let Some(_window) = ui
            .window("overlay")
            .position([width - 16.0, 16.0], condition)
            .position_pivot([1.0, 0.0])
            .title_bar(false)
            .scroll_bar(false)
            .no_nav()
            .movable(true)
            .resizable(true)
            .bg_alpha(0.8)
            .begin()
        {
            ui.set_window_font_scale(1.5);
            let _stretch = ui.push_item_width(-1.0);

            // Two-column key/value table of stream statistics.
            if let Some(_table) =
                ui.begin_table_with_flags("stats", 2, imgui::TableFlags::SIZING_FIXED_FIT)
            {
                stat_row(
                    ui,
                    "streaming res:",
                    format!("{}x{}", self.streaming_width, self.streaming_height),
                );
                stat_row(
                    ui,
                    "render res:",
                    format!("{}x{}", width * scale_x, height * scale_y),
                );
                stat_row(
                    ui,
                    "codec:",
                    match self.codec {
                        protocol::VideoCodec::H264 => "H.264",
                        protocol::VideoCodec::H265 => "H.265",
                        protocol::VideoCodec::Av1 => "AV1",
                        _ => "unknown",
                    },
                );
                stat_row(
                    ui,
                    "bitrate:",
                    format!("{:.1} mbps", STATS.video_bitrate() / 1_000_000.0),
                );
            }

            let [width, height] = ui.window_size();
            let cursor_pos = ui.cursor_pos();

            // Plot the latency history. The y-axis scale is the next multiple
            // of ten above the largest sample, doubled for headroom.
            let measurements = self.video_latency_measurements.make_contiguous();
            let max_latency = measurements.iter().copied().reduce(f32::max).unwrap();
            let scale = (max_latency.round() as u32).next_multiple_of(10) * 2;

            ui.plot_lines("", measurements)
                .scale_min(0.0)
                .scale_max(scale as f32)
                .graph_size([width - 16.0, 50.0_f32.max(height - cursor_pos[1] - 8.0)])
                .overlay_text(format!("latency: {:.1} ms", latency).as_str())
                .build();
        }

        Ok(())
    }
}

// Renders one row of the stats table: a right-aligned gray label in the first
// column, and its value in the second.
// NOTE(review): the generic bounds appear to have been lost in extraction;
// the original is presumably `label: impl AsRef<str>, value: impl AsRef<str>`.
fn stat_row(ui: &imgui::Ui, label: impl AsRef, value: impl AsRef) {
    ui.table_next_row();
    ui.table_next_column();

    // Right-align the label within the first column, when it fits.
    let cursor_pos = ui.cursor_pos();
    let pos_x = cursor_pos[0] + ui.column_width(0) - ui.calc_text_size(&label)[0];
    if pos_x > cursor_pos[0] {
        ui.set_cursor_pos([pos_x, cursor_pos[1]]);
    }

    ui.text_colored([0.6, 0.6, 0.6, 1.0], label);
    ui.table_next_column();
    ui.text(value);
}

================================================ FILE: mm-client/src/render.rs ================================================

// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: MIT

#![allow(clippy::missing_safety_doc)]

use std::sync::Arc;
use std::time;

use anyhow::{anyhow, Context, Result};
use ash::vk;
use cstr::cstr;
use imgui_rs_vulkan_renderer as imgui_vulkan;
use tracing::debug;
use tracing::instrument;
use tracing::trace;
use tracing::trace_span;
use tracing::warn;
use tracy_client::span_location;

use crate::font;
use crate::video::*;
use crate::vulkan::*;

// Base UI font size in points; multiplied by the window's scale factor when
// the imgui font atlas is (re)built.
const FONT_SIZE: f32 = 8.0;

// Matches the definition in render.slang.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[repr(u32)]
enum TextureColorSpace {
    Bt709 = 0,
    Bt2020Pq = 1,
}

// NOTE(review): the generic argument on `From` appears to have been lost in
// extraction; the original is presumably `impl From<crate::video::ColorSpace>`.
impl From for TextureColorSpace {
    fn from(cs: crate::video::ColorSpace) -> Self {
        match cs {
            crate::video::ColorSpace::Bt709 => TextureColorSpace::Bt709,
            crate::video::ColorSpace::Bt2020Pq => TextureColorSpace::Bt2020Pq,
        }
    }
}

// Push constants handed to the vertex/fragment stages; `#[repr(C)]` so the
// byte layout matches the `PushConstants` struct in render.slang.
#[derive(Copy, Clone, Debug)]
#[repr(C)]
struct PushConstants {
    aspect: glam::Vec2,
    texture_color_space: TextureColorSpace,
    output_color_space: vk::ColorSpaceKHR,
}

// Owns the swapchain, the imgui integration, and the currently-bound video
// texture for one OS window.
// NOTE(review): several simple generic arguments below appear stripped by
// extraction (e.g. `Option<Swapchain>`, `Arc<VkContext>`,
// `Arc<winit::window::Window>`); verify against the repository.
pub struct Renderer {
    width: u32,
    height: u32,
    scale_factor: f64,
    hdr_mode: bool,

    imgui: imgui::Context,
    imgui_platform: imgui_winit_support::WinitPlatform,
    imgui_font: font_kit::font::Font,
    imgui_fontid_big: imgui::FontId,
    imgui_time: time::Instant,

    swapchain: Option,
    swapchain_dirty: bool,
    new_video_texture: Option<(Arc, VideoStreamParams)>,

    vk: Arc,
    window: Arc,
}

// A decoded video frame image plus the view and color space used to sample it.
struct VideoTexture {
    image: Arc,
    view: vk::ImageView,
    color_space: TextureColorSpace,
}

// Everything that must be rebuilt together when the surface or the video
// texture changes: swapchain images, per-frame sync objects, the YCbCr
// sampler, and the graphics pipeline.
struct Swapchain {
    swapchain: vk::SwapchainKHR,
    frames: Vec,
    present_images: Vec,
    current_frame: usize,

    sampler_conversion: vk::SamplerYcbcrConversion,
    sampler: vk::Sampler,
    bound_video_texture: Option,

    /// The normalized relationship between the output and the video texture,
    /// after scaling. For example, a 500x500 video texture in a 1000x500
    /// swapchain would have the aspect (2.0, 1.0), as would a 250x250 texture.
    aspect: (f64, f64),

    surface_format: vk::SurfaceFormatKHR,
    descriptor_set_layout: vk::DescriptorSetLayout,
    descriptor_pool: vk::DescriptorPool,
    pipeline_layout: vk::PipelineLayout,
    pipeline: vk::Pipeline,

    imgui_renderer: imgui_vulkan::Renderer,
}

// Per-in-flight-frame command buffer, fences/semaphores, descriptor set, and
// GPU timestamp pool (for tracy profiling).
struct InFlightFrame {
    render_cb: vk::CommandBuffer,
    render_fence: vk::Fence,
    image_acquired_sema: vk::Semaphore,
    render_complete_sema: vk::Semaphore,
    descriptor_set: vk::DescriptorSet,
    ts_pool: VkTimestampQueryPool,
    tracy_span: Option,
}

// One swapchain image and its color-attachment view.
struct SwapImage {
    image: vk::Image,
    view: vk::ImageView,
}

impl Renderer {
    // Creates a renderer for `window`, setting up imgui (fonts, platform
    // integration) and building the initial swapchain. `hdr_mode` influences
    // the surface format selection in `recreate_swapchain`.
    pub fn new(
        vk: Arc,
        window: Arc,
        hdr_mode: bool,
    ) -> Result {
        let window_size = window.inner_size();
        let scale_factor = window.scale_factor();

        let mut imgui = imgui::Context::create();
        imgui.set_ini_filename(None);

        let mut imgui_platform = imgui_winit_support::WinitPlatform::new(&mut imgui);
        imgui_platform.attach_window(
            imgui.io_mut(),
            &window,
            imgui_winit_support::HiDpiMode::Default,
        );

        let imgui_font = font::load_ui_font()?;
        let imgui_fontid_big =
            import_imgui_font(&mut imgui, &imgui_font, FONT_SIZE, scale_factor)?;

        let mut renderer = Self {
            width: window_size.width,
            height: window_size.height,
            scale_factor,
            hdr_mode,
            window,
            imgui,
            imgui_platform,
            imgui_font,
            imgui_fontid_big,
            imgui_time: time::Instant::now(),
            swapchain: None,
            swapchain_dirty: false,
            new_video_texture: None,
            vk,
        };

        // Build the first swapchain eagerly so the renderer is usable
        // immediately after construction.
        unsafe { renderer.recreate_swapchain()?
}; Ok(renderer) } #[instrument(skip_all, level = "trace")] unsafe fn recreate_swapchain(&mut self) -> Result<()> { let start = time::Instant::now(); let device = &self.vk.device; let surface_format = select_surface_format(self.vk.clone(), self.hdr_mode)?; let surface_capabilities = self .vk .surface_loader .get_physical_device_surface_capabilities(self.vk.pdevice, self.vk.surface) .unwrap(); let mut desired_image_count = surface_capabilities.min_image_count + 1; if surface_capabilities.max_image_count > 0 && desired_image_count > surface_capabilities.max_image_count { desired_image_count = surface_capabilities.max_image_count; } let surface_resolution = match surface_capabilities.current_extent.width { std::u32::MAX => vk::Extent2D { width: self.width, height: self.height, }, _ => surface_capabilities.current_extent, }; let pre_transform = if surface_capabilities .supported_transforms .contains(vk::SurfaceTransformFlagsKHR::IDENTITY) { vk::SurfaceTransformFlagsKHR::IDENTITY } else { surface_capabilities.current_transform }; let present_modes = self .vk .surface_loader .get_physical_device_surface_present_modes(self.vk.pdevice, self.vk.surface) .unwrap(); let mut present_modes = present_modes.clone(); present_modes.sort_by_key(|&mode| match mode { vk::PresentModeKHR::MAILBOX => 0, vk::PresentModeKHR::FIFO => 1, vk::PresentModeKHR::IMMEDIATE => 2, _ => 4, }); let present_mode = present_modes.first().unwrap(); if *present_mode != vk::PresentModeKHR::MAILBOX { warn!( "present mode MAILBOX not available, using {:?} (available: {:?})", present_mode, present_modes ); } let mut swapchain_create_info = vk::SwapchainCreateInfoKHR::default() .surface(self.vk.surface) .min_image_count(desired_image_count) .image_color_space(surface_format.color_space) .image_format(surface_format.format) .image_extent(surface_resolution) .image_usage(vk::ImageUsageFlags::COLOR_ATTACHMENT) .image_sharing_mode(vk::SharingMode::EXCLUSIVE) .pre_transform(pre_transform) 
.composite_alpha(vk::CompositeAlphaFlagsKHR::OPAQUE) .present_mode(*present_mode) .clipped(true) .image_array_layers(1); if let Some(old_swapchain) = self.swapchain.as_ref() { swapchain_create_info = swapchain_create_info.old_swapchain(old_swapchain.swapchain); } let swapchain = self .vk .swapchain_loader .create_swapchain(&swapchain_create_info, None)?; let swapchain_images = self.vk.swapchain_loader.get_swapchain_images(swapchain)?; // TODO: rather than recreate the swapchain if the video texture // changes, we can just recreate the pipeline. This is tricky because // we create a descriptor set for each SwapFrame, which refers to the // layout, which includes the immutable sampler. // We need to create a sampler, even if we don't have a video stream yet // and don't know what the fields should be. let (video_texture_format, video_params) = match self.new_video_texture.as_ref() { Some((tex, params)) => (tex.format, *params), None => ( vk::Format::G8_B8_R8_3PLANE_420_UNORM, VideoStreamParams::default(), ), }; let sampler_conversion = create_ycbcr_sampler_conversion(device, video_texture_format, &video_params)?; let sampler = { let mut conversion_info = vk::SamplerYcbcrConversionInfo::default().conversion(sampler_conversion); let create_info = vk::SamplerCreateInfo::default() .mag_filter(vk::Filter::LINEAR) .min_filter(vk::Filter::LINEAR) .compare_enable(true) .address_mode_u(vk::SamplerAddressMode::CLAMP_TO_EDGE) .address_mode_v(vk::SamplerAddressMode::CLAMP_TO_EDGE) .address_mode_w(vk::SamplerAddressMode::CLAMP_TO_EDGE) .push_next(&mut conversion_info); unsafe { device.create_sampler(&create_info, None)? } }; let bound_video_texture = if let Some((tex, params)) = self.new_video_texture.as_ref() { let view = create_image_view( &self.vk.device, tex.image, tex.format, Some(sampler_conversion), )?; // Increment the reference count on the texture. 
Some(VideoTexture { image: tex.clone(), view, color_space: params.color_space.into(), }) } else { None }; let aspect = if let Some(tex) = bound_video_texture.as_ref() { calculate_aspect(self.width, self.height, tex.image.width, tex.image.height) } else { (1.0, 1.0) }; let descriptor_set_layout = { // We're required to use an immutable sampler for YCbCr conversion // by the vulkan spec. let samplers = [sampler]; let binding = vk::DescriptorSetLayoutBinding::default() .binding(0) .descriptor_type(vk::DescriptorType::COMBINED_IMAGE_SAMPLER) .descriptor_count(1) .stage_flags(vk::ShaderStageFlags::FRAGMENT) .immutable_samplers(&samplers); unsafe { device.create_descriptor_set_layout( &vk::DescriptorSetLayoutCreateInfo::default().bindings(&[binding]), None, )? } }; let descriptor_pool = { let binding_multiplier = get_ycbcr_conversion_properties( self.vk.pdevice, &self.vk.instance, video_texture_format, )? .combined_image_sampler_descriptor_count; let sampler_size = [vk::DescriptorPoolSize::default() .ty(vk::DescriptorType::COMBINED_IMAGE_SAMPLER) .descriptor_count(swapchain_images.len() as u32 * binding_multiplier)]; let info = vk::DescriptorPoolCreateInfo::default() .pool_sizes(&sampler_size) .max_sets(swapchain_images.len() as u32); unsafe { device.create_descriptor_pool(&info, None)? } }; let pipeline_layout = { let pc_ranges = [vk::PushConstantRange::default() .stage_flags(vk::ShaderStageFlags::VERTEX | vk::ShaderStageFlags::FRAGMENT) .offset(0) .size(std::mem::size_of::() as u32)]; let set_layouts = [descriptor_set_layout]; let create_info = vk::PipelineLayoutCreateInfo::default() .set_layouts(&set_layouts) .push_constant_ranges(&pc_ranges); unsafe { device.create_pipeline_layout(&create_info, None)? 
} }; let pipeline = { let vert_bytes = include_bytes!(concat!(env!("OUT_DIR"), "/shaders/vert.spv")); let frag_bytes = include_bytes!(concat!(env!("OUT_DIR"), "/shaders/frag.spv")); let vert_shader = load_shader(device, vert_bytes).context("loading vert.spv")?; let frag_shader = load_shader(device, frag_bytes).context("loading frag.spv")?; let vert_stage = vk::PipelineShaderStageCreateInfo::default() .stage(vk::ShaderStageFlags::VERTEX) .module(vert_shader) .name(cstr!("main")); let frag_stage = vk::PipelineShaderStageCreateInfo::default() .stage(vk::ShaderStageFlags::FRAGMENT) .module(frag_shader) .name(cstr!("main")); let vertex_input_state = vk::PipelineVertexInputStateCreateInfo::default(); let input_assembly_state = vk::PipelineInputAssemblyStateCreateInfo::default() .topology(vk::PrimitiveTopology::TRIANGLE_STRIP) .primitive_restart_enable(false); let viewport = [vk::Viewport::default() .x(0.0) .y(0.0) .width(self.width as f32) .height(self.height as f32) .min_depth(0.0) .max_depth(1.0)]; let scissor = [vk::Rect2D::default().extent(vk::Extent2D { width: self.width, height: self.height, })]; let viewport_state = vk::PipelineViewportStateCreateInfo::default() .viewports(&viewport) .scissors(&scissor); let rasterization_state = vk::PipelineRasterizationStateCreateInfo::default() .depth_clamp_enable(false) .rasterizer_discard_enable(false) .polygon_mode(vk::PolygonMode::FILL) .line_width(1.0) .depth_bias_enable(false) // Per https://www.saschawillems.de/blog/2016/08/13/vulkan-tutorial-on-rendering-a-fullscreen-quad-without-buffers .cull_mode(vk::CullModeFlags::FRONT) .front_face(vk::FrontFace::COUNTER_CLOCKWISE); let multisample_state = vk::PipelineMultisampleStateCreateInfo::default() .sample_shading_enable(false) .rasterization_samples(vk::SampleCountFlags::TYPE_1); let attachment = [vk::PipelineColorBlendAttachmentState::default() .color_write_mask(vk::ColorComponentFlags::RGBA) .blend_enable(true) .src_color_blend_factor(vk::BlendFactor::SRC_ALPHA) 
.dst_color_blend_factor(vk::BlendFactor::ONE_MINUS_SRC_ALPHA) .color_blend_op(vk::BlendOp::ADD) .src_alpha_blend_factor(vk::BlendFactor::ONE) .dst_alpha_blend_factor(vk::BlendFactor::ZERO) .alpha_blend_op(vk::BlendOp::ADD)]; let color_blend_state = vk::PipelineColorBlendStateCreateInfo::default() .logic_op_enable(false) .attachments(&attachment); let formats = [surface_format.format]; let mut pipeline_rendering = vk::PipelineRenderingCreateInfo::default().color_attachment_formats(&formats); let stages = [vert_stage, frag_stage]; let create_info = vk::GraphicsPipelineCreateInfo::default() .stages(&stages) .vertex_input_state(&vertex_input_state) .input_assembly_state(&input_assembly_state) .viewport_state(&viewport_state) .rasterization_state(&rasterization_state) .multisample_state(&multisample_state) .color_blend_state(&color_blend_state) .layout(pipeline_layout) .push_next(&mut pipeline_rendering); unsafe { let pipeline = match device.create_graphics_pipelines( vk::PipelineCache::null(), &[create_info], None, ) { Ok(pipelines) => Ok(pipelines[0]), Err((_, e)) => Err(e), }?; device.destroy_shader_module(vert_shader, None); device.destroy_shader_module(frag_shader, None); pipeline } }; let create_frame = || -> Result { let render_cb = { let create_info = vk::CommandBufferAllocateInfo::default() .level(vk::CommandBufferLevel::PRIMARY) .command_pool(self.vk.present_queue.command_pool) .command_buffer_count(1); let cbs = device .allocate_command_buffers(&create_info) .context("failed to allocate render command buffer")?; cbs[0] }; let descriptor_set = { let layouts = &[descriptor_set_layout]; let create_info = vk::DescriptorSetAllocateInfo::default() .descriptor_pool(descriptor_pool) .set_layouts(layouts); let ds = device .allocate_descriptor_sets(&create_info)? .pop() .unwrap(); // TODO: do the write in bind_video_texture? 
if let Some(tex) = bound_video_texture.as_ref() { let info = [vk::DescriptorImageInfo::default() .image_layout(vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL) .image_view(tex.view)]; let sampler_write = vk::WriteDescriptorSet::default() .dst_set(ds) .dst_binding(0) .dst_array_element(0) .descriptor_type(vk::DescriptorType::COMBINED_IMAGE_SAMPLER) .image_info(&info); device.update_descriptor_sets(&[sampler_write], &[]); } ds }; let render_fence = create_fence(device, true)?; let image_acquired_sema = create_semaphore(device)?; let render_complete_sema = create_semaphore(device)?; let ts_pool = create_timestamp_query_pool(device, 2)?; Ok(InFlightFrame { render_cb, render_fence, image_acquired_sema, render_complete_sema, descriptor_set, ts_pool, tracy_span: None, }) }; let frames = (0..swapchain_images.len()) .map(|_| create_frame()) .collect::>>()?; let swapchain_images = swapchain_images .into_iter() .map(|image| { let image_view = create_image_view(device, image, surface_format.format, None)?; Ok(SwapImage { image, view: image_view, }) }) .collect::>>()?; let mut imgui_renderer = imgui_vulkan::Renderer::with_default_allocator( &self.vk.instance, self.vk.pdevice, self.vk.device.clone(), self.vk.present_queue.queue, self.vk.present_queue.command_pool, imgui_vulkan::DynamicRendering { color_attachment_format: surface_format.format, depth_attachment_format: None, }, &mut self.imgui, Some(imgui_vulkan::Options { in_flight_frames: frames.len(), ..Default::default() }), )?; imgui_renderer.update_fonts_texture( self.vk.present_queue.queue, self.vk.present_queue.command_pool, &mut self.imgui, )?; let swapchain = Swapchain { swapchain, frames, present_images: swapchain_images, current_frame: 0, descriptor_pool, descriptor_set_layout, sampler_conversion, sampler, bound_video_texture, aspect, surface_format, pipeline_layout, pipeline, imgui_renderer, }; debug!("recreated swapchain in {:?}", start.elapsed()); if let Some(old_swapchain) = self.swapchain.replace(swapchain) { 
self.destroy_swapchain(old_swapchain); }; Ok(()) } pub fn handle_event(&mut self, event: &winit::event::WindowEvent) -> anyhow::Result<()> { let now = time::Instant::now(); self.imgui.io_mut().update_delta_time(now - self.imgui_time); self.imgui_time = now; let wrapped: winit::event::Event<()> = winit::event::Event::WindowEvent { window_id: self.window.id(), event: event.clone(), }; self.imgui_platform .handle_event(self.imgui.io_mut(), self.window.as_ref(), &wrapped); match event { winit::event::WindowEvent::Resized(size) => { self.resize(size.width, size.height); } winit::event::WindowEvent::ScaleFactorChanged { scale_factor, .. } => { self.scale_factor_changed(*scale_factor)?; } _ => (), } Ok(()) } pub fn resize(&mut self, width: u32, height: u32) { if self.width == width && self.height == height { return; } self.width = width; self.height = height; self.swapchain_dirty = true; } fn scale_factor_changed(&mut self, scale_factor: f64) -> anyhow::Result<()> { if self.scale_factor == scale_factor { return Ok(()); } // Resize fonts. self.imgui_fontid_big = import_imgui_font(&mut self.imgui, &self.imgui_font, FONT_SIZE, scale_factor)?; self.scale_factor = scale_factor; Ok(()) } pub fn bind_video_texture( &mut self, texture: Arc, params: VideoStreamParams, ) -> Result<()> { // TODO: no need to recreate the sampler if the params match. self.new_video_texture = Some((texture, params)); self.swapchain_dirty = true; Ok(()) } // Returns the normalized relationship between the output dimensions and the // video texture dimensions, after scaling. For example, if the video // texture is 250x250 and the output is 1000x500, the aspect would be (2.0, // 1.0). pub fn get_texture_aspect(&self) -> Option<(f64, f64)> { if let Some(Swapchain { bound_video_texture: Some(_), aspect, .. 
}) = self.swapchain.as_ref() { Some(*aspect) } else { None } } #[instrument(skip_all, level = "trace")] pub unsafe fn render(&mut self, ui_builder: F) -> Result<()> where F: FnOnce(&imgui::Ui) -> anyhow::Result<()>, { if self.swapchain_dirty || self.swapchain.is_none() { self.recreate_swapchain()?; self.swapchain_dirty = false; } let device = &self.vk.device; let swapchain = self.swapchain.as_mut().unwrap(); let num_frames = swapchain.frames.len(); let frame = &mut swapchain.frames[swapchain.current_frame]; swapchain.current_frame = (swapchain.current_frame + 1) % num_frames; // Wait for the gpu to catch up. device.wait_for_fences(&[frame.render_fence], true, u64::MAX)?; // Trace the frame on the GPU side. if let Some(ctx) = &self.vk.tracy_context { if let Some(span) = frame.tracy_span.take() { let timestamps = frame.ts_pool.fetch_results(device)?; span.upload_timestamp(timestamps[0], timestamps[1]); } frame.tracy_span = Some(ctx.span(span_location!())?); } let result = self.vk.swapchain_loader.acquire_next_image( swapchain.swapchain, u64::MAX, frame.image_acquired_sema, vk::Fence::null(), ); let swapchain_index = match result { Ok((image_index, _)) => image_index, Err(vk::Result::ERROR_OUT_OF_DATE_KHR) => { // Recreate and try again. self.swapchain_dirty = true; return self.render(ui_builder); } Err(e) => return Err(e.into()), }; let present_image = swapchain .present_images .get(swapchain_index as usize) .unwrap(); // Reset the command buffer. device.reset_command_buffer(frame.render_cb, vk::CommandBufferResetFlags::empty())?; // Begin the command buffer. { let begin_info = vk::CommandBufferBeginInfo::default() .flags(vk::CommandBufferUsageFlags::ONE_TIME_SUBMIT); device.begin_command_buffer(frame.render_cb, &begin_info)?; } // Record the start timestamp. frame.ts_pool.cmd_reset(device, frame.render_cb); device.cmd_write_timestamp( frame.render_cb, vk::PipelineStageFlags::TOP_OF_PIPE, frame.ts_pool.pool, 0, ); // Transition the present image to be writable. 
cmd_image_barrier( device, frame.render_cb, present_image.image, vk::PipelineStageFlags::TOP_OF_PIPE, vk::AccessFlags::empty(), vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT, vk::AccessFlags::COLOR_ATTACHMENT_WRITE, vk::ImageLayout::UNDEFINED, vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL, ); // Begin rendering. { let rect: vk::Rect2D = vk::Rect2D::default().extent(vk::Extent2D { width: self.width, height: self.height, }); let clear_value = vk::ClearValue { color: vk::ClearColorValue { float32: [0.0, 0.0, 0.0, 1.0], }, }; let color_attachment = vk::RenderingAttachmentInfo::default() .image_view(present_image.view) .image_layout(vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL) .load_op(vk::AttachmentLoadOp::CLEAR) .store_op(vk::AttachmentStoreOp::STORE) .clear_value(clear_value); let color_attachments = [color_attachment]; let rendering_info = vk::RenderingInfo::default() .render_area(rect) .color_attachments(&color_attachments) .layer_count(1); self.vk .dynamic_rendering_loader .cmd_begin_rendering(frame.render_cb, &rendering_info); device.cmd_bind_pipeline( frame.render_cb, vk::PipelineBindPoint::GRAPHICS, swapchain.pipeline, ); } if self.new_video_texture.is_none() || swapchain.aspect != (1.0, 1.0) { // TODO Draw the background // https://www.toptal.com/designers/subtlepatterns/prism/ } // Draw the video texture. if let Some(tex) = &swapchain.bound_video_texture { let pc = PushConstants { aspect: glam::Vec2::new(swapchain.aspect.0 as f32, swapchain.aspect.1 as f32), texture_color_space: tex.color_space, output_color_space: swapchain.surface_format.color_space, }; device.cmd_push_constants( frame.render_cb, swapchain.pipeline_layout, vk::ShaderStageFlags::VERTEX | vk::ShaderStageFlags::FRAGMENT, 0, std::slice::from_raw_parts( &pc as *const _ as *const u8, std::mem::size_of::(), ), ); device.cmd_bind_descriptor_sets( frame.render_cb, vk::PipelineBindPoint::GRAPHICS, swapchain.pipeline_layout, 0, &[frame.descriptor_set], &[], ); // Draw the video texture. 
device.cmd_draw(frame.render_cb, 4, 1, 0, 0); } // Draw the overlay. { self.imgui_platform .prepare_frame(self.imgui.io_mut(), &self.window)?; { let ui = self.imgui.new_frame(); let _font_stack = ui.push_font(self.imgui_fontid_big); ui_builder(ui)?; self.imgui_platform.prepare_render(ui, &self.window); } swapchain .imgui_renderer .cmd_draw(frame.render_cb, self.imgui.render())?; }; // Done rendereng. self.vk .dynamic_rendering_loader .cmd_end_rendering(frame.render_cb); // Transition the present image to be presentable. cmd_image_barrier( device, frame.render_cb, present_image.image, vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT, vk::AccessFlags::COLOR_ATTACHMENT_WRITE, vk::PipelineStageFlags::BOTTOM_OF_PIPE, vk::AccessFlags::empty(), vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL, vk::ImageLayout::PRESENT_SRC_KHR, ); // Record the end timestamp. device.cmd_write_timestamp( frame.render_cb, vk::PipelineStageFlags::BOTTOM_OF_PIPE, frame.ts_pool.pool, 1, ); if let Some(span) = &mut frame.tracy_span { span.end_zone(); } // Submit and present! { let present_queue = self.vk.present_queue.queue; device.end_command_buffer(frame.render_cb)?; device.reset_fences(&[frame.render_fence])?; let cbs = [frame.render_cb]; let wait_semas = [frame.image_acquired_sema]; let signal_semas = [frame.render_complete_sema]; let wait_stages = [vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT]; let submit_info = vk::SubmitInfo::default() .command_buffers(&cbs) .wait_semaphores(&wait_semas) .wait_dst_stage_mask(&wait_stages) .signal_semaphores(&signal_semas); trace!(queue = ?present_queue, "queue submit for render"); device.queue_submit(present_queue, &[submit_info], frame.render_fence)?; // This "helps winit [with stuff]". It also seems to increase latency. 
self.window.pre_present_notify(); trace!(queue = ?present_queue, index = swapchain_index, "queue present"); let wait_semas = [frame.render_complete_sema]; let swapchains = [swapchain.swapchain]; let image_indices = [swapchain_index]; let present_info = vk::PresentInfoKHR::default() .wait_semaphores(&wait_semas) .swapchains(&swapchains) .image_indices(&image_indices); let res = trace_span!("render.queue_present").in_scope(|| { self.vk .swapchain_loader .queue_present(present_queue, &present_info) }); self.swapchain_dirty = match res { Ok(false) => false, Ok(true) => true, Err(vk::Result::ERROR_OUT_OF_DATE_KHR) => true, Err(e) => return Err(e.into()), }; } tracy_client::frame_mark(); Ok(()) } unsafe fn destroy_swapchain(&mut self, mut swapchain: Swapchain) { let device = &self.vk.device; device.device_wait_idle().unwrap(); for frame in swapchain.frames.drain(..) { device.free_command_buffers(self.vk.present_queue.command_pool, &[frame.render_cb]); device.destroy_fence(frame.render_fence, None); device.destroy_semaphore(frame.image_acquired_sema, None); device.destroy_semaphore(frame.render_complete_sema, None); device.destroy_query_pool(frame.ts_pool.pool, None); } for swap_img in swapchain.present_images.drain(..) { // Destroying the swapchain does this. // device.destroy_image(swap_img.image, None); device.destroy_image_view(swap_img.view, None); } device.destroy_pipeline_layout(swapchain.pipeline_layout, None); device.destroy_descriptor_pool(swapchain.descriptor_pool, None); device.destroy_descriptor_set_layout(swapchain.descriptor_set_layout, None); device.destroy_sampler(swapchain.sampler, None); device.destroy_sampler_ycbcr_conversion(swapchain.sampler_conversion, None); if let Some(tex) = swapchain.bound_video_texture.take() { device.destroy_image_view(tex.view, None); // We probably drop the last reference to the image here, which then // gets destroyed. 
} device.destroy_pipeline(swapchain.pipeline, None); self.vk .swapchain_loader .destroy_swapchain(swapchain.swapchain, None) } } fn select_surface_format( vk: Arc, hdr_mode: bool, ) -> Result { let mut surface_formats = unsafe { vk.surface_loader .get_physical_device_surface_formats(vk.pdevice, vk.surface)? }; let preferred_formats = [ vk::Format::R16G16B16A16_SFLOAT, vk::Format::R8G8B8A8_UNORM, vk::Format::B8G8R8A8_UNORM, ]; let preferred_color_spaces = if hdr_mode { vec![ vk::ColorSpaceKHR::HDR10_ST2084_EXT, vk::ColorSpaceKHR::EXTENDED_SRGB_LINEAR_EXT, vk::ColorSpaceKHR::DISPLAY_P3_NONLINEAR_EXT, vk::ColorSpaceKHR::SRGB_NONLINEAR, ] } else { vec![ vk::ColorSpaceKHR::BT709_NONLINEAR_EXT, vk::ColorSpaceKHR::SRGB_NONLINEAR, ] }; surface_formats.sort_by_key(|sf| { let color_space_score = preferred_color_spaces .iter() .position(|&cs| cs == sf.color_space) .unwrap_or(preferred_color_spaces.len()); let format_score = preferred_formats .iter() .position(|&f| f == sf.format) .unwrap_or(preferred_formats.len()); (color_space_score, format_score) }); let surface_format = surface_formats[0]; debug!(?surface_format, "selected surface format"); Ok(surface_format) } impl Drop for Renderer { fn drop(&mut self) { if let Some(swapchain) = self.swapchain.take() { unsafe { self.destroy_swapchain(swapchain); }; } } } fn import_imgui_font( imgui: &mut imgui::Context, font: &font_kit::font::Font, size: f32, scale_factor: f64, ) -> anyhow::Result { let font_size = size * scale_factor as f32; imgui.io_mut().font_global_scale = (1.0 / scale_factor) as f32; let data = match font.copy_font_data() { Some(data) => data, None => return Err(anyhow!("failed to load font data for {:?}", font)), }; let id = imgui.fonts().add_font(&[imgui::FontSource::TtfData { size_pixels: font_size, data: &data, config: Some(imgui::FontConfig { pixel_snap_h: true, oversample_h: 4, oversample_v: 4, ..imgui::FontConfig::default() }), }]); Ok(id) } fn calculate_aspect(width: u32, height: u32, tex_width: u32, 
tex_height: u32) -> (f64, f64) {
    let width = width as f64;
    let height = height as f64;
    let tex_width = tex_width as f64;
    let tex_height = tex_height as f64;

    let window_aspect = width / height;
    let texture_aspect = tex_width / tex_height;
    if window_aspect > texture_aspect {
        // Screen too wide: the texture is scaled to fill the height, and the
        // horizontal component reports the leftover width.
        let scale = height / tex_height;
        (width / (tex_width * scale), 1.0)
    } else if window_aspect < texture_aspect {
        // Screen too tall: the texture fills the width instead.
        let scale = width / tex_width;
        (1.0, height / (tex_height * scale))
    } else {
        // Aspect ratios match exactly; no letterboxing in either dimension.
        (1.0, 1.0)
    }
}

================================================ FILE: mm-client/src/render.slang ================================================

// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: MIT

import color;

// Should match the definition in render.rs
enum TextureColorSpace {
    Bt709 = 0,
    Bt2020Pq = 1,
}

// Mirrors the #[repr(C)] PushConstants struct in render.rs;
// `vk_color_space` carries a raw VkColorSpaceKHR value.
struct PushConstants {
    float2 aspect;
    TextureColorSpace texture_color_space;
    int vk_color_space;
};

// Raw VkColorSpaceKHR enum values (from the Vulkan headers).
static const int VK_COLOR_SPACE_SRGB_NONLINEAR_EXT = 0;
static const int VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT = 1_000_104_002;
// static const int VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT = 1_000_104_001;
static const int VK_COLOR_SPACE_BT709_NONLINEAR_EXT = 1_000_104_006;
static const int VK_COLOR_SPACE_HDR10_ST2084_EXT = 1_000_104_008;

[[vk::push_constant]]
const PushConstants pc;

const Sampler2D texture;

struct VertOutput {
    float2 uv : TextureCoord;
    float4 position : SV_Position;
};

// Fullscreen-triangle-style vertex shader: generates UVs from the vertex
// index and scales the quad by the inverse of the aspect push constant.
[shader("vertex")]
VertOutput vert(uint vertexID: SV_VertexID) {
    VertOutput output;

    let uv = float2((vertexID << 1) & 2, vertexID & 2) / 2.0;
    output.uv = uv;
    output.position = float4((uv * 2.0f - 1.0f) / pc.aspect, 0.0f, 1.0f);

    return output;
}

// Converts a nonlinear BT.709 sample to the swapchain's color space.
float3 bt709_to_display(float3 color, int vk_color_space) {
    if (vk_color_space == VK_COLOR_SPACE_BT709_NONLINEAR_EXT) {
        return color;
    }

    let linear = bt709_eotf(color);
    switch (vk_color_space) {
    case VK_COLOR_SPACE_SRGB_NONLINEAR_EXT:
        return srgb_inverse_eotf(linear);
    case VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT:
        return linear;
    // case VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT:
    //     return srgb_inverse_eotf(transform(color, Primaries::BT709, Primaries::P3))
    case VK_COLOR_SPACE_HDR10_ST2084_EXT:
        return pq_inverse_eotf(transform(linear, Primaries::BT709, Primaries::BT2020));
    default:
        return srgb_inverse_eotf(linear);
    }
}

// Converts a nonlinear BT.2020/PQ sample to the swapchain's color space,
// normalizing PQ's absolute luminance to SDR reference white.
float3 bt2020_pq_to_display(float3 color, int vk_color_space) {
    if (vk_color_space == VK_COLOR_SPACE_HDR10_ST2084_EXT) {
        return color;
    }

    let linear = transform(pq_eotf(color) * PQ_MAX_WHITE / SDR_REFERENCE_WHITE,
                           Primaries::BT2020, Primaries::BT709);
    switch (vk_color_space) {
    case VK_COLOR_SPACE_SRGB_NONLINEAR_EXT:
        return srgb_inverse_eotf(linear);
    case VK_COLOR_SPACE_BT709_NONLINEAR_EXT:
        return bt709_inverse_eotf(clamp(linear, 0.0, 1.0));
    case VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT:
        return linear;
    // case VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT:
    //     return srgb_inverse_eotf(transform(color, Primaries::BT2020, Primaries::P3))
    default:
        return srgb_inverse_eotf(linear);
    }
}

[shader("fragment")]
float4 frag(float2 uv: TextureCoord) : SV_Target {
    float4 color = clamp(texture.Sample(uv), 0.0, 1.0);

    // When sampling the video texture, vulkan does the matrix multiplication
    // for us, but doesn't apply any transfer function, so the values are
    // still nonlinear in either BT.709 or BT.2020/ST2084.
    switch (pc.texture_color_space) {
    case TextureColorSpace::Bt709:
        return float4(bt709_to_display(color.rgb, pc.vk_color_space), 1.0);
    case TextureColorSpace::Bt2020Pq:
        return float4(bt2020_pq_to_display(color.rgb, pc.vk_color_space), 1.0);
    default:
        // Debug blue: an unexpected texture color space.
        return float4(0.0, 0.5, 1.0, 1.0);
    }
}

================================================ FILE: mm-client/src/stats.rs ================================================

// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: MIT

use std::{
    collections::BTreeMap,
    sync::{Arc, RwLock},
    time,
};

use lazy_static::lazy_static;
use simple_moving_average::{SingleSumSMA, SMA as _};

lazy_static!
{
    // Process-wide stats singleton, shared between the network and render paths.
    pub static ref STATS: Arc = Arc::new(Stats::default());
}

/// Thread-safe collector for client-side video statistics: per-frame latency,
/// bitrate, and the connection RTT. All mutation goes through an interior
/// RwLock, so methods take `&self`.
#[derive(Default)]
pub struct Stats {
    inner: RwLock,
}

// The instant a frame was received, recorded so we can measure how long it
// spends in the pipeline before being rendered or discarded.
struct InFlightFrame(time::Instant);

struct Inner {
    // Frames received but not yet rendered, keyed by (stream_seq, seq).
    in_flight_frames: BTreeMap<(u64, u64), InFlightFrame>,
    // Moving average of bitrate samples, in bits per second.
    video_bitrate: SingleSumSMA,
    // Bytes received since the last rendered frame; folded into a bitrate
    // sample (and reset) on each call to frame_rendered.
    video_bytes: u64,
    last_frame: time::Instant,
    connection_rtt: time::Duration,
    // Moving average of receive-to-render latency, in nanoseconds.
    video_latency: SingleSumSMA,
}

impl Stats {
    /// Records the most recent connection round-trip time.
    pub fn set_connection_rtt(&self, rtt: time::Duration) {
        self.inner.write().unwrap().connection_rtt = rtt;
    }

    /// Starts tracking a frame.
    pub fn frame_received(&self, stream_seq: u64, seq: u64, len: usize) {
        let now = time::Instant::now();

        let mut inner = self.inner.write().unwrap();
        // or_insert keeps the earliest receive time if the same frame is
        // reported twice.
        inner
            .in_flight_frames
            .entry((stream_seq, seq))
            .or_insert(InFlightFrame(now));

        inner.video_bytes += len as u64;
    }

    /// Tracks the total frame time. Should be called right before the frame is
    /// rendered.
    pub fn frame_rendered(&self, stream_seq: u64, seq: u64) {
        let now = time::Instant::now();
        let mut inner = self.inner.write().unwrap();

        // Add a bitrate measurement.
        let duration = (now - inner.last_frame).as_secs_f32();
        inner.last_frame = now;

        let sample = inner.video_bytes as f32 * 8.0 / duration;
        inner.video_bitrate.add_sample(sample);
        inner.video_bytes = 0;

        // Finish tracking the frame, and measure latency.
        if let Some(frame) = inner.in_flight_frames.remove(&(stream_seq, seq)) {
            inner
                .video_latency
                .add_sample((now - frame.0).as_nanos() as u64)
        }
    }

    /// Stops tracking a frame that will never be rendered (e.g. because a
    /// newer frame superseded it), without adding a latency sample.
    pub fn frame_discarded(&self, stream_seq: u64, seq: u64) {
        self.inner
            .write()
            .unwrap()
            .in_flight_frames
            .remove(&(stream_seq, seq));
    }

    /// Returns the average video bitrate in bits per second.
    pub fn video_bitrate(&self) -> f32 {
        self.inner.read().unwrap().video_bitrate.get_average()
    }

    /// Returns the average total video latency in milliseconds.
pub fn video_latency(&self) -> f32 { let inner = self.inner.read().unwrap(); let avg = inner.video_latency.get_average() + inner.connection_rtt.as_nanos() as u64; avg as f32 / 1_000_000.0 } } impl Default for Inner { fn default() -> Self { Self { in_flight_frames: BTreeMap::new(), video_bitrate: SingleSumSMA::new(), video_bytes: 0, last_frame: time::Instant::now(), connection_rtt: time::Duration::ZERO, video_latency: SingleSumSMA::new(), } } } ================================================ FILE: mm-client/src/video.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT use std::{ sync::{mpsc, Arc}, time, }; use anyhow::{anyhow, bail, Context}; use ash::vk; use bytes::{Bytes, BytesMut}; use ffmpeg_next as ffmpeg; use ffmpeg_sys_next as ffmpeg_sys; use mm_client_common as client; use mm_protocol as protocol; use tracing::{debug, error, instrument, trace, trace_span, warn}; use crate::{stats::STATS, vulkan::*}; const DECODER_INIT_TIMEOUT: time::Duration = time::Duration::from_secs(5); type Undecoded = std::sync::Arc; #[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] pub struct FrameMetadata { pub stream_seq: u64, pub seq: u64, pub pts: u64, } #[derive(Debug, Clone)] struct YUVPicture { planes: [Bytes; 3], num_planes: usize, info: FrameMetadata, } #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum ColorSpace { Bt709, Bt2020Pq, } #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct VideoStreamParams { pub width: u32, pub height: u32, pub color_space: ColorSpace, pub color_full_range: bool, } impl Default for VideoStreamParams { fn default() -> Self { Self { width: 0, height: 0, color_space: ColorSpace::Bt709, color_full_range: false, } } } pub enum VideoStreamEvent { VideoStreamReady(Arc, VideoStreamParams), VideoFrameAvailable, } enum StreamState { Empty, Initializing(DecoderInit), Streaming(CPUDecoder), Restarting(CPUDecoder, DecoderInit), } impl std::fmt::Debug for StreamState { fn fmt(&self, f: 
&mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { StreamState::Empty => write!(f, "Empty"), StreamState::Initializing(init) => write!(f, "Initializing({})", init.stream_seq), StreamState::Streaming(dec) => write!(f, "Streaming({})", dec.stream_seq), StreamState::Restarting(dec, init) => write!( f, "RestartingStream({} -> {})", dec.stream_seq, init.stream_seq ), } } } pub struct VideoStream + Send + 'static> { state: StreamState, proxy: winit::event_loop::EventLoopProxy, vk: Arc, } impl + Send + 'static> VideoStream { pub fn new(vk: Arc, proxy: winit::event_loop::EventLoopProxy) -> Self { Self { state: StreamState::Empty, proxy, vk, } } /// Initiates a restart of the current video stream. The restart completes /// once enough packets have been received to determine the stream metadata, /// at which point a VideoStreamReady event is sent with the new texture. pub fn reset( &mut self, stream_seq: u64, width: u32, height: u32, codec: protocol::VideoCodec, ) -> anyhow::Result<()> { debug!( stream_seq, width, height, ?codec, "starting or restarting video stream" ); let init = DecoderInit::new(self.vk.clone(), stream_seq, codec, width, height)?; use StreamState::*; let state = std::mem::replace(&mut self.state, Empty); self.state = match state { Empty | Initializing(_) => Initializing(init), Streaming(renderer) | Restarting(renderer, _) => Restarting(renderer, init), }; trace!(state = ?self.state, "video stream updated"); Ok(()) } pub fn recv_packet(&mut self, buf: Undecoded) -> anyhow::Result<()> { use StreamState::*; let stream_seq = buf.stream_seq(); let seq = buf.seq(); let len = buf.len(); trace!(stream_seq, seq, len, "received video packet",); // Feed the existing stream. 
if let Streaming(ref mut dec) | Restarting(ref mut dec, _) = self.state { if dec.stream_seq == stream_seq { trace!( stream_seq, seq, pts = buf.pts(), len, "received full video packet", ); STATS.frame_received(stream_seq, seq, len); dec.send_packet(buf)?; return Ok(()); } } // Feed the new stream, if there is one. let new_stream_ready = match self.state { Initializing(ref mut init) | Restarting(_, ref mut init) if init.stream_seq == stream_seq => { trace!( stream_seq, seq, len, "received full video packet for initializing stream", ); // Returns true if the stream is ready. init.send_packet(buf)? } _ => false, }; if new_stream_ready { // N.B. An error here puts us into an invalid state. let (dec, texture, params) = match std::mem::replace(&mut self.state, Empty) { Initializing(init) | Restarting(_, init) => { init.into_decoder(self.proxy.clone())? } Streaming(_) | Empty => unreachable!(), }; let _ = self .proxy .send_event(VideoStreamEvent::VideoStreamReady(texture, params).into()); self.state = Streaming(dec); trace!(state = ?self.state, "video stream updated") } Ok(()) } pub fn prepare_frame(&mut self) -> anyhow::Result> { match self.state { StreamState::Streaming(ref mut dec) | StreamState::Restarting(ref mut dec, _) => { dec.prepare_frame() } StreamState::Empty | StreamState::Initializing(_) => Ok(None), } } pub fn mark_frame_rendered(&mut self) { match self.state { StreamState::Streaming(ref mut dec) | StreamState::Restarting(ref mut dec, _) => { dec.mark_frame_rendered() } StreamState::Empty | StreamState::Initializing(_) => (), } } pub fn is_ready(&self) -> bool { match self.state { StreamState::Empty | StreamState::Initializing(_) => false, StreamState::Streaming(_) | StreamState::Restarting(_, _) => true, } } } struct CPUDecoder { stream_seq: u64, prepared_frame_info: Option, staging_buffer: VkHostBuffer, yuv_buffer_offsets: [usize; 3], yuv_buffer_strides: [usize; 3], // This is reference-counted because we share it with the renderer. 
video_texture: Arc, texture_width: u32, texture_height: u32, upload_cb: vk::CommandBuffer, upload_fence: vk::Fence, upload_ts_pool: VkTimestampQueryPool, tracy_upload_span: Option, undecoded_send: mpsc::Sender, decoded_recv: mpsc::Receiver, decoder_thread_handle: Option>>, vk: Arc, } /// A temporary struct that receives video packets until it has enough metadata /// to start decoding and recieves a single frame. It also handles timing out /// if it never receives any metadata in the (otherwise valid) video stream. struct DecoderInit { stream_seq: u64, width: u32, height: u32, started: time::Instant, decoder: ffmpeg::decoder::Video, packet: ffmpeg::Packet, first_frame: Option<(ffmpeg::frame::Video, FrameMetadata)>, vk: Arc, } impl DecoderInit { fn new( vk: Arc, stream_seq: u64, codec: protocol::VideoCodec, width: u32, height: u32, ) -> anyhow::Result { let codec = { let id = match codec { protocol::VideoCodec::H264 => ffmpeg::codec::Id::H264, protocol::VideoCodec::H265 => ffmpeg::codec::Id::H265, protocol::VideoCodec::Av1 => ffmpeg::codec::Id::AV1, _ => { error!("unexpected codec: {:?}", codec); unimplemented!(); } }; ffmpeg::decoder::find(id).ok_or(anyhow::anyhow!("codec not found"))? 
}; let dec_ctx = unsafe { let ptr = ffmpeg_sys::avcodec_alloc_context3(codec.as_ptr()); (*ptr).width = width as i32; (*ptr).height = height as i32; let mut hw_ctx: *mut _ = std::ptr::null_mut(); let device_type = if cfg!(target_vendor = "apple") { ffmpeg_sys::AVHWDeviceType::AV_HWDEVICE_TYPE_VIDEOTOOLBOX } else { ffmpeg_sys::AVHWDeviceType::AV_HWDEVICE_TYPE_VULKAN }; let res = ffmpeg_sys::av_hwdevice_ctx_create( &mut hw_ctx, device_type, std::ptr::null_mut(), std::ptr::null_mut(), 0, ); if res < 0 { warn!("hardware decoding setup failed, falling back to CPU decoder"); } else { (*ptr).hw_device_ctx = hw_ctx; (*ptr).get_format = Some(get_hw_format); } ffmpeg::codec::context::Context::wrap(ptr, None) }; let mut decoder = dec_ctx.decoder(); decoder.set_flags(ffmpeg::codec::Flags::LOW_DELAY); let decoder = decoder.open()?.video()?; let packet = ffmpeg::Packet::empty(); Ok(Self { stream_seq, width, height, started: time::Instant::now(), decoder, packet, first_frame: None, vk, }) } /// Feed a packet into the decoder. Returns true if the parameters of the /// stream have been recovered and it's safe to call into_decoder. Returns /// an error only on timeout. 
    fn send_packet(&mut self, buf: Undecoded) -> anyhow::Result {
        let info = FrameMetadata {
            stream_seq: self.stream_seq,
            seq: buf.seq(),
            pts: buf.pts(),
        };

        // Give up entirely if the stream never yields usable metadata.
        if self.started.elapsed() > DECODER_INIT_TIMEOUT {
            return Err(anyhow!("timed out waiting for video stream metadata"));
        }

        copy_packet(&mut self.packet, buf)?;
        match self.decoder.send_packet(&self.packet) {
            Ok(()) => {}
            Err(ffmpeg::Error::Other {
                errno: ffmpeg::error::EAGAIN,
            }) => return Err(anyhow!("decoder already read initial packets")),
            // Other decode errors are expected while joining mid-stream (e.g.
            // before the first keyframe); report "not ready" rather than
            // failing.
            Err(_) => return Ok(false),
        }

        let mut frame = ffmpeg::frame::Video::empty();
        match self.decoder.receive_frame(&mut frame) {
            Ok(()) => {
                self.first_frame = match frame.format() {
                    ffmpeg::format::Pixel::VULKAN | ffmpeg_next::format::Pixel::VIDEOTOOLBOX => {
                        // Hardware-backed frame: ask ffmpeg which software
                        // formats it can download to, and take the first
                        // (preferred) entry.
                        let sw_format = unsafe {
                            let ctx_ref = (*self.decoder.as_ptr()).hw_frames_ctx;
                            assert!(!ctx_ref.is_null());

                            let mut transfer_fmt_list = std::ptr::null_mut();
                            if ffmpeg_sys::av_hwframe_transfer_get_formats(
                                ctx_ref,
                                ffmpeg_sys::AVHWFrameTransferDirection::AV_HWFRAME_TRANSFER_DIRECTION_FROM,
                                &mut transfer_fmt_list, 0) < 0 {
                                bail!("call to av_hwframe_transfer_get_formats failed");
                            };

                            let transfer_formats = read_format_list(transfer_fmt_list);
                            assert!(!transfer_formats.is_empty());
                            transfer_formats[0]
                        };

                        let mut sw_frame = ffmpeg::frame::Video::new(
                            sw_format,
                            self.decoder.width(),
                            self.decoder.height(),
                        );

                        unsafe {
                            // Download the decoded frame from device memory
                            // into the software frame.
                            let res = ffmpeg_sys::av_hwframe_transfer_data(
                                sw_frame.as_mut_ptr(),
                                frame.as_ptr(),
                                0,
                            );

                            if res < 0 {
                                return Err(anyhow!("call to av_hwframe_transfer_data failed"));
                            }

                            Some((sw_frame, info))
                        }
                    }
                    // Software decode: keep the frame as-is.
                    _ => Some((frame, info)),
                };

                Ok(true)
            }
            // EAGAIN: the decoder needs more input before producing a frame.
            Err(ffmpeg::Error::Other {
                errno: ffmpeg::error::EAGAIN,
            }) => Ok(false),
            Err(e) => Err(e.into()),
        }
    }

    /// Consumes the DecoderInit, returning a CPUDecoder capable of uploading
    /// frames to the GPU.
fn into_decoder( self, proxy: winit::event_loop::EventLoopProxy, ) -> anyhow::Result<(CPUDecoder, Arc, VideoStreamParams)> where T: From + Send, { let width = self.decoder.width(); let height = self.decoder.height(); let decoder_format = self.decoder.format(); let first_frame = match self.first_frame { Some(f) => f, None => return Err(anyhow!("no frames received yet")), }; // If we're using hardware decode, create a "hardware" frame to use with // receive_frame. let output_format = first_frame.0.format(); let ((mut frame, info), mut hw_frame) = match decoder_format { ffmpeg::format::Pixel::VULKAN => { let hw_frame = ffmpeg::frame::Video::new(ffmpeg::format::Pixel::VULKAN, width, height); debug!(format = ?hw_frame.format(), "hw_frame format"); (first_frame, Some(hw_frame)) } ffmpeg::format::Pixel::VIDEOTOOLBOX => { let hw_frame = ffmpeg::frame::Video::new(ffmpeg::format::Pixel::VIDEOTOOLBOX, width, height); (first_frame, Some(hw_frame)) } _ => (first_frame, None), }; // For 10-bit textures, we need to end up in on the GPU in P010LE, // because that's better supported. To make the copy easier, we'll use // swscale to convert to a matching intermediate format. 
let (intermediate_format, texture_format) = match output_format { ffmpeg::format::Pixel::YUV420P => (None, vk::Format::G8_B8_R8_3PLANE_420_UNORM), ffmpeg::format::Pixel::NV12 => (None, vk::Format::G8_B8R8_2PLANE_420_UNORM), ffmpeg::format::Pixel::P010LE => { (None, vk::Format::G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16) } ffmpeg::format::Pixel::YUV420P10 | ffmpeg::format::Pixel::YUV420P10LE => ( Some(ffmpeg::format::Pixel::P010LE), vk::Format::G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16, ), _ => return Err(anyhow!("unexpected pixel format: {:?}", output_format)), }; debug_assert_eq!(frame.width(), width); debug_assert_eq!(frame.height(), height); if width != self.width || height != self.height { return Err(anyhow!( "unexpected video stream dimensions: {}x{} (expected {}x{})", width, height, self.width, self.height )); } let mut intermediate_frame = intermediate_format.map(|fmt| ffmpeg::frame::Video::new(fmt, width, height)); // For the purposes of determining the size of and offsets into the // staging buffer, we use the intermediate frame if it exists, otherwise // the output frame. let model_frame = intermediate_frame.as_ref().unwrap_or(&frame); // Precalculate the layout of the staging buffer. 
let mut buffer_strides = [0; 3]; let mut buffer_offsets = [0; 3]; let mut buffer_size = 0; for plane in 0..model_frame.planes() { let stride = model_frame.stride(plane); let len = stride * model_frame.plane_height(plane) as usize; buffer_strides[plane] = stride; buffer_offsets[plane] = buffer_size; buffer_size += len; } let staging_buffer = create_host_buffer( &self.vk.device, self.vk.device_info.host_visible_mem_type_index, vk::BufferUsageFlags::TRANSFER_SRC, buffer_size, )?; let color_space = match ( self.decoder.color_space(), self.decoder.color_transfer_characteristic(), ) { (ffmpeg::color::Space::BT709, ffmpeg::color::TransferCharacteristic::BT709) => { ColorSpace::Bt709 } (ffmpeg::color::Space::BT2020NCL, ffmpeg::color::TransferCharacteristic::SMPTE2084) => { ColorSpace::Bt2020Pq } ( ffmpeg::color::Space::Unspecified, ffmpeg::color::TransferCharacteristic::Unspecified, ) => { warn!("video stream has unspecified color primaries or transfer function"); ColorSpace::Bt709 } (cs, ctrc) => bail!("unexpected color description: {:?} / {:?}", cs, ctrc), }; let color_full_range = match self.decoder.color_range() { ffmpeg::color::Range::MPEG => false, ffmpeg::color::Range::JPEG => true, cr => { warn!("unexpected color range: {:?}", cr); false } }; let video_texture = Arc::new(VkImage::new( self.vk.clone(), texture_format, width, height, vk::ImageUsageFlags::TRANSFER_DST | vk::ImageUsageFlags::SAMPLED | vk::ImageUsageFlags::TRANSFER_SRC, vk::SharingMode::EXCLUSIVE, vk::ImageCreateFlags::empty(), )?); // Uploads happen on the present queue. let upload_cb = create_command_buffer(&self.vk.device, self.vk.present_queue.command_pool)?; let upload_fence = create_fence(&self.vk.device, true)?; let upload_ts_pool = create_timestamp_query_pool(&self.vk.device, 2)?; let (undecoded_send, undecoded_recv) = mpsc::channel::(); let (decoded_send, decoded_recv) = mpsc::channel::(); // Send the frame we have from before. 
decoded_send .send(copy_frame( &mut frame, intermediate_frame.as_mut(), &mut BytesMut::new(), info, )) .unwrap(); // Spawn another thread that receives packets on one channel and sends // completed pictures on another. let stream_seq = self.stream_seq; let mut decoder = self.decoder; let mut packet = self.packet; let decoder_thread_handle = std::thread::Builder::new() .name("CPU decoder".to_string()) .spawn(move || -> anyhow::Result<()> { tracy_client::set_thread_name!("CPU decoder"); // This should have enough capacity for four pictures (YUV420 has // a bpp of 1.5). It will also resize dynamically, of course. let mut scratch = BytesMut::with_capacity((width * height * 6) as usize); for buf in undecoded_recv { let _tracy_frame = tracy_client::non_continuous_frame!("decode"); let span = trace_span!("decode_loop"); let _guard = span.enter(); let info = FrameMetadata { stream_seq, seq: buf.seq(), pts: buf.pts(), }; copy_packet(&mut packet, buf)?; // Send the packet to the decoder. if trace_span!("send_packet") .in_scope(|| decoder.send_packet(&packet)) .is_err() { continue; } // Receive frames until we get EAGAIN. 
loop { match receive_frame(&mut decoder, &mut frame, hw_frame.as_mut()) { Ok(()) => { let pic = copy_frame( &mut frame, intermediate_frame.as_mut(), &mut scratch, info, ); let span = trace_span!("send"); let _guard = span.enter(); match decoded_send.send(pic) { Ok(()) => {} Err(mpsc::SendError(_)) => return Ok(()), } match proxy.send_event(VideoStreamEvent::VideoFrameAvailable.into()) { Ok(()) => {} Err(_) => return Ok(()), } } Err(ffmpeg::Error::Other { errno: ffmpeg::error::EAGAIN, }) => break, Err(e) => { debug!("receive_frame failed: {:?}", e); return Err(e.into()); } } } } Ok(()) })?; let dec = CPUDecoder { stream_seq: self.stream_seq, prepared_frame_info: None, staging_buffer, yuv_buffer_offsets: buffer_offsets, yuv_buffer_strides: buffer_strides, video_texture: video_texture.clone(), texture_width: width, texture_height: height, upload_cb, upload_fence, upload_ts_pool, tracy_upload_span: None, undecoded_send, decoded_recv, decoder_thread_handle: Some(decoder_thread_handle), vk: self.vk, }; unsafe { dec.prerecord_upload()? }; let params = VideoStreamParams { width, height, color_space, color_full_range, }; Ok((dec, video_texture, params)) } } impl CPUDecoder { fn send_packet(&mut self, buf: Undecoded) -> anyhow::Result<()> { let exit = match self.undecoded_send.send(buf) { Ok(_) => return Ok(()), Err(mpsc::SendError(_)) => match self.decoder_thread_handle.take() { Some(h) => h.join(), None => unreachable!(), }, }; match exit { Ok(Ok(())) => Err(anyhow!("decoding thread exited unexpectedly")), Ok(Err(e)) => Err(e).context("decoding exited with error"), Err(v) => Err(anyhow!("decoding thread panicked: {:?}", v)), } } pub fn prepare_frame(&mut self) -> anyhow::Result> { // If multiple frames are ready, only grab the last one. 
let mut iterator = self.decoded_recv.try_iter().peekable(); while let Some(pic) = iterator.next() { if iterator.peek().is_some() { STATS.frame_discarded(pic.info.stream_seq, pic.info.seq); debug!( stream_seq = pic.info.stream_seq, seq = pic.info.seq, "discarding frame" ); } else { let pic_info = pic.info; unsafe { self.upload(pic).context("uploading frame to GPU")?; } if let Some(old) = self.prepared_frame_info.replace(pic_info) { debug!( stream_seq = old.stream_seq, seq = old.seq, "overwriting uploaded frame" ); STATS.frame_discarded(old.stream_seq, old.seq); } return Ok(Some(pic_info)); } } Ok(None) } pub fn mark_frame_rendered(&mut self) { if let Some(info) = self.prepared_frame_info.take() { STATS.frame_rendered(info.stream_seq, info.seq); } } unsafe fn upload(&mut self, pic: YUVPicture) -> anyhow::Result<()> { // Wait for the previous upload to complete. let device = &self.vk.device; device.wait_for_fences(&[self.upload_fence], true, u64::MAX)?; // Copy data into the staging buffer. self.yuv_buffer_offsets .iter() .zip(pic.planes.iter()) .take(pic.num_planes) .for_each(|(offset, src)| { let dst = std::slice::from_raw_parts_mut( (self.staging_buffer.access as *mut u8).add(*offset), src.len(), ); dst.copy_from_slice(src); }); // Trace the upload, including loading timestamps for the previous upload. if let Some(ctx) = &self.vk.tracy_context { if let Some(prev_span) = self.tracy_upload_span.take() { let timestamps = self.upload_ts_pool.fetch_results(&self.vk.device)?; prev_span.upload_timestamp(timestamps[0], timestamps[1]); } self.tracy_upload_span = Some(ctx.span(tracy_client::span_location!())?); } // The command buffer was prerecorded, so we can directly submit it. 
{ let cbs = [self.upload_cb]; let submit_info = vk::SubmitInfo::default().command_buffers(&cbs); self.vk.device.reset_fences(&[self.upload_fence])?; trace!(queue = ?self.vk.present_queue.queue, "queue submit for upload"); let submits = [submit_info]; device.queue_submit(self.vk.present_queue.queue, &submits, self.upload_fence)?; } if let Some(span) = self.tracy_upload_span.as_mut() { span.end_zone(); } Ok(()) } unsafe fn prerecord_upload(&self) -> anyhow::Result<()> { let device = &self.vk.device; // Reset the command buffer. device.reset_command_buffer(self.upload_cb, vk::CommandBufferResetFlags::empty())?; // Begin the command buffer. { let begin_info = vk::CommandBufferBeginInfo::default() .flags(vk::CommandBufferUsageFlags::SIMULTANEOUS_USE); device.begin_command_buffer(self.upload_cb, &begin_info)?; } // Record the start timestamp. self.upload_ts_pool.cmd_reset(device, self.upload_cb); device.cmd_write_timestamp( self.upload_cb, vk::PipelineStageFlags::TOP_OF_PIPE, self.upload_ts_pool.pool, 0, ); // Transfer the image to be writable. cmd_image_barrier( device, self.upload_cb, self.video_texture.image, vk::PipelineStageFlags::TOP_OF_PIPE, vk::AccessFlags::empty(), vk::PipelineStageFlags::TRANSFER, vk::AccessFlags::TRANSFER_WRITE, vk::ImageLayout::UNDEFINED, vk::ImageLayout::TRANSFER_DST_OPTIMAL, ); // Upload from the staging buffer to the texture. { let num_planes = match self.video_texture.format { vk::Format::G8_B8_R8_3PLANE_420_UNORM => 3, vk::Format::G8_B8R8_2PLANE_420_UNORM => 2, vk::Format::G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16 => 2, _ => unreachable!(), }; let regions = [ vk::ImageAspectFlags::PLANE_0, vk::ImageAspectFlags::PLANE_1, vk::ImageAspectFlags::PLANE_2, ] .into_iter() .enumerate() .take(num_planes) .map(|(plane, plane_aspect_mask)| { // Vulkan considers the image width/height to be 1/2 the size // for the U and V planes. 
let (width, height) = if plane == 0 { (self.texture_width, self.texture_height) } else { (self.texture_width / 2, self.texture_height / 2) }; let texel_width = match self.video_texture.format { vk::Format::G8_B8_R8_3PLANE_420_UNORM => 1, vk::Format::G8_B8R8_2PLANE_420_UNORM => { if plane == 0 { 1 } else { 2 } } vk::Format::G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16 => { if plane == 0 { 2 } else { 4 } } _ => unreachable!(), }; vk::BufferImageCopy::default() .buffer_offset(self.yuv_buffer_offsets[plane] as u64) .buffer_row_length((self.yuv_buffer_strides[plane] / texel_width) as u32) // In texels. .image_subresource(vk::ImageSubresourceLayers { aspect_mask: plane_aspect_mask, mip_level: 0, base_array_layer: 0, layer_count: 1, }) .image_extent(vk::Extent3D { width, height, depth: 1, }) }) .collect::>(); device.cmd_copy_buffer_to_image( self.upload_cb, self.staging_buffer.buffer, self.video_texture.image, vk::ImageLayout::TRANSFER_DST_OPTIMAL, ®ions, ); } // Transfer the image back to be readable. cmd_image_barrier( device, self.upload_cb, self.video_texture.image, vk::PipelineStageFlags::TRANSFER, vk::AccessFlags::TRANSFER_WRITE, vk::PipelineStageFlags::FRAGMENT_SHADER, vk::AccessFlags::SHADER_READ, vk::ImageLayout::UNDEFINED, vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL, ); // Record the end timestamp. 
device.cmd_write_timestamp( self.upload_cb, vk::PipelineStageFlags::BOTTOM_OF_PIPE, self.upload_ts_pool.pool, 1, ); device.end_command_buffer(self.upload_cb)?; Ok(()) } } impl Drop for CPUDecoder { fn drop(&mut self) { let device = &self.vk.device; unsafe { device.queue_wait_idle(self.vk.present_queue.queue).ok(); destroy_host_buffer(device, &self.staging_buffer); device.destroy_fence(self.upload_fence, None); device.destroy_query_pool(self.upload_ts_pool.pool, None); device.free_command_buffers(self.vk.present_queue.command_pool, &[self.upload_cb]); } } } #[instrument(skip_all)] fn receive_frame( dec: &mut ffmpeg::decoder::Video, frame: &mut ffmpeg::frame::Video, hw_frame: Option<&mut ffmpeg::frame::Video>, ) -> Result<(), ffmpeg::Error> { match hw_frame { Some(f) => { dec.receive_frame(f)?; unsafe { let res = ffmpeg_sys::av_hwframe_transfer_data(frame.as_mut_ptr(), f.as_ptr(), 0); if res < 0 { error!("call to av_hwframe_transfer_data failed"); Err(ffmpeg::Error::Other { errno: res }) } else { Ok(()) } } } None => dec.receive_frame(frame), } } #[instrument(skip_all)] fn copy_packet(pkt: &mut ffmpeg::Packet, buf: Undecoded) -> anyhow::Result<()> { // It's necessary to reset the packet metadata for each NAL. unsafe { use ffmpeg::packet::Mut; ffmpeg_sys::av_init_packet(pkt.as_mut_ptr()); } // Copy into data. let packet_len = buf.len(); match pkt.size().cmp(&packet_len) { std::cmp::Ordering::Less => { pkt.grow(packet_len - pkt.size()); } std::cmp::Ordering::Greater => { // Takes the new total, not the amount to shrink. 
            pkt.shrink(packet_len);
        }
        std::cmp::Ordering::Equal => {}
    };

    buf.copy_to_slice(pkt.data_mut().unwrap());
    Ok(())
}

/// Copies a decoded frame's planes into a refcounted YUVPicture. If
/// `intermediate_frame` is provided, the frame is first converted into it with
/// swscale (used for 10-bit planar -> P010LE), and the planes are copied from
/// the converted frame instead.
#[instrument(skip_all)]
fn copy_frame(
    frame: &mut ffmpeg::frame::Video,
    intermediate_frame: Option<&mut ffmpeg::frame::Video>,
    scratch: &mut BytesMut,
    info: FrameMetadata,
) -> YUVPicture {
    let transfer_src = if let Some(intermediate) = intermediate_frame {
        // TODO reuse
        let mut ctx = ffmpeg::software::scaling::Context::get(
            frame.format(),
            frame.width(),
            frame.height(),
            intermediate.format(),
            intermediate.width(),
            intermediate.height(),
            ffmpeg::software::scaling::Flags::empty(),
        )
        .expect("failed to create sws ctx");

        ctx.run(frame, intermediate).expect("failed to convert");
        intermediate
    } else {
        frame
    };

    let mut pic = YUVPicture {
        planes: [Bytes::new(), Bytes::new(), Bytes::new()],
        num_planes: transfer_src.planes(),
        info,
    };

    // Reuse the scratch buffer across frames: append each plane, then split
    // it off as a cheap refcounted Bytes.
    scratch.truncate(0);
    for plane in 0..transfer_src.planes() {
        scratch.extend_from_slice(transfer_src.data(plane));
        pic.planes[plane] = scratch.split().freeze();
    }

    pic
}

// Callback installed as AVCodecContext::get_format; ffmpeg calls it with the
// candidate pixel formats in `list`, and we pick a hardware format when one is
// available, falling back to the software format otherwise.
#[no_mangle]
unsafe extern "C" fn get_hw_format(
    ctx: *mut ffmpeg_sys::AVCodecContext,
    list: *const ffmpeg_sys::AVPixelFormat,
) -> ffmpeg_sys::AVPixelFormat {
    use ffmpeg_sys::AVPixelFormat::*;

    let sw_pix_fmt = (*ctx).sw_pix_fmt;

    let formats = read_format_list(list);
    debug!(?formats, ?sw_pix_fmt, "get_hw_format");

    if formats.contains(&ffmpeg::format::Pixel::VULKAN) {
        return AV_PIX_FMT_VULKAN;
    } else if formats.contains(&ffmpeg::format::Pixel::VIDEOTOOLBOX) {
        // VideoToolbox requires us to allocate and initialize the hardware
        // frames context ourselves.
        let frames_ctx_ref = ffmpeg_sys::av_hwframe_ctx_alloc((*ctx).hw_device_ctx);
        if frames_ctx_ref.is_null() {
            error!("call to av_hwframe_ctx_alloc failed");
            return sw_pix_fmt;
        }

        let frames_ctx = (*frames_ctx_ref).data as *mut ffmpeg_sys::AVHWFramesContext;
        (*frames_ctx).width = (*ctx).width;
        (*frames_ctx).height = (*ctx).height;
        (*frames_ctx).format = AV_PIX_FMT_VIDEOTOOLBOX;
        (*frames_ctx).sw_format = AV_PIX_FMT_YUV420P;

        let res = ffmpeg_sys::av_hwframe_ctx_init(frames_ctx_ref);
        if res < 0 {
            error!("call to
av_hwframe_ctx_init failed"); return sw_pix_fmt; } debug!("using VideoToolbox hardware encoder"); (*ctx).hw_frames_ctx = frames_ctx_ref; return AV_PIX_FMT_VIDEOTOOLBOX; } warn!("unable to determine ffmpeg hw format"); sw_pix_fmt } unsafe fn read_format_list( mut ptr: *const ffmpeg_sys::AVPixelFormat, ) -> Vec { let mut formats = Vec::new(); while !ptr.is_null() && *ptr != ffmpeg_sys::AVPixelFormat::AV_PIX_FMT_NONE { formats.push((*ptr).into()); ptr = ptr.add(1); } formats } ================================================ FILE: mm-client/src/vulkan.rs ================================================ #![allow(clippy::missing_safety_doc)] // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT use std::{ ffi::{c_void, CStr, CString}, sync::Arc, }; use anyhow::{anyhow, bail, Context}; use ash::{ ext::debug_utils, khr::{ dynamic_rendering, surface, swapchain, video_decode_av1, video_decode_h264, video_decode_h265, video_decode_queue, video_queue, }, vk, }; use cstr::cstr; use tracing::{debug, error, info, warn}; use winit::raw_window_handle::{HasDisplayHandle, HasWindowHandle as _}; use crate::video::ColorSpace; pub struct VkDebugContext { debug: debug_utils::Instance, messenger: vk::DebugUtilsMessengerEXT, } pub struct VkQueue { pub queue: vk::Queue, pub command_pool: vk::CommandPool, } pub struct VkDeviceInfo { pub device_name: CString, pub device_type: vk::PhysicalDeviceType, pub limits: vk::PhysicalDeviceLimits, pub present_family: u32, pub decode_family: Option, pub supports_h264: bool, pub supports_h265: bool, pub supports_av1: bool, pub memory_props: vk::PhysicalDeviceMemoryProperties, pub host_visible_mem_type_index: u32, pub host_mem_is_cached: bool, pub selected_extensions: Vec, } pub struct VkContext { pub entry: ash::Entry, pub instance: ash::Instance, pub swapchain_loader: swapchain::Device, pub surface_loader: surface::Instance, pub dynamic_rendering_loader: dynamic_rendering::Device, pub surface: vk::SurfaceKHR, pub pdevice: vk::PhysicalDevice, 
pub device: ash::Device, pub device_info: VkDeviceInfo, pub present_queue: VkQueue, pub decode_queue: Option, pub debug: Option, pub tracy_context: Option, // Hold on to a reference to the window, so that it gets dropped last. _window: Arc, } impl VkDeviceInfo { fn query( instance: &ash::Instance, surface_loader: &surface::Instance, surface: vk::SurfaceKHR, device: vk::PhysicalDevice, ) -> anyhow::Result { let props = unsafe { instance.get_physical_device_properties(device) }; let device_type = props.device_type; let device_name = unsafe { CStr::from_ptr(props.device_name.as_ptr()).to_owned() }; let queue_families = unsafe { instance .get_physical_device_queue_family_properties(device) .into_iter() .collect::>() }; let present_family = queue_families .iter() .enumerate() .find(|(idx, properties)| { properties.queue_flags.contains(vk::QueueFlags::GRAPHICS) && properties.queue_flags.contains(vk::QueueFlags::COMPUTE) && unsafe { surface_loader .get_physical_device_surface_support(device, *idx as u32, surface) .unwrap_or(false) } }) .map(|(index, _)| index as u32) .to_owned() .ok_or_else(|| anyhow::anyhow!("no graphics queue found"))?; let decode_family = queue_families .iter() .enumerate() .find(|(_, properties)| { properties .queue_flags .contains(vk::QueueFlags::VIDEO_DECODE_KHR) }) .map(|(index, _)| index as u32); let available_extensions = unsafe { instance .enumerate_device_extension_properties(device) .unwrap() .into_iter() .map(|properties| CStr::from_ptr(&properties.extension_name as *const _).to_owned()) .collect::>() }; let ext_swapchain = swapchain::NAME; if !contains(&available_extensions, ext_swapchain) { return Err(anyhow::anyhow!("swapchain extension not available")); } let mut selected_extensions = vec![ ext_swapchain.to_owned(), dynamic_rendering::NAME.to_owned(), #[cfg(any(target_os = "macos", target_os = "ios"))] vk::KhrPortabilitySubsetFn::name().to_owned(), ]; let ext_video_queue = video_queue::NAME; let ext_video_decode_queue = 
video_decode_queue::NAME; let ext_h264 = video_decode_h264::NAME; let ext_h265 = video_decode_h265::NAME; let ext_av1 = video_decode_av1::NAME; let mut supports_h264 = false; let mut supports_h265 = false; let mut supports_av1 = false; if decode_family.is_some() && contains(&available_extensions, ext_video_queue) && contains(&available_extensions, ext_video_decode_queue) { selected_extensions.push(ext_video_decode_queue.to_owned()); selected_extensions.push(ext_video_queue.to_owned()); if contains(&available_extensions, ext_h264) { supports_h264 = true; selected_extensions.push(ext_h264.to_owned()); } if contains(&available_extensions, ext_h265) { supports_h265 = true; selected_extensions.push(ext_h265.to_owned()); } // This doesn't actually exist yet. if contains(&available_extensions, ext_av1) { supports_av1 = true; selected_extensions.push(ext_av1.to_owned()); } } // We want HOST_CACHED | HOST_COHERENT, but we can make do with just // HOST_VISIBLE. let memory_props = unsafe { instance.get_physical_device_memory_properties(device) }; let (host_visible_mem_type_index, host_mem_is_cached) = { let mut cached = true; let mut idx = select_memory_type( &memory_props, vk::MemoryPropertyFlags::HOST_VISIBLE | vk::MemoryPropertyFlags::HOST_CACHED | vk::MemoryPropertyFlags::HOST_COHERENT, None, ); if idx.is_none() { idx = select_memory_type( &memory_props, vk::MemoryPropertyFlags::HOST_VISIBLE | vk::MemoryPropertyFlags::HOST_COHERENT, None, ); if idx.is_none() { bail!("no host visible memory type found"); } cached = false; } (idx.unwrap(), cached) }; Ok(Self { device_name, device_type, limits: props.limits, present_family, decode_family, supports_h264, supports_h265, supports_av1, memory_props, host_visible_mem_type_index, host_mem_is_cached, selected_extensions, }) } pub fn is_integrated(&self) -> bool { self.device_type == vk::PhysicalDeviceType::INTEGRATED_GPU } } impl VkContext { pub unsafe fn new(window: Arc, debug: bool) -> anyhow::Result { // MoltenVK is very noisy. 
#[cfg(target_os = "macos")] std::env::set_var( "MVK_CONFIG_LOG_LEVEL", std::env::var("MVK_CONFIG_LOG_LEVEL").unwrap_or("0".to_string()), ); #[cfg(all(target_os = "macos", feature = "moltenvk_static"))] let entry = ash_molten::load(); #[cfg(not(all(target_os = "macos", feature = "moltenvk_static")))] let entry = unsafe { ash::Entry::load().context("failed to load vulkan libraries!") }?; debug!("creating vulkan instance"); let (major, minor) = match entry.try_enumerate_instance_version()? { // Vulkan 1.1+ Some(version) => ( vk::api_version_major(version), vk::api_version_minor(version), ), // Vulkan 1.0 None => (1, 0), }; if major < 1 || (major == 1 && minor < 2) { return Err(anyhow::anyhow!("vulkan 1.2 or higher is required")); } // MoltenVK doesn't actually support 1.3. let (major, minor) = if cfg!(any(target_os = "macos")) { (1, 2) } else { (major, minor) }; let app_info = vk::ApplicationInfo::default() .application_name(cstr!("Magic Mirror")) .application_version(vk::make_api_version(0, 0, 1, 0)) .engine_name(cstr!("No Engine")) .engine_version(vk::make_api_version(0, 0, 1, 0)) .api_version(vk::make_api_version(0, major, minor, 0)); let mut extensions = ash_window::enumerate_required_extensions(window.display_handle()?.as_raw())?.to_vec(); let mut layers = Vec::new(); #[cfg(all(target_os = "macos", not(feature = "moltenvk_static")))] { extensions.push(vk::KhrPortabilityEnumerationFn::name().as_ptr()); // Enabling this extension is a requirement when using // `VK_KHR_portability_subset` extensions.push(vk::KhrGetPhysicalDeviceProperties2Fn::name().as_ptr()); } if debug { let props = entry.enumerate_instance_extension_properties(None)?; let available_extensions = props .into_iter() .map(|properties| unsafe { CStr::from_ptr(&properties.extension_name as *const _).to_owned() }) .collect::>(); if !available_extensions .iter() .any(|ext| ext.as_c_str() == debug_utils::NAME) { return Err(anyhow::anyhow!( "debug utils extension requested, but not available" )); } 
warn!("vulkan debug tooling enabled"); extensions.push(debug_utils::NAME.as_ptr()); let validation_layer = cstr!("VK_LAYER_KHRONOS_validation"); let layer_props = entry.enumerate_instance_layer_properties()?; if layer_props .into_iter() .map(|properties| unsafe { CStr::from_ptr(&properties.layer_name as *const _) }) .any(|layer| layer == validation_layer) { layers.push(validation_layer.as_ptr()); } else { warn!("validation layers requested, but not available!") } } let instance = { let flags = if cfg!(any(target_os = "macos", target_os = "ios")) { vk::InstanceCreateFlags::ENUMERATE_PORTABILITY_KHR } else { vk::InstanceCreateFlags::default() }; let instance_create_info = vk::InstanceCreateInfo::default() .flags(flags) .application_info(&app_info) .enabled_layer_names(&layers) .enabled_extension_names(&extensions); unsafe { entry.create_instance(&instance_create_info, None)? } }; let surface_loader = surface::Instance::new(&entry, &instance); let surface = unsafe { ash_window::create_surface( &entry, &instance, window.display_handle()?.as_raw(), window.window_handle()?.as_raw(), None, )? }; let debug_utils = if debug { let debug_utils = debug_utils::Instance::new(&entry, &instance); let create_info = vk::DebugUtilsMessengerCreateInfoEXT::default() .message_severity( vk::DebugUtilsMessageSeverityFlagsEXT::WARNING | vk::DebugUtilsMessageSeverityFlagsEXT::VERBOSE | vk::DebugUtilsMessageSeverityFlagsEXT::INFO | vk::DebugUtilsMessageSeverityFlagsEXT::ERROR, ) .message_type( vk::DebugUtilsMessageTypeFlagsEXT::GENERAL | vk::DebugUtilsMessageTypeFlagsEXT::PERFORMANCE | vk::DebugUtilsMessageTypeFlagsEXT::VALIDATION, ) .pfn_user_callback(Some(vulkan_debug_utils_callback)); let messenger = unsafe { debug_utils.create_debug_utils_messenger(&create_info, None) }?; Some(VkDebugContext { debug: debug_utils, messenger, }) } else { None }; // Select a device based on encoding support. let devices = unsafe { instance.enumerate_physical_devices()? 
}; let mut devices = devices .into_iter() .enumerate() .flat_map(|(index, dev)| { match VkDeviceInfo::query(&instance, &surface_loader, surface, dev) { Ok(info) => Some((index as u32, dev, info)), Err(err) => { let device_name = unsafe { CStr::from_ptr( instance .get_physical_device_properties(dev) .device_name .as_ptr(), ) .to_owned() }; warn!("gpu {device_name:?} ineligible: {err}"); None } } }) .collect::>(); devices.sort_by_key(|(_, _, info)| { let mut score = match info.device_type { vk::PhysicalDeviceType::DISCRETE_GPU => 0, vk::PhysicalDeviceType::INTEGRATED_GPU => 10, _ => 20, }; score += info.decode_family.is_none() as u32; score += !info.supports_h264 as u32; score += !info.supports_h265 as u32; score += !info.supports_av1 as u32; score }); if devices.is_empty() { return Err(anyhow!("no eligible GPU found!")); } let (index, pdevice, device_info) = devices.remove(0); info!("selected gpu: {:?} ({index})", device_info.device_name); let device = { let queue_priorities = &[1.0]; let mut queue_indices = Vec::new(); queue_indices.push(device_info.present_family); if let Some(idx) = device_info.decode_family { queue_indices.push(idx); } queue_indices.dedup(); let queue_create_infos = queue_indices .iter() .map(|&index| { vk::DeviceQueueCreateInfo::default() .queue_family_index(index) .queue_priorities(queue_priorities) }) .collect::>(); let mut enabled_1_1_features = vk::PhysicalDeviceVulkan11Features::default().sampler_ycbcr_conversion(true); let mut dynamic_rendering_features = vk::PhysicalDeviceDynamicRenderingFeatures::default().dynamic_rendering(true); let extension_names = device_info .selected_extensions .iter() .map(|v| v.as_c_str().as_ptr()) .collect::>(); let device_create_info = vk::DeviceCreateInfo::default() .queue_create_infos(&queue_create_infos) .enabled_extension_names(&extension_names) .push_next(&mut enabled_1_1_features) .push_next(&mut dynamic_rendering_features); unsafe { instance.create_device(pdevice, &device_create_info, None)? 
} }; let present_queue = get_queue_with_command_pool(&device, device_info.present_family)?; let mut decode_queue = None; if device_info.decode_family.is_some() { info!( "vulkan video decode support: (h264: {}, h265: {}, av1: {})", device_info.supports_h264, device_info.supports_h265, device_info.supports_av1 ); decode_queue = Some(get_queue_with_command_pool( &device, device_info.decode_family.unwrap(), )?); } else { debug!("no vulkan video support found") } if !device_info.host_mem_is_cached { warn!("no cache-coherent memory type found on device!"); } let swapchain_loader = swapchain::Device::new(&instance, &device); let dynamic_rendering_loader = dynamic_rendering::Device::new(&instance, &device); let tracy_context = tracy_client::Client::running().and_then(|client| { match init_tracy_context(&device, &device_info, &present_queue, client) { Ok(ctx) => Some(ctx), Err(err) => { error!("failed to initialize tracy GPU context: {err}"); None } } }); Ok(Self { entry, instance, swapchain_loader, surface_loader, dynamic_rendering_loader, surface, pdevice, device, device_info, present_queue, decode_queue, debug: debug_utils, tracy_context, _window: window, }) } } impl Drop for VkContext { fn drop(&mut self) { let device = &self.device; unsafe { device.destroy_command_pool(self.present_queue.command_pool, None); if let Some(decode_queue) = self.decode_queue.take() { device.destroy_command_pool(decode_queue.command_pool, None); } if let Some(debug) = self.debug.take() { debug .debug .destroy_debug_utils_messenger(debug.messenger, None); } self.surface_loader.destroy_surface(self.surface, None); self.device.destroy_device(None); self.instance.destroy_instance(None); } } } fn contains(list: &[CString], str: &'static CStr) -> bool { list.iter().any(|v| v.as_c_str() == str) } fn init_tracy_context( device: &ash::Device, pdevice: &VkDeviceInfo, present_queue: &VkQueue, client: tracy_client::Client, ) -> anyhow::Result { // Query the timestamp once to calibrate the clocks. 
let cb = create_command_buffer(device, present_queue.command_pool)?; unsafe { device.reset_command_buffer(cb, vk::CommandBufferResetFlags::empty())?; let query_pool = create_timestamp_query_pool(device, 1)?; let fence = create_fence(device, false)?; // Begin the command buffer. device.begin_command_buffer( cb, &vk::CommandBufferBeginInfo::default() .flags(vk::CommandBufferUsageFlags::ONE_TIME_SUBMIT), )?; // Write a timestamp. query_pool.cmd_reset(device, cb); device.cmd_write_timestamp( cb, vk::PipelineStageFlags::BOTTOM_OF_PIPE, query_pool.pool, 0, ); // Submit. device.end_command_buffer(cb)?; let cbs = [cb]; device.queue_submit( present_queue.queue, &[vk::SubmitInfo::default().command_buffers(&cbs)], fence, )?; // Wait for the fence, fetch the timestamp. device.wait_for_fences(&[fence], true, u64::MAX)?; let ts = query_pool.fetch_results(device)?[0]; let context = client.new_gpu_context( Some("present queue"), tracy_client::GpuContextType::Vulkan, ts as i64, pdevice.limits.timestamp_period, )?; // Cleanup. device.free_command_buffers(present_queue.command_pool, &[cb]); device.destroy_fence(fence, None); device.destroy_query_pool(query_pool.pool, None); Ok(context) } } pub fn select_memory_type( props: &vk::PhysicalDeviceMemoryProperties, flags: vk::MemoryPropertyFlags, req: Option, ) -> Option { for i in 0..props.memory_type_count { if let Some(req) = req { if req.memory_type_bits & (1 << i) == 0 { continue; } } if flags.is_empty() || props.memory_types[i as usize] .property_flags .contains(flags) { return Some(i); } } None } fn get_queue_with_command_pool(device: &ash::Device, idx: u32) -> Result { let queue = unsafe { device.get_device_queue(idx, 0) }; let command_pool = unsafe { let create_info = vk::CommandPoolCreateInfo::default() .queue_family_index(idx) .flags(vk::CommandPoolCreateFlags::RESET_COMMAND_BUFFER); device.create_command_pool(&create_info, None)? 
}; Ok(VkQueue { queue, command_pool, }) } pub fn create_command_buffer( device: &ash::Device, pool: vk::CommandPool, ) -> anyhow::Result { let create_info = vk::CommandBufferAllocateInfo::default() .level(vk::CommandBufferLevel::PRIMARY) .command_pool(pool) .command_buffer_count(1); let cb = unsafe { device .allocate_command_buffers(&create_info) .context("failed to allocate render command buffer")? .pop() .unwrap() }; Ok(cb) } pub struct VkImage { pub image: vk::Image, pub memory: vk::DeviceMemory, pub format: vk::Format, pub width: u32, pub height: u32, vk: Arc, } impl VkImage { pub fn new( vk: Arc, format: vk::Format, width: u32, height: u32, usage: vk::ImageUsageFlags, sharing_mode: vk::SharingMode, flags: vk::ImageCreateFlags, ) -> anyhow::Result { let image = { let create_info = vk::ImageCreateInfo::default() .image_type(vk::ImageType::TYPE_2D) .format(format) .extent(vk::Extent3D { width, height, depth: 1, }) .mip_levels(1) .array_layers(1) .samples(vk::SampleCountFlags::TYPE_1) .tiling(vk::ImageTiling::OPTIMAL) .usage(usage) .sharing_mode(sharing_mode) .initial_layout(vk::ImageLayout::UNDEFINED) .flags(flags); unsafe { vk.device .create_image(&create_info, None) .context("VkCreateImage")? } }; let memory = unsafe { bind_memory_for_image(&vk.device, &vk.device_info.memory_props, image)? 
}; Ok(Self { image, memory, format, width, height, vk, }) } pub fn wrap( vk: Arc, image: vk::Image, memory: vk::DeviceMemory, format: vk::Format, width: u32, height: u32, ) -> Self { Self { image, memory, format, width, height, vk, } } pub fn extent(&self) -> vk::Extent2D { vk::Extent2D { width: self.width, height: self.height, } } pub fn rect(&self) -> vk::Rect2D { vk::Rect2D { offset: vk::Offset2D { x: 0, y: 0 }, extent: self.extent(), } } } impl Drop for VkImage { fn drop(&mut self) { unsafe { self.vk.device.destroy_image(self.image, None); self.vk.device.free_memory(self.memory, None); } } } pub unsafe fn bind_memory_for_image( device: &ash::Device, props: &vk::PhysicalDeviceMemoryProperties, image: vk::Image, ) -> anyhow::Result { let image_memory_req = unsafe { device.get_image_memory_requirements(image) }; let mem_type_index = select_memory_type( props, vk::MemoryPropertyFlags::DEVICE_LOCAL, Some(image_memory_req), ); if mem_type_index.is_none() { bail!( "no appropriate memory type found for reqs: {:?}", image_memory_req ); } let memory = { let image_allocate_info = vk::MemoryAllocateInfo::default() .allocation_size(image_memory_req.size) .memory_type_index(mem_type_index.unwrap()); unsafe { device .allocate_memory(&image_allocate_info, None) .context("VkAllocateMemory")? 
} }; unsafe { device .bind_image_memory(image, memory, 0) .context("VkBindImageMemory")?; } Ok(memory) } pub unsafe fn create_image_view( device: &ash::Device, image: vk::Image, format: vk::Format, sampler_conversion: Option, ) -> anyhow::Result { let mut create_info = vk::ImageViewCreateInfo::default() .image(image) .view_type(vk::ImageViewType::TYPE_2D) .format(format) .components(vk::ComponentMapping { r: vk::ComponentSwizzle::IDENTITY, g: vk::ComponentSwizzle::IDENTITY, b: vk::ComponentSwizzle::IDENTITY, a: vk::ComponentSwizzle::IDENTITY, }) .subresource_range(vk::ImageSubresourceRange { aspect_mask: vk::ImageAspectFlags::COLOR, base_mip_level: 0, level_count: vk::REMAINING_MIP_LEVELS, base_array_layer: 0, layer_count: vk::REMAINING_ARRAY_LAYERS, }); let mut sampler_conversion_info; if let Some(sampler_conversion) = sampler_conversion { sampler_conversion_info = vk::SamplerYcbcrConversionInfo::default().conversion(sampler_conversion); create_info = create_info.push_next(&mut sampler_conversion_info); } device .create_image_view(&create_info, None) .context("VkCreateImageView") } #[derive(Copy, Clone)] pub struct VkHostBuffer { pub buffer: vk::Buffer, pub memory: vk::DeviceMemory, pub access: *mut c_void, } pub fn create_host_buffer( device: &ash::Device, mem_type: u32, usage: vk::BufferUsageFlags, size: usize, ) -> Result { let buffer = { let create_info = vk::BufferCreateInfo::default() .size(size as u64) .usage(usage) .sharing_mode(vk::SharingMode::EXCLUSIVE); unsafe { device.create_buffer(&create_info, None)? } }; let memory = { let requirements = unsafe { device.get_buffer_memory_requirements(buffer) }; let alloc_info = vk::MemoryAllocateInfo::default() .allocation_size(requirements.size) .memory_type_index(mem_type); unsafe { device.allocate_memory(&alloc_info, None)? } }; unsafe { device.bind_buffer_memory(buffer, memory, 0)? }; let access = { unsafe { device.map_memory(memory, 0, vk::WHOLE_SIZE, vk::MemoryMapFlags::empty())? 
}
};

Ok(VkHostBuffer {
    buffer,
    memory,
    access,
})
}

/// Unmaps and destroys a buffer created with `create_host_buffer`, freeing
/// its backing memory.
///
/// Safety: the caller must ensure the device is no longer using the buffer.
pub unsafe fn destroy_host_buffer(device: &ash::Device, buffer: &VkHostBuffer) {
    device.unmap_memory(buffer.memory);
    device.destroy_buffer(buffer.buffer, None);
    device.free_memory(buffer.memory, None);
}

/// A pool of GPU timestamp queries.
pub struct VkTimestampQueryPool {
    pub pool: vk::QueryPool,
    num_timestamps: u32,
}

impl VkTimestampQueryPool {
    /// Records a reset of every timestamp query in the pool. Must be recorded
    /// before the timestamps are written.
    pub unsafe fn cmd_reset(&self, device: &ash::Device, command_buffer: vk::CommandBuffer) {
        device.cmd_reset_query_pool(command_buffer, self.pool, 0, self.num_timestamps);
    }

    /// Reads back all timestamps in the pool as 64-bit values.
    ///
    /// The caller must ensure the queries are available (for example, by
    /// waiting on a fence for the submission that wrote them) before calling.
    pub fn fetch_results(&self, device: &ash::Device) -> anyhow::Result<Vec<i64>> {
        let mut results = vec![0_i64; self.num_timestamps as usize];
        unsafe {
            // Fix: the destination slots are 64 bits wide, so we must request
            // 64-bit results. With QueryResultFlags::empty(), the driver
            // writes packed 32-bit values instead, leaving the upper half of
            // each i64 slot as whatever the vec was initialized to.
            device
                .get_query_pool_results(self.pool, 0, &mut results, vk::QueryResultFlags::TYPE_64)
                .context("vkGetQueryPoolResults")?;
        }

        for v in &results {
            assert!(v > &0_i64, "invalid query pool results")
        }

        Ok(results)
    }
}

/// Creates a query pool containing `num_timestamps` TIMESTAMP queries.
pub fn create_timestamp_query_pool(
    device: &ash::Device,
    num_timestamps: u32,
) -> anyhow::Result<VkTimestampQueryPool> {
    let create_info = vk::QueryPoolCreateInfo::default()
        .query_type(vk::QueryType::TIMESTAMP)
        .query_count(num_timestamps);

    let pool = unsafe {
        device
            .create_query_pool(&create_info, None)
            .context("vkCreateQueryPool")?
    };

    Ok(VkTimestampQueryPool {
        pool,
        num_timestamps,
    })
}

/// Creates a fence, optionally already in the signalled state.
pub fn create_fence(device: &ash::Device, signalled: bool) -> Result<vk::Fence> {
    let mut create_info = vk::FenceCreateInfo::default();
    if signalled {
        create_info = create_info.flags(vk::FenceCreateFlags::SIGNALED);
    }

    let fence = unsafe { device.create_fence(&create_info, None)? };
    Ok(fence)
}

/// Creates a binary semaphore.
pub fn create_semaphore(device: &ash::Device) -> Result<vk::Semaphore> {
    let semaphore = unsafe {
        device.create_semaphore(&vk::SemaphoreCreateInfo::default(), None)?
}; Ok(semaphore) } pub fn load_shader(device: &ash::Device, bytes: &[u8]) -> anyhow::Result { let code = ash::util::read_spv(&mut std::io::Cursor::new(bytes))?; let create_info = vk::ShaderModuleCreateInfo::default().code(&code); let shader = unsafe { device.create_shader_module(&create_info, None)? }; Ok(shader) } pub fn create_ycbcr_sampler_conversion( device: &ash::Device, format: vk::Format, params: &crate::video::VideoStreamParams, ) -> anyhow::Result { let ycbcr_model = match params.color_space { ColorSpace::Bt709 => vk::SamplerYcbcrModelConversion::YCBCR_709, ColorSpace::Bt2020Pq => vk::SamplerYcbcrModelConversion::YCBCR_2020, }; let ycbcr_range = if params.color_full_range { vk::SamplerYcbcrRange::ITU_FULL } else { vk::SamplerYcbcrRange::ITU_NARROW }; let create_info = vk::SamplerYcbcrConversionCreateInfo::default() .format(format) .ycbcr_model(ycbcr_model) .ycbcr_range(ycbcr_range) .chroma_filter(vk::Filter::LINEAR) .x_chroma_offset(vk::ChromaLocation::MIDPOINT) .y_chroma_offset(vk::ChromaLocation::MIDPOINT); let conversion = unsafe { device.create_sampler_ycbcr_conversion(&create_info, None)? 
}; Ok(conversion) } pub fn get_ycbcr_conversion_properties( device: vk::PhysicalDevice, instance: &ash::Instance, format: vk::Format, ) -> anyhow::Result { let mut ycbcr_props = vk::SamplerYcbcrConversionImageFormatProperties::default(); let mut image_format_props2 = vk::ImageFormatProperties2::default().push_next(&mut ycbcr_props); let image_format_info = vk::PhysicalDeviceImageFormatInfo2::default() .format(format) .ty(vk::ImageType::TYPE_2D) .tiling(vk::ImageTiling::OPTIMAL) .usage(vk::ImageUsageFlags::SAMPLED); unsafe { instance.get_physical_device_image_format_properties2( device, &image_format_info, &mut image_format_props2, )?; } Ok(ycbcr_props) } unsafe extern "system" fn vulkan_debug_utils_callback( message_severity: vk::DebugUtilsMessageSeverityFlagsEXT, message_type: vk::DebugUtilsMessageTypeFlagsEXT, p_callback_data: *const vk::DebugUtilsMessengerCallbackDataEXT, _userdata: *mut c_void, ) -> vk::Bool32 { let _ = std::panic::catch_unwind(|| { let message = unsafe { CStr::from_ptr((*p_callback_data).p_message) }.to_string_lossy(); let ty = format!("{:?}", message_type).to_lowercase(); // TODO: these should all be debug. match message_severity { vk::DebugUtilsMessageSeverityFlagsEXT::VERBOSE => { tracing::trace!(ty, "{}", message) } vk::DebugUtilsMessageSeverityFlagsEXT::INFO => info!(ty, "{}", message), vk::DebugUtilsMessageSeverityFlagsEXT::WARNING => warn!(ty, "{}", message), vk::DebugUtilsMessageSeverityFlagsEXT::ERROR => error!(ty, "{}", message), _ => (), } }); // Must always return false. 
vk::FALSE } #[allow(clippy::too_many_arguments)] pub fn cmd_image_barrier( device: &ash::Device, command_buffer: vk::CommandBuffer, image: vk::Image, src_stage_mask: vk::PipelineStageFlags, src_access_mask: vk::AccessFlags, dst_stage_mask: vk::PipelineStageFlags, dst_access_mask: vk::AccessFlags, old_layout: vk::ImageLayout, new_layout: vk::ImageLayout, ) { let barrier = vk::ImageMemoryBarrier::default() .src_access_mask(src_access_mask) .dst_access_mask(dst_access_mask) .old_layout(old_layout) .new_layout(new_layout) .image(image) .subresource_range(vk::ImageSubresourceRange { aspect_mask: vk::ImageAspectFlags::COLOR, base_mip_level: 0, level_count: 1, base_array_layer: 0, layer_count: 1, }); unsafe { device.cmd_pipeline_barrier( command_buffer, src_stage_mask, dst_stage_mask, vk::DependencyFlags::empty(), &[], &[], &[barrier], ) }; } ================================================ FILE: mm-client-common/Cargo.toml ================================================ [package] name = "mm-client-common" version = "0.1.0" edition = "2021" license = "MIT" [lib] crate-type = ["lib", "staticlib"] name = "mm_client_common" [[bin]] name = "uniffi-bindgen" path = "bin/uniffi-bindgen.rs" [dependencies] mm-protocol = { path = "../mm-protocol", features = ["uniffi"] } async-mutex = "1" bytes = "1" ip_rfc = "0.1" flume = "0.11" futures = { version = "0.3", features = ["executor"] } log = "0.4" mio = { version = "1", features = ["net", "os-ext", "os-poll"] } prost-types = "0.13" quiche = { version = "0.23", features = ["qlog"] } raptorq = "2.0" ring = "0.17" thiserror = "1" tracing = { version = "0.1", features = ["log"] } uniffi = { version = "0.28", features = ["cli"] } [build-dependencies] uniffi = { version = "0.27", features = ["build"] } ================================================ FILE: mm-client-common/bin/uniffi-bindgen.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT fn main() { 
uniffi::uniffi_bindgen_main() } ================================================ FILE: mm-client-common/src/attachment.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT use std::sync::Arc; use async_mutex::Mutex as AsyncMutex; use futures::{channel::oneshot, future, FutureExt as _}; use mm_protocol as protocol; pub use protocol::audio_channels::Channel as AudioChannel; use tracing::error; use crate::{ codec, conn, display_params, input, packet::{self, PacketRing}, ClientError, ClientState, }; #[derive(Debug, Clone, uniffi::Record)] pub struct AttachmentConfig { /// The width of the video stream. pub width: u32, /// The height of the video stream. pub height: u32, /// The codec to use for the video stream. Leaving it empty allows the /// server to decide. pub video_codec: Option, /// The profile (bit depth and colorspace) to use for the video stream. /// Leaving it empty allows the server to decide. pub video_profile: Option, /// The quality preset, from 1-10. A None or 0 indicates the server should /// decide. pub quality_preset: Option, /// The codec to use for the audio stream. Leaving it empty allows the /// server to decide. pub audio_codec: Option, /// The sample rate to use for the audio stream. Leaving it empty allows the /// server to decide. pub sample_rate: Option, /// The channel layout to use for the audio stream. An empty vec indicates /// the server should decide. pub channels: Vec, /// An offset to apply to the stream_seq of incoming video packets. The /// offset is applied on the client side, and exists as a convenient way to /// ensure sequence numbers stay monotonic, even across individual /// attachment streams. pub video_stream_seq_offset: u64, /// An offset to apply to the stream_seq of incoming audio packets.
The /// offset is applied on the client side, and exists as a convenient way to /// ensure sequence numbers stay monotonic, even across individual /// attachment streams. pub audio_stream_seq_offset: u64, } /// The settled video stream params, after the server has applied its defaults. #[derive(Debug, Clone, uniffi::Record)] pub struct VideoStreamParams { pub width: u32, pub height: u32, pub codec: codec::VideoCodec, pub profile: codec::VideoProfile, } /// The settled audio stream params, after the server has applied its defaults. #[derive(Debug, Clone, uniffi::Record)] pub struct AudioStreamParams { pub codec: codec::AudioCodec, pub sample_rate: u32, pub channels: Vec, } /// A handle for sending messages to the server over an attachment stream. /// /// An attachment is ended once the corresponding AttachmentDelegate receives /// the attachment_ended or parameters_changed (with reattach_required = true) /// callbacks. Using it past that point will silently drop events. #[derive(uniffi::Object)] pub struct Attachment { sid: u64, /// Used to un-munge the stream_seq for [Attachment::request_video_refresh]. video_stream_seq_offset: u64, // We store a copy of these so that we can send messages on the attachment // stream without locking the client mutex.
outgoing: flume::Sender, conn_waker: Arc, detached: future::Shared>, } impl Attachment { pub(crate) async fn new( sid: u64, client: Arc>, attached: protocol::Attached, delegate: Arc, video_stream_seq_offset: u64, ) -> Result { let session_id = attached.session_id; let attachment_id = attached.attachment_id; let (detached_tx, detached_rx) = oneshot::channel(); let state = AttachmentState { session_id, attachment_id, delegate, attached_msg: attached, server_error: None, video_packet_ring: PacketRing::new(), video_stream_seq: None, prev_video_stream_seq: None, video_stream_seq_offset, audio_packet_ring: PacketRing::new(), audio_stream_seq: None, prev_audio_stream_seq: None, audio_stream_seq_offset: 0, notify_detached: Some(detached_tx), reattach_required: false, }; let mut guard = client.lock().await; let super::ConnHandle { outgoing, waker, attachments, .. } = match &guard.state { ClientState::Connected(conn) => conn, ClientState::Defunct(e) => return Err(e.clone()), }; let outgoing = outgoing.clone(); let conn_waker = waker.clone(); // Track the attachment in the client, so that the reactor thread will // send us messages. if attachments.send_async((sid, state)).await.is_err() { match guard.close() { Ok(_) => return Err(ClientError::Defunct), Err(e) => return Err(e), } } Ok(Self { sid, video_stream_seq_offset, outgoing, conn_waker, detached: detached_rx.shared(), }) } } /// Used by client implementations to handle attachment events. #[uniffi::export(with_foreign)] pub trait AttachmentDelegate: Send + Sync + std::fmt::Debug { /// The video stream is starting or restarting. fn video_stream_start(&self, stream_seq: u64, params: VideoStreamParams); /// A video packet is available. fn video_packet(&self, packet: Arc); /// A video packet was lost. fn dropped_video_packet(&self, dropped: packet::DroppedPacket); /// The audio stream is starting or restarting. fn audio_stream_start(&self, stream_seq: u64, params: AudioStreamParams); /// An audio packet is available. 
fn audio_packet(&self, packet: Arc); // The cursor was updated. fn update_cursor( &self, icon: input::CursorIcon, image: Option>, hotspot_x: u32, hotspot_y: u32, ); /// The pointer should be locked to the given location. fn lock_pointer(&self, x: f64, y: f64); /// The pointer should be released. fn release_pointer(&self); /// The remote session display params were changed. This usually requires /// the client to reattach. If reattach_required is true, the attachment /// should be considered ended. [attachment_ended] will not be called. fn display_params_changed( &self, params: display_params::DisplayParams, reattach_required: bool, ); /// The client encountered an error. The attachment should be considered /// ended. [attachment_ended] will not be called. fn error(&self, err: ClientError); /// The attachment was ended by the server. fn attachment_ended(&self); } impl Attachment { fn send(&self, msg: impl Into, fin: bool) { let _ = self.outgoing.send(conn::OutgoingMessage { sid: self.sid, msg: msg.into(), fin, }); let _ = self.conn_waker.wake(); } } #[uniffi::export] impl Attachment { /// Requests that the server generate a packet with headers and a keyframe. pub fn request_video_refresh(&self, stream_seq: u64) { self.send( protocol::RequestVideoRefresh { stream_seq: stream_seq - self.video_stream_seq_offset, }, false, ) } /// Sends keyboard input to the server. pub fn keyboard_input(&self, key: input::Key, state: input::KeyState, character: u32) { self.send( protocol::KeyboardInput { key: key.into(), state: state.into(), char: character, }, false, ) } /// Notifies the server that the pointer has entered the video area, /// including if it enters a letterbox around the video. pub fn pointer_entered(&self) { self.send(protocol::PointerEntered {}, false) } /// Notifies the server that the pointer has left the video area. This /// should consider any letterboxing part of the video area. 
pub fn pointer_left(&self) { self.send(protocol::PointerLeft {}, false) } /// Sends pointer motion to the server. pub fn pointer_motion(&self, x: f64, y: f64) { self.send(protocol::PointerMotion { x, y }, false) } /// Sends relative pointer motion to the server. pub fn relative_pointer_motion(&self, x: f64, y: f64) { self.send(protocol::RelativePointerMotion { x, y }, false) } /// Sends pointer input to the server. pub fn pointer_input(&self, button: input::Button, state: input::ButtonState, x: f64, y: f64) { self.send( protocol::PointerInput { button: button.into(), state: state.into(), x, y, }, false, ) } /// Sends pointer scroll events to the server. pub fn pointer_scroll(&self, scroll_type: input::ScrollType, x: f64, y: f64) { self.send( protocol::PointerScroll { scroll_type: scroll_type.into(), x, y, }, false, ) } /// Sends a 'Gamepad Available' event to the server. pub fn gamepad_available(&self, pad: input::Gamepad) { self.send( protocol::GamepadAvailable { gamepad: Some(pad.into()), }, false, ) } /// Sends a 'Gamepad Unavailable' event to the server. pub fn gamepad_unavailable(&self, id: u64) { self.send(protocol::GamepadUnavailable { id }, false) } /// Sends gamepad joystick motion to the server. pub fn gamepad_motion(&self, id: u64, axis: input::GamepadAxis, value: f64) { self.send( protocol::GamepadMotion { gamepad_id: id, axis: axis.into(), value, }, false, ) } /// Sends gamepad button input to the server. pub fn gamepad_input( &self, id: u64, button: input::GamepadButton, state: input::GamepadButtonState, ) { self.send( protocol::GamepadInput { gamepad_id: id, button: button.into(), state: state.into(), }, false, ) } /// Ends the attachment. pub async fn detach(&self) -> Result<(), ClientError> { self.send(protocol::Detach {}, true); Ok(self.detached.clone().await?) } } /// Internal state for an attachment. 
/// Per-attachment client state. Tracks the current and previous video/audio
/// streams, reassembles packets via the packet rings, and forwards results to
/// the attachment delegate.
pub(crate) struct AttachmentState {
    pub(crate) session_id: u64,
    pub(crate) attachment_id: u64,
    // NOTE(review): generic parameters appear to have been stripped from this
    // chunk during extraction (e.g. `Arc<...>`, `Option<...>`); field names
    // are intact but the full types must be confirmed against the original.
    delegate: Arc,
    // The server's Attached response; provides codec/resolution/channel
    // parameters for stream_start callbacks below.
    attached_msg: protocol::Attached,
    // When set, the next stream close is treated as a planned reattach and
    // does not fire the attachment_ended/error callbacks (see handle_close).
    reattach_required: bool,
    server_error: Option,
    video_packet_ring: PacketRing,
    video_stream_seq: Option,
    prev_video_stream_seq: Option,
    // Added to every outgoing stream_seq before it reaches the delegate;
    // presumably keeps seqs monotonic across reattaches - confirm upstream.
    video_stream_seq_offset: u64,
    audio_packet_ring: PacketRing,
    audio_stream_seq: Option,
    prev_audio_stream_seq: Option,
    audio_stream_seq_offset: u64,
    // A future representing the end of the attachment.
    notify_detached: Option>,
}

impl AttachmentState {
    /// Dispatches a single message received on the attachment stream,
    /// updating stream state and invoking the appropriate delegate callbacks.
    pub(crate) fn handle_message(&mut self, msg: protocol::MessageType) {
        match msg {
            protocol::MessageType::Attached(attached) => {
                error!(
                    "unexpected {} on already-attached stream",
                    protocol::MessageType::Attached(attached)
                );
            }
            protocol::MessageType::VideoChunk(chunk) => {
                // We always send packets for two streams - the current one and
                // (if there is one) the previous one.
                if self.video_stream_seq.is_none_or(|s| s < chunk.stream_seq) {
                    // A new stream started.
                    self.prev_video_stream_seq = self.video_stream_seq;
                    self.video_stream_seq = Some(chunk.stream_seq);

                    let res = self.attached_msg.streaming_resolution.unwrap_or_default();
                    self.delegate.video_stream_start(
                        chunk.stream_seq + self.video_stream_seq_offset,
                        VideoStreamParams {
                            width: res.width,
                            height: res.height,
                            codec: self.attached_msg.video_codec(),
                            profile: self.attached_msg.video_profile(),
                        },
                    );

                    // Discard any older packets.
                    if let Some(prev) = self.prev_video_stream_seq {
                        self.video_packet_ring.discard(prev.saturating_sub(1));
                    }
                }

                if let Err(err) = self.video_packet_ring.recv_chunk(chunk) {
                    error!("error in packet ring: {:#}", err);
                }

                if let Some(prev) = self.prev_video_stream_seq {
                    // Ignore dropped packets on the previous stream.
                    for mut packet in self
                        .video_packet_ring
                        .drain_completed(prev)
                        .flat_map(Result::ok)
                    {
                        packet.stream_seq += self.video_stream_seq_offset;
                        self.delegate.video_packet(Arc::new(packet));
                    }
                }

                if self.video_stream_seq != self.prev_video_stream_seq {
                    // On the current stream, dropped packets are reported to
                    // the delegate (unlike on the previous stream above).
                    for res in self
                        .video_packet_ring
                        .drain_completed(self.video_stream_seq.unwrap())
                    {
                        match res {
                            Ok(mut packet) => {
                                packet.stream_seq += self.video_stream_seq_offset;
                                self.delegate.video_packet(Arc::new(packet));
                            }
                            Err(mut dropped) => {
                                dropped.stream_seq += self.video_stream_seq_offset;
                                self.delegate.dropped_video_packet(dropped);
                            }
                        }
                    }
                }
            }
            protocol::MessageType::AudioChunk(chunk) => {
                // We always send packets for two streams - the current one and
                // (if there is one) the previous one.
                if self.audio_stream_seq.is_none_or(|s| s < chunk.stream_seq) {
                    // A new stream started.
                    self.prev_audio_stream_seq = self.audio_stream_seq;
                    self.audio_stream_seq = Some(chunk.stream_seq);

                    let channels = self
                        .attached_msg
                        .channels
                        .as_ref()
                        .map(|c| c.channels().collect())
                        .unwrap_or_default();

                    self.delegate.audio_stream_start(
                        chunk.stream_seq + self.audio_stream_seq_offset,
                        AudioStreamParams {
                            codec: self.attached_msg.audio_codec(),
                            sample_rate: self.attached_msg.sample_rate_hz,
                            channels,
                        },
                    );

                    // Discard any older packets.
                    if let Some(prev) = self.prev_audio_stream_seq {
                        self.audio_packet_ring.discard(prev.saturating_sub(1));
                    }
                }

                if let Err(err) = self.audio_packet_ring.recv_chunk(chunk) {
                    error!("error in packet ring: {:#}", err);
                }

                if let Some(prev) = self.prev_audio_stream_seq {
                    for mut packet in self
                        .audio_packet_ring
                        .drain_completed(prev)
                        .flat_map(Result::ok)
                    {
                        packet.stream_seq += self.audio_stream_seq_offset;
                        self.delegate.audio_packet(Arc::new(packet));
                    }
                }

                if self.audio_stream_seq != self.prev_audio_stream_seq {
                    // Note: unlike video, dropped audio packets on the current
                    // stream are silently ignored here.
                    for mut packet in self
                        .audio_packet_ring
                        .drain_completed(self.audio_stream_seq.unwrap())
                        .flat_map(Result::ok)
                    {
                        packet.stream_seq += self.audio_stream_seq_offset;
                        self.delegate.audio_packet(Arc::new(packet));
                    }
                }
            }
            protocol::MessageType::UpdateCursor(msg) => {
                // An empty image payload means "no image" (icon-only cursor).
                let image = match &msg.image {
                    v if v.is_empty() => None,
                    v => Some(v.to_vec()),
                };

                self.delegate
                    .update_cursor(msg.icon(), image, msg.hotspot_x, msg.hotspot_y);
            }
            protocol::MessageType::LockPointer(msg) => {
                self.delegate.lock_pointer(msg.x, msg.y);
            }
            protocol::MessageType::ReleasePointer(_) => self.delegate.release_pointer(),
            protocol::MessageType::SessionParametersChanged(msg) => {
                let Some(params) = msg.display_params.and_then(|p| p.try_into().ok()) else {
                    error!(?msg, "invalid display params from server");
                    return;
                };

                self.delegate
                    .display_params_changed(params, msg.reattach_required);

                // Mute the attachment_ended callback once.
                self.reattach_required = msg.reattach_required;
            }
            protocol::MessageType::SessionEnded(_) => {
                // We just check for the fin on the attachment stream.
            }
            protocol::MessageType::Error(error) => {
                self.server_error = Some(error.clone());
                self.delegate.error(ClientError::ServerError(error));
            }
            v => error!("unexpected message on attachment stream: {}", v),
        }
    }

    /// Called when the attachment stream closes. Signals any waiter on
    /// `notify_detached`, then fires exactly one of: nothing (planned
    /// reattach), `error` (stream error), or `attachment_ended`.
    pub(crate) fn handle_close(mut self, err: Option) {
        if let Some(tx) = self.notify_detached.take() {
            let _ = tx.send(());
        }

        if self.reattach_required {
            self.reattach_required = false;
        } else if let Some(err) = err {
            self.delegate.error(err);
        } else if self.server_error.is_some() {
            // We don't call attachment_ended because we already called error.
        } else {
            self.delegate.attachment_ended();
        }
    }
}

================================================ FILE: mm-client-common/src/codec.rs ================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: MIT

use mm_protocol as protocol;

// Re-export the protobuf codec enums under a stable public path.
pub use protocol::{AudioCodec, VideoCodec, VideoProfile};

================================================ FILE: mm-client-common/src/conn/hostport.rs ================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: MIT

/// Error returned when a host:port string cannot be parsed.
#[derive(Debug, Eq, PartialEq)]
pub(crate) struct MalformedHostPort;

impl std::fmt::Display for MalformedHostPort {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "invalid host:port string")
    }
}

impl std::error::Error for MalformedHostPort {}

/// Splits a network address into the host and port components.
Accepts /// addresses of the following form: /// - "host" /// - "[host]" /// - "host:port" /// - "[host]:port" /// /// # References /// /// https://cs.opensource.google/go/go/+/refs/tags/go1.23.3:src/net/ipsock.go;l=165 pub(crate) fn split_host_port( hostport: impl AsRef<[u8]>, ) -> Result<(String, Option), MalformedHostPort> { let input = hostport.as_ref(); let mut split = rfind(input, b':'); let host; if input[0] == b'[' { let Some(end) = find(input, b']') else { return Err(MalformedHostPort); }; match end + 1 { v if v == input.len() => { host = &input[1..end]; split = None; } v if split.is_some_and(|i| v == i) => { host = &input[1..end]; } _ => return Err(MalformedHostPort), } if find(&input[1..], b'[').is_some() || find(&input[end + 1..], b']').is_some() { return Err(MalformedHostPort); } } else { host = &input[..split.unwrap_or(input.len())]; if find(input, b'[').is_some() || find(input, b']').is_some() { return Err(MalformedHostPort); } } let Ok(host) = std::str::from_utf8(host) else { return Err(MalformedHostPort); }; let port = if let Some(i) = split { Some( std::str::from_utf8(&input[i + 1..]) .ok() .and_then(|s| s.parse().ok()) .ok_or(MalformedHostPort)?, ) } else { None }; Ok((host.to_owned(), port)) } fn find(buf: &[u8], c: u8) -> Option { buf.iter().position(|x| x == &c) } fn rfind(buf: &[u8], c: u8) -> Option { buf.iter().rposition(|x| x == &c) } #[cfg(test)] mod tests { use super::*; #[test] fn test_split_host_port() { macro_rules! 
check { ($s:literal, $host:literal, $port:literal) => { assert_eq!(Ok(($host.to_string(), Some($port))), split_host_port($s)); }; ($s:literal, $host:literal) => { assert_eq!(Ok(($host.to_string(), None)), split_host_port($s)); }; ($s:literal, bad) => { assert_eq!(Err(MalformedHostPort), split_host_port($s)); }; } check!("foo", "foo"); check!("foo:9599", "foo", 9599); check!("[foo]", "foo"); check!("[foo]:9599", "foo", 9599); check!("[::1]", "::1"); check!("[::1]:9599", "::1", 9599); check!("foo:", bad); check!("foo:bar", bad); check!("[foo:]9599", bad); check!("[::1]:", bad); check!("[foo]]:9599", bad); check!("[[foo]]:9599", bad); } } ================================================ FILE: mm-client-common/src/conn.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT mod hostport; const DEFAULT_PORT: u16 = 9599; const MAX_QUIC_PACKET_SIZE: usize = 1350; const SOCKET: mio::Token = mio::Token(0); const WAKER: mio::Token = mio::Token(1); use std::{ collections::{HashMap, HashSet}, net::SocketAddr, sync::{atomic::Ordering, Arc}, time, }; use futures::channel::oneshot; use mm_protocol as protocol; use tracing::{debug, error, info, trace, warn}; use crate::stats::StatsCollector; #[derive(Debug, Clone, thiserror::Error)] pub enum ConnError { #[error("invalid address: {0}")] InvalidAddress(String), #[error("unexpected OS error: {0}")] Unknown(#[from] Arc), #[error("QUIC error")] QuicError(#[from] quiche::Error), #[error("connection timeout")] Timeout, #[error("connection closed due to inactivity")] Idle, #[error("closed by peer (is_app={}, code={})", .0.is_app, .0.error_code)] PeerError(quiche::ConnectionError), #[error("recv or send queue is full")] QueueFull, #[error("protocol error")] ProtocolError(#[from] protocol::ProtocolError), } // In order to let ConnError implement Clone, we need to wrap io::Error in Arc; // but then we lose From, which breaks the ? operator. 
// Manual From impl to restore `?` ergonomics for io::Error (see comment
// above: the variant stores Arc to keep ConnError cloneable).
impl From for ConnError {
    fn from(e: std::io::Error) -> Self {
        Self::Unknown(Arc::new(e))
    }
}

/// Events emitted by the connection thread towards the reactor.
#[derive(Debug, Clone)]
pub(crate) enum ConnEvent {
    StreamMessage(u64, protocol::MessageType),
    Datagram(protocol::MessageType),
    StreamClosed(u64),
}

/// A message queued for sending on stream `sid`; `fin` closes the stream.
pub(crate) struct OutgoingMessage {
    pub(crate) sid: u64,
    pub(crate) msg: protocol::MessageType,
    pub(crate) fin: bool,
}

/// The QUIC connection state machine, driven by `run` on a dedicated thread.
// NOTE(review): generic parameters appear to have been stripped from several
// field and signature types in this chunk during extraction (e.g. `Arc<...>`,
// `HashMap<...>`, `Option<...>`); confirm against the original source.
pub(crate) struct Conn {
    // Reusable scratch buffer for UDP and message encoding/decoding.
    scratch: bytes::BytesMut,
    socket: mio::net::UdpSocket,
    local_addr: SocketAddr,
    poll: mio::Poll,
    // Wakes the poll loop from other threads (e.g. when queueing a message).
    waker: Arc,
    conn: quiche::Connection,
    // Partially-received messages, keyed by stream ID.
    partial_reads: HashMap,
    open_streams: HashSet,
    shutdown: oneshot::Receiver<()>,
    shutting_down: bool,
    incoming: flume::Sender,
    outgoing: flume::Receiver,
    // Fulfilled once, when the connection becomes established (or times out).
    ready: Option>>,
    stats_timer: time::Instant,
    stats_collector: Arc,
}

impl Conn {
    /// Resolves `addr`, binds a local UDP socket, and prepares (but does not
    /// yet drive) a QUIC client connection.
    pub fn new(
        addr: &str,
        incoming: flume::Sender,
        outgoing: flume::Receiver,
        ready: oneshot::Sender>,
        shutdown: oneshot::Receiver<()>,
        stats: Arc,
    ) -> Result {
        let (hostname, server_addr) = resolve_server(addr)?;
        let bind_addr = match server_addr {
            std::net::SocketAddr::V4(_) => "0.0.0.0:0",
            std::net::SocketAddr::V6(_) => "[::]:0",
        };

        let mut socket = mio::net::UdpSocket::bind(bind_addr.parse().unwrap())?;

        let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
        // Private/LAN addresses can't have valid public certs.
        if !ip_rfc::global(&server_addr.ip()) {
            warn!("skipping TLS verification for private server address");
            config.verify_peer(false);
        }

        config.set_application_protos(&[protocol::ALPN_PROTOCOL_VERSION])?;
        config.set_max_idle_timeout(60_000);
        config.set_max_recv_udp_payload_size(MAX_QUIC_PACKET_SIZE);
        config.set_max_send_udp_payload_size(MAX_QUIC_PACKET_SIZE);
        config.set_initial_max_data(65536);
        config.set_initial_max_stream_data_bidi_local(65536);
        // NOTE(review): 6536 looks like a typo for 65536 (every other limit
        // here is 65536) - confirm against upstream before changing.
        config.set_initial_max_stream_data_bidi_remote(6536);
        config.set_initial_max_streams_bidi(100);
        config.set_initial_max_stream_data_uni(65536);
        config.set_initial_max_streams_uni(100);
        config.enable_dgram(true, 65536, 0);

        let initial_scid = gen_scid();
        let local_addr = socket.local_addr().unwrap();
        let conn = quiche::connect(
            Some(&hostname),
            &initial_scid,
            local_addr,
            server_addr,
            &mut config,
        )?;

        let scratch = bytes::BytesMut::with_capacity(65536);
        let poll = mio::Poll::new().unwrap();
        let waker = Arc::new(mio::Waker::new(poll.registry(), WAKER)?);

        poll.registry()
            .register(&mut socket, SOCKET, mio::Interest::READABLE)?;

        Ok(Self {
            scratch,
            socket,
            local_addr,
            poll,
            waker,
            conn,
            partial_reads: HashMap::new(),
            open_streams: HashSet::new(),
            shutdown,
            shutting_down: false,
            incoming,
            outgoing,
            ready: Some(ready),
            stats_timer: time::Instant::now(),
            stats_collector: stats,
        })
    }

    /// A handle other threads can use to interrupt the poll loop.
    pub fn waker(&self) -> Arc {
        self.waker.clone()
    }

    /// Drives the connection until it closes: polls the socket, pumps QUIC
    /// timeouts, demuxes incoming streams/datagrams, flushes outgoing
    /// messages, and writes UDP packets.
    pub fn run(&mut self, connect_timeout: time::Duration) -> Result<(), ConnError> {
        let mut events = mio::Events::with_capacity(1024);
        let start = time::Instant::now();

        loop {
            const ONE_SECOND: time::Duration = time::Duration::from_secs(1);
            // Cap the poll timeout so shutdown/stats checks run regularly.
            let timeout = self
                .conn
                .timeout()
                .map_or(ONE_SECOND, |d| d.min(ONE_SECOND));
            self.poll.poll(&mut events, Some(timeout))?;

            let now = time::Instant::now();
            if self.conn.timeout_instant().is_some_and(|t| now >= t) {
                self.conn.on_timeout();
            }

            if self.conn.is_closed() || self.conn.is_draining() {
                // Translate the close reason into a ConnError.
                if self.conn.is_timed_out() {
                    return Err(ConnError::Idle);
                } else if self.conn.is_dgram_recv_queue_full() {
                    return Err(ConnError::QueueFull);
                } else if let Some(err) = self.conn.peer_error() {
                    return Err(ConnError::PeerError(err.clone()));
                } else if !self.shutting_down {
                    panic!("connection closed unexpectedly");
                } else {
                    return Ok(());
                }
            }

            // Fulfill the `ready` future once established, or time out.
            if self.ready.is_some() {
                if self.conn.is_established() || self.conn.is_in_early_data() {
                    trace!("connection ready");
                    let _ = self.ready.take().unwrap().send(Ok(()));
                } else if start.elapsed() > connect_timeout {
                    let _ = self.ready.take().unwrap().send(Err(ConnError::Timeout));
                }
            }

            if let Ok(Some(())) = self.shutdown.try_recv() {
                self.start_shutdown()?;
            }

            // Publish RTT to the stats collector at most every 200ms.
            if (now - self.stats_timer) > time::Duration::from_millis(200) {
                self.stats_timer = now;
                let stats = self.conn.path_stats().next().unwrap();
                self.stats_collector
                    .rtt_us
                    .store(stats.rtt.as_micros() as u64, Ordering::SeqCst);
            }

            // Read incoming UDP packets and handle them.
            loop {
                // TODO: use recv_mmsg for a small efficiency boost.
                self.scratch.resize(MAX_QUIC_PACKET_SIZE, 0);
                let (len, from) = match self.socket.recv_from(&mut self.scratch) {
                    Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
                        break;
                    }
                    v => v?,
                };

                self.conn.recv(
                    &mut self.scratch[..len],
                    quiche::RecvInfo {
                        from,
                        to: self.local_addr,
                    },
                )?;
            }

            if (self.conn.is_established() || self.conn.is_in_early_data()) && !self.shutting_down
            {
                // Demux incoming messages and datagrams.
                for sid in self.conn.readable() {
                    self.open_streams.insert(sid);
                    self.pump_stream(sid)?;
                }

                loop {
                    self.scratch.resize(protocol::MAX_MESSAGE_SIZE, 0);
                    match self.conn.dgram_recv(&mut self.scratch) {
                        Ok(len) => {
                            let (msg, msg_len) = match protocol::decode_message(&self.scratch) {
                                Ok(v) => v,
                                Err(protocol::ProtocolError::InvalidMessageType(t, _)) => {
                                    // Forwards-compatible: skip unknown types.
                                    warn!(msg_type = t, "ignoring unknown message type");
                                    continue;
                                }
                                Err(e) => return Err(e.into()),
                            };

                            debug_assert_eq!(msg_len, len);
                            trace!(%msg, len, "received datagram");

                            match self.incoming.send(ConnEvent::Datagram(msg)) {
                                Ok(()) => {}
                                Err(_) => {
                                    // Receiver gone; begin a clean shutdown.
                                    self.start_shutdown()?;
                                    break;
                                }
                            }
                        }
                        Err(quiche::Error::Done) => break,
                        Err(e) => {
                            error!("QUIC recv error: {:#}", e);
                            break;
                        }
                    }
                }

                // Enqueue outgoing messages.
                loop {
                    match self.outgoing.try_recv() {
                        Ok(OutgoingMessage { sid, msg, fin }) => {
                            if matches!(
                                self.conn.stream_capacity(sid),
                                Err(quiche::Error::InvalidState)
                                    | Err(quiche::Error::StreamStopped(_))
                            ) {
                                debug!(sid, %msg, "dropping outgoing message for finished stream");
                                continue;
                            }

                            self.open_streams.insert(sid);
                            self.send_message(sid, msg, fin)?;
                        }
                        Err(flume::TryRecvError::Empty) => {
                            break;
                        }
                        Err(flume::TryRecvError::Disconnected) => {
                            self.start_shutdown()?;
                            break;
                        }
                    }
                }

                // Garbage collect closed streams.
                let mut closed = Vec::new();
                self.open_streams.retain(|sid| {
                    if self.conn.stream_finished(*sid) {
                        trace!(sid, "stream finished");
                        closed.push(*sid);
                        false
                    } else {
                        true
                    }
                });

                for sid in closed {
                    match self.incoming.send(ConnEvent::StreamClosed(sid)) {
                        Ok(()) => {}
                        Err(_) => {
                            self.start_shutdown()?;
                            break;
                        }
                    }
                }
            }

            // Write out UDP packets.
            loop {
                self.scratch.resize(MAX_QUIC_PACKET_SIZE, 0);
                let (len, send_info) = match self.conn.send(&mut self.scratch) {
                    Ok(v) => v,
                    Err(quiche::Error::Done) => break,
                    Err(e) => {
                        error!("QUIC send error: {:#}", e);
                        break;
                    }
                };

                // TODO implement pacing with SO_TXTIME. (We can do
                // sendmmsg at the same time).
                self.socket.send_to(&self.scratch[..len], send_info.to)?;
            }
        }
    }

    /// Drains readable data on stream `sid`, decodes as many complete
    /// messages as possible (buffering a trailing partial message), and
    /// returns whether the peer sent fin.
    fn pump_stream(&mut self, sid: u64) -> Result {
        use bytes::Buf;

        self.scratch.truncate(0);
        // Prepend any leftover partial message from the last pump.
        if let Some(partial) = self.partial_reads.remove(&sid) {
            self.scratch.unsplit(partial);
        }

        let mut off = self.scratch.len();
        let mut stream_fin = false;
        loop {
            self.scratch.resize(off + protocol::MAX_MESSAGE_SIZE, 0);
            match self.conn.stream_recv(sid, &mut self.scratch[off..]) {
                Ok((len, fin)) => {
                    off += len;
                    if fin {
                        stream_fin = true;
                        break;
                    }
                }
                Err(quiche::Error::Done) => break,
                Err(e) => return Err(e.into()),
            }
        }

        // Read messages (there may be multiple).
        self.scratch.truncate(off);
        let mut buf = self.scratch.split();
        while !buf.is_empty() {
            let (msg, len) = match protocol::decode_message(&buf) {
                Ok(v) => v,
                Err(protocol::ProtocolError::ShortBuffer(n)) => {
                    // Not enough bytes yet; stash for the next pump.
                    debug!(have = buf.len(), need = n, sid, "partial message");
                    self.partial_reads.insert(sid, buf);
                    break;
                }
                Err(e) => {
                    error!("protocol error: {:#}", e);
                    break;
                }
            };

            trace!(
                sid,
                %msg,
                len,
                fin = stream_fin,
                "received msg",
            );

            buf.advance(len);
            match self.incoming.send(ConnEvent::StreamMessage(sid, msg)) {
                Ok(()) => {}
                Err(_) => {
                    self.start_shutdown()?;
                    break;
                }
            }
        }

        Ok(stream_fin)
    }

    /// Encodes `msg` and writes it to stream `sid`. A blocked stream drops
    /// the message (logged) rather than erroring.
    fn send_message(
        &mut self,
        sid: u64,
        msg: protocol::MessageType,
        fin: bool,
    ) -> Result<(), ConnError> {
        self.scratch.resize(protocol::MAX_MESSAGE_SIZE, 0);
        let len = protocol::encode_message(&msg, &mut self.scratch)?;

        trace!(sid, %msg, fin, "sending message");
        match self.conn.stream_send(sid, &self.scratch[..len], fin) {
            Ok(_) => Ok(()),
            Err(quiche::Error::Done) | Err(quiche::Error::FinalSize) => {
                warn!(sid, %msg, "dropping message on blocked stream");
                if fin {
                    // Try to close the connection anyway.
                    let _ = self.conn.stream_send(sid, &[], fin);
                }
                Ok(())
            }
            Err(e) => Err(e.into()),
        }
    }

    /// Begins an application-initiated close of the QUIC connection.
    fn start_shutdown(&mut self) -> Result<(), ConnError> {
        match self.conn.close(true, 0x00, b"") {
            Ok(()) | Err(quiche::Error::Done) => (),
            Err(e) => return Err(e.into()),
        }

        self.shutting_down = true;
        Ok(())
    }
}

/// Generates a random QUIC source connection ID of the maximum length.
fn gen_scid() -> quiche::ConnectionId<'static> {
    use ring::rand::SecureRandom;

    let mut scid = vec![0; quiche::MAX_CONN_ID_LEN];
    ring::rand::SystemRandom::new().fill(&mut scid[..]).unwrap();
    quiche::ConnectionId::from_vec(scid)
}

/// Splits and resolves `hostport` into (hostname, socket address), applying
/// DEFAULT_PORT when no port is given.
fn resolve_server(hostport: &str) -> Result<(String, SocketAddr), ConnError> {
    use std::net::ToSocketAddrs;

    let Ok((host, port)) = hostport::split_host_port(hostport) else {
        return Err(ConnError::InvalidAddress(hostport.to_string()));
    };

    let port = port.unwrap_or_else(|| {
        info!("using default port ({DEFAULT_PORT})");
        DEFAULT_PORT
    });

    // Rust chokes on zone identifiers. They are rarely needed.
    let host = if let Some((before, _)) = host.rsplit_once('%') {
        before
    } else {
        &host
    };

    // Take the first resolved address.
    let addr = (host, port)
        .to_socket_addrs()
        .map_err(|_| ConnError::InvalidAddress(hostport.to_string()))?
        .next()
        .unwrap();

    Ok((host.to_string(), addr))
}

================================================ FILE: mm-client-common/src/display_params.rs ================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: MIT

use mm_protocol as protocol;

use crate::{pixel_scale::PixelScale, validation::*};

/// Validated display parameters, converted to/from the protobuf form.
#[derive(Debug, Clone, PartialEq, Eq, uniffi::Record)]
pub struct DisplayParams {
    pub width: u32,
    pub height: u32,
    pub framerate: u32,
    pub ui_scale: PixelScale,
}

impl TryFrom for DisplayParams {
    type Error = ValidationError;

    fn try_from(msg: protocol::VirtualDisplayParameters) -> Result {
        // resolution and ui_scale are required; framerate passes through.
        let res = required_field!(msg.resolution)?;
        Ok(DisplayParams {
            width: res.width,
            height: res.height,
            framerate: msg.framerate_hz,
            ui_scale: required_field!(msg.ui_scale)?.try_into()?,
        })
    }
}

impl From for protocol::VirtualDisplayParameters {
    fn from(value: DisplayParams) -> Self {
        protocol::VirtualDisplayParameters {
            resolution: Some(protocol::Size {
                width: value.width,
                height: value.height,
            }),
            framerate_hz: value.framerate,
            ui_scale: Some(value.ui_scale.into()),
        }
    }
}

================================================ FILE: mm-client-common/src/input.rs ================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: MIT

use mm_protocol as protocol;

// Re-export protobuf input enums under stable public paths.
pub use protocol::gamepad::GamepadLayout;
pub use protocol::gamepad_input::{GamepadButton, GamepadButtonState};
pub use protocol::gamepad_motion::GamepadAxis;
pub use protocol::keyboard_input::{Key, KeyState};
pub use protocol::pointer_input::{Button, ButtonState};
pub use protocol::pointer_scroll::ScrollType;
pub use protocol::update_cursor::CursorIcon;

use crate::validation::ValidationError;

#[derive(Debug, Clone, Copy, uniffi::Record)]
pub
struct Gamepad {
    pub id: u64, // Non-zero; zero is rejected in TryFrom below.
    pub layout: GamepadLayout,
}

impl From for protocol::Gamepad {
    fn from(value: Gamepad) -> Self {
        Self {
            id: value.id,
            layout: value.layout.into(),
        }
    }
}

impl TryFrom for Gamepad {
    type Error = ValidationError;

    fn try_from(value: protocol::Gamepad) -> Result {
        let layout = value
            .layout
            .try_into()
            .map_err(|_| ValidationError::InvalidEnum("layout".to_string()))?;

        if value.id == 0 {
            return Err(ValidationError::Required("id".to_string()));
        }

        Ok(Self {
            id: value.id,
            layout,
        })
    }
}

================================================ FILE: mm-client-common/src/lib.rs ================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: MIT

use std::{
    collections::{HashMap, HashSet},
    sync::Arc,
    time,
};

use async_mutex::{Mutex as AsyncMutex, MutexGuard as AsyncMutexGuard};
use futures::{channel::oneshot, executor::block_on};
use mm_protocol as protocol;
use tracing::{debug, error};

mod attachment;
mod conn;
mod logging;
mod packet;
mod session;
mod stats;
mod validation;

pub mod codec;
pub mod display_params;
pub mod input;
pub mod pixel_scale;

pub use attachment::*;
pub use logging::*;
pub use packet::*;
pub use session::*;

uniffi::setup_scaffolding!();

pub use protocol::error::ErrorCode;

/// Top-level error type exposed over uniffi.
// NOTE(review): generic parameters appear to have been stripped from several
// types in this chunk during extraction (e.g. `Arc<...>`, `Option<...>`,
// `Result<Vec<...>, ...>`); confirm against the original source.
#[derive(Debug, Clone, thiserror::Error, uniffi::Error)]
#[uniffi(flat_error)]
pub enum ClientError {
    #[error("protocol error")]
    ProtocolError(#[from] protocol::ProtocolError),
    #[error("{}: {}", .0.err_code().as_str_name(), .0.error_text)]
    ServerError(protocol::Error),
    #[error("request timed out")]
    RequestTimeout,
    #[error("connection error")]
    ConnectionError(#[from] conn::ConnError),
    #[error("stream closed before request could be received")]
    Canceled(#[from] oneshot::Canceled),
    #[error("received unexpected message: {0}")]
    UnexpectedMessage(protocol::MessageType),
    #[error("message validation failed")]
    ValidationFailed(#[from] validation::ValidationError),
    #[error("client defunct")]
    Defunct,
    #[error("attachment ended")]
    Detached,
}

/// A handle for the QUIC connection thread, used to push outgoing messages.
struct ConnHandle {
    thread_handle: std::thread::JoinHandle>,
    waker: Arc,
    outgoing: flume::Sender,
    roundtrips: flume::Sender<(u64, Roundtrip)>,
    attachments: flume::Sender<(u64, AttachmentState)>,
    shutdown: oneshot::Sender<()>,
}

impl ConnHandle {
    /// Signals the connection thread that it should close.
    fn close(self) -> Result<(), Option> {
        let _ = self.shutdown.send(());
        self.waker.wake().map_err(conn::ConnError::from)?;

        // If the thread is still running, let it wind down on its own.
        if !self.thread_handle.is_finished() {
            return Ok(());
        }

        match self.thread_handle.join() {
            Ok(Ok(_)) => Ok(()),
            Ok(Err(e)) => Err(Some(e)),
            // The connection thread panicked.
            Err(_) => {
                error!("connection thread panicked");
                Err(None)
            }
        }
    }
}

/// Stores the current connection state.
enum ClientState {
    Connected(ConnHandle),
    Defunct(ClientError),
}

// An in-flight request/response pair; `tx` is fulfilled by the reactor.
struct Roundtrip {
    tx: oneshot::Sender>,
    deadline: Option,
}

/// Client state inside the mutex.
struct InnerClient {
    next_stream_id: u64,
    state: ClientState,
}

impl InnerClient {
    // Allocates the next stream ID. Client-initiated bidirectional QUIC
    // stream IDs are spaced 4 apart.
    fn next_stream_id(&mut self) -> u64 {
        let sid = self.next_stream_id;
        self.next_stream_id += 4;
        sid
    }

    // Tears down the connection, leaving the client Defunct.
    fn close(&mut self) -> Result<(), ClientError> {
        if let ClientState::Defunct(err) = &self.state {
            return Err(err.clone());
        }

        let ClientState::Connected(conn) =
            std::mem::replace(&mut self.state, ClientState::Defunct(ClientError::Defunct))
        else {
            unreachable!();
        };

        // Shut down the connection thread.
        let close_err = conn.close();
        if let Err(Some(e)) = &close_err {
            error!("connection error: {e:?}");
            self.state = ClientState::Defunct(e.clone().into());
        }

        match close_err {
            Ok(_) => Ok(()),
            Err(Some(e)) => Err(e.into()),
            Err(None) => Err(ClientError::Defunct),
        }
    }
}

#[derive(uniffi::Object)]
pub struct Client {
    name: String,
    addr: String,
    connect_timeout: time::Duration,
    inner: Arc>,
    stats: Arc,
}

impl Client {
    // Locks the client, transparently reconnecting if the previous
    // connection was closed due to an idle timeout.
    async fn reconnect(&self) -> Result, ClientError> {
        let inner_clone = self.inner.clone();
        let mut guard = self.inner.lock().await;
        match &guard.state {
            ClientState::Connected(_) => (),
            ClientState::Defunct(ClientError::ConnectionError(conn::ConnError::Idle)) => {
                // Reconnect after an idle timeout.
                let conn = match spawn_conn(
                    &self.addr,
                    inner_clone,
                    self.stats.clone(),
                    self.connect_timeout,
                )
                .await
                {
                    Ok(conn) => conn,
                    Err(e) => {
                        error!("connection failed: {e:#}");
                        return Err(e);
                    }
                };

                guard.state = ClientState::Connected(conn);
                debug!("reconnected after idle timeout");
            }
            ClientState::Defunct(e) => {
                return Err(e.clone());
            }
        }

        Ok(guard)
    }

    // Opens a new stream, sends `msg` on it, and waits (via the reactor) for
    // the first response message. Returns the stream ID and the response.
    async fn initiate_stream(
        &self,
        msg: impl Into,
        fin: bool,
        timeout: Option,
    ) -> Result<(u64, protocol::MessageType), ClientError> {
        let mut guard = self.reconnect().await?;
        let sid = guard.next_stream_id();

        let (oneshot_tx, oneshot_rx) = oneshot::channel();
        let ConnHandle {
            waker,
            outgoing,
            roundtrips,
            ..
        } = match &guard.state {
            ClientState::Connected(conn) => conn,
            ClientState::Defunct(err) => return Err(err.clone()),
        };

        if outgoing
            .send(conn::OutgoingMessage {
                sid,
                msg: msg.into(),
                fin,
            })
            .is_err()
        {
            // The connection thread is gone; close and report.
            match guard.close() {
                Ok(_) => return Err(ClientError::Defunct),
                Err(e) => return Err(e),
            }
        }

        let deadline = timeout.map(|d| time::Instant::now() + d);
        if roundtrips
            .send_async((
                sid,
                Roundtrip {
                    tx: oneshot_tx,
                    deadline,
                },
            ))
            .await
            .is_err()
        {
            match guard.close() {
                Ok(_) => return Err(ClientError::Defunct),
                Err(e) => return Err(e),
            }
        };

        waker.wake().map_err(conn::ConnError::from)?;

        // We don't want to hold the mutex while waiting for a response.
        drop(guard);

        let res = oneshot_rx.await??;
        Ok((sid, res))
    }

    // A one-shot request/response on a fresh stream.
    async fn roundtrip(
        &self,
        msg: impl Into,
        timeout: time::Duration,
    ) -> Result {
        let (_, msg) = self.initiate_stream(msg, false, Some(timeout)).await?;
        Ok(msg)
    }
}

#[uniffi::export]
impl Client {
    /// Connects to the server at `addr` and returns a ready client.
    #[uniffi::constructor]
    pub async fn new(
        addr: &str,
        client_name: &str,
        connect_timeout: time::Duration,
    ) -> Result {
        let inner = Arc::new(AsyncMutex::new(InnerClient {
            next_stream_id: 0,
            state: ClientState::Defunct(ClientError::Defunct),
        }));

        let stats = Arc::new(stats::StatsCollector::default());
        let conn = spawn_conn(addr, inner.clone(), stats.clone(), connect_timeout).await?;
        inner.lock().await.state = ClientState::Connected(conn);

        Ok(Self {
            name: client_name.to_owned(),
            addr: addr.to_owned(),
            connect_timeout,
            inner,
            stats,
        })
    }

    /// Returns a snapshot of connection statistics.
    pub fn stats(&self) -> stats::ClientStats {
        self.stats.snapshot()
    }

    /// Lists applications available on the server.
    pub async fn list_applications(
        &self,
        timeout: time::Duration,
    ) -> Result, ClientError> {
        let res = match self
            .roundtrip(protocol::ListApplications {}, timeout)
            .await?
        {
            protocol::MessageType::ApplicationList(res) => res,
            protocol::MessageType::Error(e) => return Err(ClientError::ServerError(e)),
            msg => return Err(ClientError::UnexpectedMessage(msg)),
        };

        Ok(res
            .list
            .into_iter()
            .map(Application::try_from)
            .collect::, validation::ValidationError>>()?)
    }

    /// Fetches an application's icon/header image as raw bytes.
    pub async fn fetch_application_image(
        &self,
        application_id: String,
        format: session::ApplicationImageFormat,
        timeout: time::Duration,
    ) -> Result, ClientError> {
        let fetch = protocol::FetchApplicationImage {
            format: format.into(),
            application_id,
        };

        match self.roundtrip(fetch, timeout).await? {
            protocol::MessageType::ApplicationImage(res) => Ok(res.image_data.into()),
            protocol::MessageType::Error(e) => Err(ClientError::ServerError(e)),
            msg => Err(ClientError::UnexpectedMessage(msg)),
        }
    }

    /// Lists sessions currently running on the server.
    pub async fn list_sessions(
        &self,
        timeout: time::Duration,
    ) -> Result, ClientError> {
        let res = match self.roundtrip(protocol::ListSessions {}, timeout).await? {
            protocol::MessageType::SessionList(res) => res,
            protocol::MessageType::Error(e) => return Err(ClientError::ServerError(e)),
            msg => return Err(ClientError::UnexpectedMessage(msg)),
        };

        Ok(res
            .list
            .into_iter()
            .map(Session::try_from)
            .collect::, validation::ValidationError>>()?)
    }

    /// Launches a new session for `application_id`.
    pub async fn launch_session(
        &self,
        application_id: String,
        display_params: display_params::DisplayParams,
        permanent_gamepads: Vec,
        timeout: time::Duration,
    ) -> Result {
        let msg = protocol::LaunchSession {
            application_id: application_id.clone(),
            display_params: Some(display_params.clone().into()),
            permanent_gamepads: permanent_gamepads.iter().map(|pad| (*pad).into()).collect(),
        };

        let res = match self.roundtrip(msg, timeout).await? {
            protocol::MessageType::SessionLaunched(msg) => msg,
            protocol::MessageType::Error(e) => return Err(ClientError::ServerError(e)),
            msg => return Err(ClientError::UnexpectedMessage(msg)),
        };

        // The server doesn't echo the params back; reuse what we sent.
        Ok(Session {
            id: res.id,
            start: time::SystemTime::now(),
            application_id,
            display_params,
        })
    }

    /// Ends the session with the given ID.
    pub async fn end_session(&self, id: u64, timeout: time::Duration) -> Result<(), ClientError> {
        let msg = protocol::EndSession { session_id: id };
        match self.roundtrip(msg, timeout).await? {
            protocol::MessageType::SessionEnded(_) => Ok(()),
            protocol::MessageType::Error(e) => Err(ClientError::ServerError(e)),
            msg => Err(ClientError::UnexpectedMessage(msg)),
        }
    }

    /// Updates the display parameters of a running session.
    pub async fn update_session_display_params(
        &self,
        id: u64,
        params: display_params::DisplayParams,
        timeout: time::Duration,
    ) -> Result<(), ClientError> {
        let msg = protocol::UpdateSession {
            session_id: id,
            display_params: Some(params.into()),
        };

        match self.roundtrip(msg, timeout).await? {
            protocol::MessageType::SessionUpdated(_) => Ok(()),
            protocol::MessageType::Error(e) => Err(ClientError::ServerError(e)),
            msg => Err(ClientError::UnexpectedMessage(msg)),
        }
    }

    /// Attach to a session. The timeout parameter is used for the duration of
    /// the initial request, i.e. until an Attached message is returned by the
    /// server.
    pub async fn attach_session(
        &self,
        session_id: u64,
        config: AttachmentConfig,
        delegate: Arc,
        timeout: time::Duration,
    ) -> Result {
        // Send an attach message using the roundtrip mechanism, but leave
        // the stream open.
        // An empty channel list means "server default".
        let channel_conf = if config.channels.is_empty() {
            None
        } else {
            Some(protocol::AudioChannels {
                channels: config.channels.iter().copied().map(Into::into).collect(),
            })
        };

        let attach = protocol::Attach {
            session_id,
            client_name: self.name.clone(),
            attachment_type: protocol::AttachmentType::Operator.into(),
            video_codec: config.video_codec.unwrap_or_default().into(),
            streaming_resolution: Some(protocol::Size {
                width: config.width,
                height: config.height,
            }),
            video_profile: config.video_profile.unwrap_or_default().into(),
            quality_preset: config.quality_preset.unwrap_or_default(),
            audio_codec: config.audio_codec.unwrap_or_default().into(),
            sample_rate_hz: config.sample_rate.unwrap_or_default(),
            channels: channel_conf,
        };

        // fin=false: the stream stays open for the lifetime of the attachment.
        let (sid, res) = self.initiate_stream(attach, false, Some(timeout)).await?;
        let attached = match res {
            protocol::MessageType::Attached(att) => att,
            protocol::MessageType::Error(e) => return Err(ClientError::ServerError(e)),
            msg => return Err(ClientError::UnexpectedMessage(msg)),
        };

        Attachment::new(
            sid,
            self.inner.clone(),
            attached,
            delegate,
            config.video_stream_seq_offset,
        )
        .await
    }
}

// Builds a Conn and spawns its two threads: the QUIC poll loop and the
// reactor that fulfills roundtrips and drives attachment delegates.
async fn spawn_conn(
    addr: &str,
    client: Arc>,
    stats: Arc,
    connect_timeout: time::Duration,
) -> Result {
    let (incoming_tx, incoming_rx) = flume::unbounded();
    let (outgoing_tx, outgoing_rx) = flume::unbounded();
    let (ready_tx, ready_rx) = oneshot::channel();
    let (shutdown_tx, shutdown_rx) = oneshot::channel();

    // Rendezvous channels for synchronized state.
    let (roundtrips_tx, roundtrips_rx) = flume::bounded(0);
    let (attachments_tx, attachments_rx) = flume::bounded(0);

    let mut conn = conn::Conn::new(addr, incoming_tx, outgoing_rx, ready_tx, shutdown_rx, stats)?;
    let waker = conn.waker();

    // Spawn a polling loop for the quic connection.
    let thread_handle = std::thread::Builder::new()
        .name("QUIC conn".to_string())
        .spawn(move || conn.run(connect_timeout))
        .unwrap();

    // Spawn a second thread to fulfill request/response futures and drive
    // the attachment delegates.
    let _ = std::thread::Builder::new()
        .name("mmclient reactor".to_string())
        .spawn(move || conn_reactor(incoming_rx, roundtrips_rx, attachments_rx, client))
        .unwrap();

    if ready_rx.await.is_err() {
        // An error occurred while spinning up.
        match thread_handle.join() {
            Ok(Ok(_)) | Err(_) => return Err(ClientError::Defunct),
            Ok(Err(e)) => return Err(e.into()),
        }
    }

    Ok(ConnHandle {
        thread_handle,
        waker,
        outgoing: outgoing_tx,
        shutdown: shutdown_tx,
        roundtrips: roundtrips_tx,
        attachments: attachments_tx,
    })
}

// Requests and attachments currently tracked by the reactor, keyed by
// stream ID.
#[derive(Default)]
struct InFlight {
    roundtrips: HashMap,
    attachments: HashMap,
    prev_attachments: HashSet, // By attachment ID.
}

// The reactor loop: multiplexes incoming connection events with registration
// of new roundtrips/attachments, and enforces roundtrip deadlines.
fn conn_reactor(
    incoming: flume::Receiver,
    roundtrips: flume::Receiver<(u64, Roundtrip)>,
    attachments: flume::Receiver<(u64, AttachmentState)>,
    client: Arc>,
) {
    let mut in_flight = InFlight::default();

    let mut tick = time::Instant::now() + time::Duration::from_secs(1);
    loop {
        // Perform some cleanup once per second.
        let now = time::Instant::now();
        if now > tick {
            tick = now + time::Duration::from_secs(1);

            // Check roundtrip deadlines.
            let mut timed_out = Vec::new();
            for (sid, Roundtrip { deadline, .. }) in in_flight.roundtrips.iter() {
                if deadline.is_some_and(|dl| now >= dl) {
                    timed_out.push(*sid);
                }
            }

            // Fulfill the futures with an error.
            for id in &timed_out {
                let Roundtrip { tx, .. } = in_flight.roundtrips.remove(id).unwrap();
                let _ = tx.send(Err(ClientError::RequestTimeout));
            }
        }

        enum SelectResult {
            RecvError,
            InsertRoundtrip(u64, Roundtrip),
            InsertAttachment(u64, AttachmentState),
            Incoming(conn::ConnEvent),
        }

        // Wait on all three channels, but wake up by `tick` at the latest so
        // deadline checks still run.
        let res = flume::select::Selector::new()
            .recv(&roundtrips, |ev| {
                if let Ok((sid, rt)) = ev {
                    SelectResult::InsertRoundtrip(sid, rt)
                } else {
                    SelectResult::RecvError
                }
            })
            .recv(&attachments, |ev| {
                if let Ok((sid, att)) = ev {
                    SelectResult::InsertAttachment(sid, att)
                } else {
                    SelectResult::RecvError
                }
            })
            .recv(&incoming, |ev| {
                if let Ok(ev) = ev {
                    SelectResult::Incoming(ev)
                } else {
                    SelectResult::RecvError
                }
            })
            .wait_deadline(tick);

        match res {
            Err(flume::select::SelectError::Timeout) => continue,
            // Any sender disconnecting ends the reactor.
            Ok(SelectResult::RecvError) => break,
            Ok(SelectResult::InsertRoundtrip(sid, rt)) => {
                in_flight.roundtrips.insert(sid, rt);
            }
            Ok(SelectResult::InsertAttachment(sid, att)) => {
                in_flight.attachments.insert(sid, att);
            }
            Ok(SelectResult::Incoming(ev)) => conn_reactor_handle_incoming(&mut in_flight, ev),
        };
    }

    // The client is probably already closed, but we should make sure, since
    // this thread is the only one notified if the connection thread died.
    let mut guard = block_on(client.lock());
    let stream_err = match guard.close() {
        Err(e) => Some(e.clone()),
        Ok(_) => None,
    };

    for (_, att) in in_flight.attachments.drain() {
        att.handle_close(stream_err.clone());
    }

    in_flight.roundtrips.clear(); // Cancels the futures.
}

// Routes one connection event to the matching attachment or roundtrip.
fn conn_reactor_handle_incoming(in_flight: &mut InFlight, ev: conn::ConnEvent) {
    match ev {
        conn::ConnEvent::StreamMessage(sid, msg) => {
            // Attachment streams take precedence over pending roundtrips.
            if let Some(attachment) = in_flight.attachments.get_mut(&sid) {
                attachment.handle_message(msg);
                return;
            }

            if let Some(Roundtrip { tx, ..
}) = in_flight.roundtrips.remove(&sid) { let _ = tx.send(Ok(msg)); } } conn::ConnEvent::Datagram(msg) => { let (session_id, attachment_id) = match &msg { protocol::MessageType::VideoChunk(chunk) => (chunk.session_id, chunk.attachment_id), protocol::MessageType::AudioChunk(chunk) => (chunk.session_id, chunk.attachment_id), msg => { error!("unexpected {} as datagram", msg); return; } }; // Find the relevant attachment. The session ID and attachment // may be omitted if there's only one attachment. let attachment = match (session_id, attachment_id) { (0, 0) if in_flight.attachments.len() == 1 => { in_flight.attachments.iter_mut().next() } (0, _) | (_, 0) => None, // This is invalid. (s, a) => in_flight .attachments .iter_mut() .find(|(_, att)| att.session_id == s && att.attachment_id == a), }; if let Some((_, attachment)) = attachment { attachment.handle_message(msg); } else if !in_flight.prev_attachments.contains(&attachment_id) { error!( session_id, attachment_id, "failed to match datagram to attachment" ); } } conn::ConnEvent::StreamClosed(sid) => { in_flight.roundtrips.remove(&sid); if let Some(attachment) = in_flight.attachments.remove(&sid) { in_flight.prev_attachments.insert(attachment.attachment_id); attachment.handle_close(None); } } } } ================================================ FILE: mm-client-common/src/logging.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT use std::sync::{Arc, OnceLock}; #[derive(uniffi::Enum)] pub enum LogLevel { None, Trace, Debug, Info, Warn, Error, } impl From for LogLevel { fn from(value: log::Level) -> Self { match value { log::Level::Trace => LogLevel::Trace, log::Level::Debug => LogLevel::Debug, log::Level::Info => LogLevel::Info, log::Level::Warn => LogLevel::Warn, log::Level::Error => LogLevel::Error, } } } /// An interface for receiving logs from this library. 
#[uniffi::export(with_foreign)] pub trait LogDelegate: Send + Sync + std::fmt::Debug { fn log(&self, level: LogLevel, target: String, msg: String); } struct LogWrapper(Arc); impl log::Log for LogWrapper { fn enabled(&self, metadata: &log::Metadata) -> bool { metadata.level() <= log::max_level() } fn log(&self, record: &log::Record) { if self.enabled(record.metadata()) { LogDelegate::log( &*self.0, record.level().into(), record.target().to_owned(), record.args().to_string(), ) } } fn flush(&self) {} } /// Set the minimum log level. #[uniffi::export] fn set_log_level(level: LogLevel) { let filter = match level { LogLevel::None => log::LevelFilter::Off, LogLevel::Trace => log::LevelFilter::Trace, LogLevel::Debug => log::LevelFilter::Debug, LogLevel::Info => log::LevelFilter::Info, LogLevel::Warn => log::LevelFilter::Warn, LogLevel::Error => log::LevelFilter::Error, }; log::set_max_level(filter); } /// Set the global logger. #[uniffi::export] fn set_logger(logger: Arc) { // This has to accept an Arc to be exportable by uniffi, however awkward // that may be. 
static LOGGER: OnceLock = OnceLock::new(); let logger = LOGGER.get_or_init(|| LogWrapper(logger)); log::set_logger(logger).expect("failed to set logger") } ================================================ FILE: mm-client-common/src/packet/ring.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT use std::collections::{BTreeMap, VecDeque}; use mm_protocol as protocol; use tracing::warn; use super::{DroppedPacket, Packet}; const RING_TARGET_SIZE: usize = 5; pub(crate) trait Chunk { fn seq(&self) -> u64; fn stream_seq(&self) -> u64; fn chunk(&self) -> u32; fn num_chunks(&self) -> u32; fn data(&self) -> bytes::Bytes; fn pts(&self) -> u64; fn hierarchical_layer(&self) -> u32; fn fec_metadata(&self) -> Option; } impl Chunk for protocol::VideoChunk { fn seq(&self) -> u64 { self.seq } fn stream_seq(&self) -> u64 { self.stream_seq } fn chunk(&self) -> u32 { self.chunk } fn num_chunks(&self) -> u32 { self.num_chunks } fn data(&self) -> bytes::Bytes { self.data.clone() } fn pts(&self) -> u64 { self.timestamp } fn hierarchical_layer(&self) -> u32 { self.hierarchical_layer } fn fec_metadata(&self) -> Option { self.fec_metadata.clone() } } impl Chunk for protocol::AudioChunk { fn seq(&self) -> u64 { self.seq } fn stream_seq(&self) -> u64 { self.stream_seq } fn chunk(&self) -> u32 { self.chunk } fn num_chunks(&self) -> u32 { self.num_chunks } fn data(&self) -> bytes::Bytes { self.data.clone() } fn pts(&self) -> u64 { self.timestamp } fn hierarchical_layer(&self) -> u32 { 0 } fn fec_metadata(&self) -> Option { self.fec_metadata.clone() } } #[derive(Debug)] enum FECDecoder { Plain(Vec>), RaptorQ { dec: raptorq::Decoder, res: Option, }, } #[derive(Debug)] struct WipPacket { stream_seq: u64, seq: u64, pts: u64, hierarchical_layer: u32, decoder: FECDecoder, } impl WipPacket { fn new(incoming: impl Chunk) -> Result { let decoder = if let Some(md) = incoming.fec_metadata() { if md.fec_scheme() != 
protocol::fec_metadata::FecScheme::Raptorq { return Err(PacketRingError::UnsupportedFecScheme(md.fec_scheme)); } let oti: &[u8] = &md.fec_oti; let Ok(config) = oti .try_into() .map(raptorq::ObjectTransmissionInformation::deserialize) else { return Err(PacketRingError::InvalidFecMetadata); }; FECDecoder::RaptorQ { dec: raptorq::Decoder::new(config), res: None, } } else { FECDecoder::Plain(vec![None; incoming.num_chunks().max(1) as usize]) }; let mut this = Self { stream_seq: incoming.stream_seq(), seq: incoming.seq(), pts: incoming.pts(), hierarchical_layer: incoming.hierarchical_layer(), decoder, }; this.insert(incoming)?; Ok(this) } fn insert(&mut self, incoming: impl Chunk) -> Result<(), PacketRingError> { match &mut self.decoder { FECDecoder::Plain(ref mut chunks) => { let chunk = incoming.chunk() as usize; let num_chunks = incoming.num_chunks() as usize; if num_chunks != chunks.len() || chunk >= num_chunks { return Err(PacketRingError::InvalidChunk(chunk, num_chunks)); } else if chunks[chunk].is_some() { return Err(PacketRingError::DuplicateChunk(chunk)); } chunks[chunk] = Some(incoming.data()); Ok(()) } FECDecoder::RaptorQ { dec, .. } => { let Some(md) = incoming.fec_metadata() else { return Err(PacketRingError::InvalidFecMetadata); }; let b: &[u8] = &md.fec_payload_id; let Ok(payload_id) = b.try_into().map(raptorq::PayloadId::deserialize) else { return Err(PacketRingError::InvalidFecMetadata); }; dec.add_new_packet(raptorq::EncodingPacket::new( payload_id, incoming.data().into(), )); Ok(()) } } } fn is_complete(&mut self) -> bool { match &mut self.decoder { FECDecoder::Plain(chunks) => chunks.iter().all(|c| c.is_some()), FECDecoder::RaptorQ { dec, ref mut res } => { if res.is_some() { true } else if let Some(data) = dec.get_result() { *res = Some(bytes::Bytes::from(data)); true } else { false } } } } /// Reconstructs the completed frame. Panics if the packet is not yet /// recoverable. 
fn complete(self) -> Packet { let data = match self.decoder { FECDecoder::Plain(chunks) => { let chunks: Vec<_> = chunks .into_iter() .map(|c| c.expect("packet incomplete")) .collect(); chunks.into() } FECDecoder::RaptorQ { dec, res } => { let data = res.unwrap_or_else(|| { bytes::Bytes::from(dec.get_result().expect("packet incomplete")) }); [data].into() } }; Packet { pts: self.pts, seq: self.seq, stream_seq: self.stream_seq, hierarchical_layer: self.hierarchical_layer, data, } } } #[derive(Debug, PartialEq, Eq, Clone, thiserror::Error)] pub(crate) enum PacketRingError { #[error("invalid chunk {0} of {1}")] InvalidChunk(usize, usize), #[error("duplicate chunk {0}")] DuplicateChunk(usize), #[error("unsupported FEC scheme: {0}")] UnsupportedFecScheme(i32), #[error("invalid FEC metadatata")] InvalidFecMetadata, } #[derive(Default)] pub(crate) struct PacketRing { // Oldest frames at the front, newest at the back. ring: VecDeque, min_stream_seq: u64, min_seq: BTreeMap, // Indexed by stream_seq. dropped: VecDeque, } impl PacketRing { pub(crate) fn new() -> Self { Self::default() } pub(crate) fn recv_chunk(&mut self, incoming: impl Chunk) -> Result<(), PacketRingError> { let stream_seq = incoming.stream_seq(); let seq_floor = self.min_seq.get(&stream_seq).copied().unwrap_or_default(); if incoming.stream_seq() < self.min_stream_seq || incoming.seq() < seq_floor { return Ok(()); } match self .ring .iter_mut() .find(|wip| wip.stream_seq == incoming.stream_seq() && wip.seq == incoming.seq()) { Some(wip) => wip.insert(incoming), None => { let wip = WipPacket::new(incoming)?; // Insert into the ring in order with respect to packets with // the same stream_seq. 
if let Some(idx) = self .ring .iter() .position(|p| p.stream_seq == wip.stream_seq && p.seq > wip.seq) { self.ring.insert(idx, wip); } else { self.ring.push_back(wip); } loop { let len = self.ring.len(); let front = self.ring.front_mut().unwrap(); if front.is_complete() || len <= RING_TARGET_SIZE { break; } // If the oldest frame is incomplete, drop it to make room. if !front.is_complete() { let dropped = self.ring.pop_front().unwrap(); warn!( seq = dropped.seq, stream_seq = dropped.stream_seq, hierarchical_layer = dropped.hierarchical_layer, "dropped packet!", ); self.dropped.push_back(DroppedPacket { pts: dropped.pts, seq: dropped.seq, stream_seq: dropped.stream_seq, hierarchical_layer: dropped.hierarchical_layer, }) } else { break; } } Ok(()) } } } /// Removes packets matching the stream_seq for which all chunks are /// accounted for, and returns them as an iterator. Stops before the first /// incomplete packet that matches. /// /// The iterator must be used to actually remove packets from the ring. /// Dropping the iterator early will not drop the remaining packets. pub(crate) fn drain_completed(&mut self, stream_seq: u64) -> DrainCompleted { DrainCompleted(self, stream_seq) } /// Removes all packets with the same stream_seq or lower. 
pub(crate) fn discard(&mut self, stream_seq: u64) { self.min_stream_seq = stream_seq + 1; self.ring.retain(|wip| wip.stream_seq > stream_seq); self.min_seq.retain(|x, _| *x > stream_seq); } } pub(crate) struct DrainCompleted<'a>(&'a mut PacketRing, u64); impl Iterator for DrainCompleted<'_> { type Item = Result; fn next(&mut self) -> Option { let dropped = self .0 .dropped .iter() .position(|p| p.stream_seq == self.1) .and_then(|idx| self.0.dropped.remove(idx)); if let Some(dropped) = dropped { self.0.min_seq.insert(dropped.stream_seq, dropped.seq + 1); return Some(Err(dropped)); } let ring = &mut self.0.ring; match ring .iter_mut() .enumerate() .find(|(_, wip)| wip.stream_seq == self.1) { Some((idx, ref mut v)) => { if v.is_complete() { self.0.min_seq.insert(v.stream_seq, v.seq + 1); Some(Ok(ring.remove(idx).unwrap().complete())) } else { None } } _ => None, } } } #[cfg(test)] mod tests { use super::*; #[test] fn test_ring() { let mut ring = PacketRing::default(); let assert_frames = |ring: &mut PacketRing, s: &[u64]| { let completed = ring.drain_completed(0).collect::>(); assert_eq!(s.len(), completed.len()); for (expected_seq, actual) in s.iter().zip(completed.into_iter()) { let actual = actual.expect("no dropped packet"); assert_eq!(actual.seq, *expected_seq); assert_eq!(&actual.data(), &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); } }; let frame_one = make_chunks(0, &[&[0, 1, 2], &[3, 4, 5, 6], &[7, 8], &[9]]); // 4 chunks let frame_two = make_chunks(1, &[&[0, 1, 2, 3, 4], &[5, 6], &[7, 8, 9]]); // 3 chunks let frame_three = make_chunks(2, &[&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]]); // 1 chunk ring.recv_chunk(frame_three[0].clone()).unwrap(); // Frame three complete. 
ring.recv_chunk(frame_two[1].clone()).unwrap(); ring.recv_chunk(frame_one[0].clone()).unwrap(); assert_eq!(ring.drain_completed(0).collect::>().len(), 0); ring.recv_chunk(frame_one[1].clone()).unwrap(); ring.recv_chunk(frame_one[2].clone()).unwrap(); ring.recv_chunk(frame_two[0].clone()).unwrap(); assert_eq!(ring.drain_completed(0).collect::>().len(), 0); ring.recv_chunk(frame_one[3].clone()).unwrap(); // Frame one complete. assert_frames(&mut ring, &[0]); ring.recv_chunk(frame_two[2].clone()).unwrap(); // Frame two complete, frame three was already complete. assert_frames(&mut ring, &[1, 2]); assert_eq!(ring.drain_completed(0).collect::>().len(), 0); } #[test] fn test_ring_drop() { let mut ring = PacketRing::default(); for i in 0..10 { // Send ten partial frames (each missing one chunk.) let chunks = make_chunks(i, &[&[0, 1], &[2, 3]]); ring.recv_chunk(chunks[0].clone()).unwrap(); } // Then send a complete frame. let chunks = make_chunks(10, &[&[0, 1], &[2, 3], &[4, 5], &[6, 7], &[8, 9]]); for chunk in chunks { ring.recv_chunk(chunk).unwrap(); } for i in 11..20 { // Send more partial frames. let chunks = make_chunks(i, &[&[0, 1], &[2, 3]]); ring.recv_chunk(chunks[0].clone()).unwrap(); } // The ring should have dropped the partial frames and should indicate // that alongside the completed one. 
let completed = ring.drain_completed(0).collect::>(); assert_eq!(11, completed.len()); assert_eq!(completed[0].as_ref().err().unwrap().seq, 0); assert_eq!(completed[1].as_ref().err().unwrap().seq, 1); assert_eq!(completed[2].as_ref().err().unwrap().seq, 2); assert_eq!(completed[3].as_ref().err().unwrap().seq, 3); assert_eq!(completed[4].as_ref().err().unwrap().seq, 4); assert_eq!(completed[5].as_ref().err().unwrap().seq, 5); assert_eq!(completed[6].as_ref().err().unwrap().seq, 6); assert_eq!(completed[7].as_ref().err().unwrap().seq, 7); assert_eq!(completed[8].as_ref().err().unwrap().seq, 8); assert_eq!(completed[9].as_ref().err().unwrap().seq, 9); assert_eq!(completed[10].as_ref().unwrap().seq, 10); let frame = completed.last().unwrap(); assert_eq!( &frame.as_ref().unwrap().data(), &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9] ); } fn make_chunks(seq: u64, chunks: &[&[u8]]) -> Vec { chunks .iter() .enumerate() .map(|(i, chunk)| protocol::VideoChunk { attachment_id: 0, session_id: 0, stream_seq: 0, seq, chunk: i as u32, num_chunks: chunks.len() as u32, data: bytes::Bytes::copy_from_slice(chunk), timestamp: 0, hierarchical_layer: 0, fec_metadata: None, }) .collect() } } ================================================ FILE: mm-client-common/src/packet.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT mod ring; use std::collections::VecDeque; pub(crate) use ring::*; #[derive(Debug, Clone, uniffi::Object)] pub struct Packet { pub(crate) pts: u64, pub(crate) seq: u64, pub(crate) stream_seq: u64, pub(crate) hierarchical_layer: u32, data: VecDeque, } #[derive(Debug, Clone, uniffi::Record)] pub struct DroppedPacket { pub pts: u64, pub seq: u64, pub stream_seq: u64, pub hierarchical_layer: u32, } #[uniffi::export] impl Packet { pub fn pts(&self) -> u64 { self.pts } pub fn stream_seq(&self) -> u64 { self.stream_seq } pub fn seq(&self) -> u64 { self.seq } pub fn hierarchical_layer(&self) -> u32 { self.hierarchical_layer } 
pub fn data(&self) -> Vec { if self.data.len() == 1 { self.data[0].to_vec() } else { use bytes::buf::BufMut; let mut buf = Vec::with_capacity(self.len()); for chunk in self.data.iter() { buf.put(chunk.clone()); } buf } } } impl Packet { pub fn len(&self) -> usize { self.data.iter().map(|c| c.len()).sum() } pub fn is_empty(&self) -> bool { self.len() == 0 } // Copies the packet data into dst. The length of dst must match the pub fn copy_to_slice(&self, mut dst: &mut [u8]) { use bytes::buf::BufMut; for chunk in self.data.iter() { dst.put(chunk.clone()); } } } ================================================ FILE: mm-client-common/src/pixel_scale.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT use mm_protocol as protocol; use crate::validation::*; #[derive(Debug, Clone, Copy, PartialEq, Eq, uniffi::Record)] pub struct PixelScale { numerator: u32, denominator: u32, } impl PixelScale { pub const ONE: Self = Self { numerator: 1, denominator: 1, }; pub fn new(numerator: u32, denominator: u32) -> Self { Self { numerator, denominator, } } pub fn is_fractional(&self) -> bool { (self.numerator % self.denominator) != 0 } pub fn round_up(self) -> Self { Self { numerator: self.numerator.next_multiple_of(self.denominator) / self.denominator, denominator: 1, } } } impl std::fmt::Display for PixelScale { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:.1}", self.numerator as f64 / self.denominator as f64) } } impl TryFrom for PixelScale { type Error = ValidationError; fn try_from(scale: protocol::PixelScale) -> Result { if scale.denominator == 0 && scale.numerator != 0 { Ok(Self::ONE) } else if scale.denominator == 0 || scale.numerator == 0 { Err(ValidationError::Required("denominator".to_string())) } else { Ok(Self { numerator: scale.numerator, denominator: scale.denominator, }) } } } impl From for protocol::PixelScale { fn from(scale: PixelScale) -> Self { Self { numerator: 
scale.numerator, denominator: scale.denominator, } } } ================================================ FILE: mm-client-common/src/session.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT use std::time; use mm_protocol as protocol; pub use protocol::ApplicationImageFormat; use crate::display_params; use crate::validation::*; /// A launchable application on the server. #[derive(Debug, Clone, PartialEq, Eq, uniffi::Record)] pub struct Application { pub id: String, pub description: String, pub folder: Vec, pub images_available: Vec, } impl TryFrom for Application { type Error = ValidationError; fn try_from(value: protocol::application_list::Application) -> Result { let images_available = value .images_available .into_iter() .map(|v| match v.try_into() { Err(_) | Ok(protocol::ApplicationImageFormat::Unknown) => { Err(ValidationError::InvalidEnum("images_available".into())) } Ok(v) => Ok(v), }) .collect::, _>>()?; Ok(Application { id: value.id, description: value.description, folder: value.folder, images_available, }) } } /// A running session on the server. 
#[derive(Debug, Clone, PartialEq, Eq, uniffi::Record)] pub struct Session { pub id: u64, pub application_id: String, pub start: time::SystemTime, pub display_params: display_params::DisplayParams, } impl TryFrom for Session { type Error = ValidationError; fn try_from(msg: protocol::session_list::Session) -> Result { let start = match required_field!(msg.session_start)?.try_into() { Ok(ts) => Ok(ts), Err(_) => Err(ValidationError::InvalidTimestamp( "session_start".to_string(), )), }?; Ok(Session { id: msg.session_id, application_id: msg.application_id, start, display_params: required_field!(msg.display_params)?.try_into()?, }) } } ================================================ FILE: mm-client-common/src/stats.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT use std::{sync::atomic::AtomicU64, time}; #[derive(Default)] pub(crate) struct StatsCollector { pub(crate) bytes_tx: AtomicU64, pub(crate) bytes_rx: AtomicU64, pub(crate) rtt_us: AtomicU64, } impl StatsCollector { pub(crate) fn snapshot(&self) -> ClientStats { let rtt_us = self.rtt_us.load(std::sync::atomic::Ordering::SeqCst); ClientStats { bytes_tx: self.bytes_tx.load(std::sync::atomic::Ordering::SeqCst), bytes_rx: self.bytes_rx.load(std::sync::atomic::Ordering::SeqCst), rtt: time::Duration::from_micros(rtt_us), } } } /// A snapshot of the client's connection statistics. #[derive(uniffi::Record, Clone, Copy)] pub struct ClientStats { pub bytes_tx: u64, pub bytes_rx: u64, pub rtt: time::Duration, } ================================================ FILE: mm-client-common/src/validation.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT #[derive(Debug, Clone, thiserror::Error)] pub enum ValidationError { #[error("{0} must not be null")] Required(String), #[error("{0}: invalid enum value")] InvalidEnum(String), #[error("{0}: invalid timestamp")] InvalidTimestamp(String), } macro_rules! 
required_field { ($msg:ident.$field:ident) => { $msg.$field .ok_or(crate::validation::ValidationError::Required( stringify!($ident).to_string(), )) }; } pub(crate) use required_field; ================================================ FILE: mm-docgen/Cargo.toml ================================================ [package] name = "mmserver-config-docgen" version = "0.1.0" edition = "2021" [[bin]] name = "config-docgen" [[bin]] name = "protocol-docgen" [dependencies] regex = "1" ================================================ FILE: mm-docgen/src/bin/config-docgen.rs ================================================ //! Generates markdown docs from mmserver.default.toml. Tightly coupled //! to the format of that file. use std::{ fs::File, io::{BufRead as _, BufReader}, }; use regex::Regex; const FRONT_MATTER: &str = r#" +++ title = "Configuration Reference" [extra] toc = true +++ "#; fn main() { let mut args = std::env::args(); if args.len() != 2 { eprintln!("usage: {} SRC", args.next().unwrap()); std::process::exit(1); } let _ = args.next().unwrap(); let src = args.next().unwrap(); let r = BufReader::new(File::open(src).expect("source path does not exist")); let mut preamble = true; let mut key_path: Vec = Vec::new(); let mut docs = Vec::new(); let keypath_section_re = Regex::new(r"\A#?\s*\[([a-z0-9-_.]+)\]\s*\z").unwrap(); let key_re = Regex::new(r"\A(#?)\s*([a-z0-9-_]+)\s=\s(.*)\z").unwrap(); println!("{}", FRONT_MATTER); for line in r.lines() { let s = line.expect("io error"); if s.is_empty() { preamble = false; for doc in docs.drain(..) { println!("{}", doc); } continue; } else if preamble { continue; } if let Some(header) = s.strip_prefix("## *** ") { // Documentation sections. println!("\n## {}", header.strip_suffix(" ***").unwrap()); } else if s.starts_with("## ***") { // Section decoration. continue; } else if let Some(doc) = s.strip_prefix("##") { // Key documentation. 
docs.push(doc.trim_start().to_owned()); } else if let Some(m) = key_re.captures(&s) { // Key, value. let is_default = m.get(1).unwrap().is_empty(); let key = m.get(2).unwrap().as_str(); let value = m.get(3).unwrap().as_str(); let full_path = key_path .iter() .map(String::as_str) .chain(key.split('.')) .collect::>() .join("."); println!("\n#### `{}`\n", full_path); if is_default { println!("```toml\n# Default\n{} = {}\n```\n", key, value); } else { println!( "```toml\n# Example (default unset)\n{} = {}\n```\n", key, value ); } for doc in docs.drain(..) { println!("{}", doc); } } else if let Some(m) = keypath_section_re.captures(&s) { // Update keypath for TOML section headers. key_path.clear(); for key in m.get(1).unwrap().as_str().split(".") { // Example app becomes in the docs. if key == "steam-big-picture" { key_path.push("".to_owned()); } else { key_path.push(key.to_owned()); } } } else { eprintln!("error: unmatched line: \n{}", s); std::process::exit(1); } } } ================================================ FILE: mm-docgen/src/bin/protocol-docgen.rs ================================================ //! Generates markdown docs from mm-protoco/src/messages.proto. Tightly coupled //! to the format of that file. use std::{ fs::File, io::{BufRead as _, BufReader}, }; const FRONT_MATTER: &str = r#" +++ title = "Protocol Reference" [extra] toc = true +++ "#; fn main() { let mut args = std::env::args(); if args.len() != 2 { eprintln!("usage: {} SRC", args.next().unwrap()); std::process::exit(1); } let _ = args.next().unwrap(); let src = args.next().unwrap(); let r = BufReader::new(File::open(src).expect("source path does not exist")); println!("{}", FRONT_MATTER); // Skip until the first

. let mut message_lines = Vec::new(); let mut comment_lines = Vec::new(); for line in r .lines() .skip_while(|s| !s.as_ref().unwrap().starts_with("// # ")) { let line = line.unwrap(); if message_lines.is_empty() && line.is_empty() { emit_comments(&mut comment_lines); println!(); } else if let Some(comment) = line.strip_prefix("// ").or_else(|| line.strip_prefix("//")) { emit_message_code_block(&mut message_lines); comment_lines.push(comment.to_owned()); } else if !line.contains("TODO") { emit_comments(&mut comment_lines); message_lines.push(line); } } emit_comments(&mut comment_lines); emit_message_code_block(&mut message_lines); } fn emit_comments(lines: &mut Vec) { let comment = lines.join("\n"); // Add internal links. let comment = regex::Regex::new(r"`(?s)(\d+)\s+-\s+([\w\s]+)`") .unwrap() .replace_all(&comment, |caps: ®ex::Captures<'_>| { let slug = caps[2] .to_lowercase() .split_whitespace() .collect::>() .join("-"); format!("[{}](#{}-{})", &caps[0], &caps[1], slug) }); println!("{}", comment); lines.clear(); } fn emit_message_code_block(lines: &mut Vec) { if !lines.is_empty() { let message = lines.join("\n"); println!("\n```proto\n{}\n```\n", message.trim()); lines.clear(); } } ================================================ FILE: mm-protocol/Cargo.toml ================================================ # Copyright 2024 Colin Marc # # SPDX-License-Identifier: MIT [package] name = "mm-protocol" version = "0.3.0" edition = "2021" license = "MIT" [dependencies] bytes = "1" octets = "0.2" prost = "0.13" thiserror = "1" uniffi = { version = "0.28", optional = true } [build-dependencies] prost-build = "0.13" [features] uniffi = ["dep:uniffi"] ================================================ FILE: mm-protocol/build.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT fn main() -> std::io::Result<()> { let mut conf = prost_build::Config::new(); #[cfg(feature = "uniffi")] conf.enum_attribute(".", 
"#[derive(uniffi::Enum)]"); conf.bytes(["."]) .include_file("_include.rs") .compile_protos(&["src/messages.proto"], &["src/"])?; Ok(()) } ================================================ FILE: mm-protocol/src/lib.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT use prost::Message as _; #[cfg(feature = "uniffi")] uniffi::setup_scaffolding!(); include!(concat!(env!("OUT_DIR"), "/_include.rs")); pub use messages::*; mod timestamp; #[derive(Debug, thiserror::Error)] enum ProtobufError { #[error(transparent)] ProtobufDecode(#[from] prost::DecodeError), #[error(transparent)] ProtobufEncode(#[from] prost::EncodeError), } #[derive(Debug, Clone, thiserror::Error)] pub enum ProtocolError { #[error("protobuf encode error: {0}")] ProtobufEncode(#[from] prost::EncodeError), #[error("protobuf decode error: {0}")] ProtobufDecode(#[from] prost::DecodeError), #[error("short buffer, need {0} bytes")] ShortBuffer(usize), #[error("invalid message")] InvalidMessage, #[error("invalid message type: {0} (len={1})")] InvalidMessageType(u32, usize), } /// The maximum size of a single message. Note that a lower limit may apply to /// messages sent as datagrams, based on the connection MTU and QUIC's overhead. pub const MAX_MESSAGE_SIZE: usize = 1048576; /// The current protocol version. pub const ALPN_PROTOCOL_VERSION: &[u8] = b"mm00"; // This is a very simplified version of the enum_dispatch macro. macro_rules! message_types { ($($num:expr => $variant:ident),*,) => { /// A protocol message. 
#[repr(u32)] #[derive(Clone, Debug, PartialEq)] pub enum MessageType { $($variant($variant) = $num),* } impl std::fmt::Display for MessageType { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { $(MessageType::$variant(_) => write!(f, "{}:{}", $num, stringify!($variant))),* } } } impl MessageType { fn message_type(&self) -> u32 { match self { $(MessageType::$variant(_) => $num),* } } fn encoded_len(&self) -> usize { match self { $(MessageType::$variant(v) => v.encoded_len()),* } } fn encode(&self, buf: &mut B) -> Result<(), ProtocolError> where B: bytes::BufMut, { let res = match self { $(MessageType::$variant(v) => v.encode(buf)),* }; res.map_err(|e| e.into()) } fn decode(msg_type: u32, total_len: usize, buf: B) -> Result { match msg_type { $($num => Ok($variant::decode(buf)?.into())),*, _ => Err(ProtocolError::InvalidMessageType(msg_type, total_len)), } } } $(impl From<$variant> for MessageType { fn from(v: $variant) -> Self { MessageType::$variant(v) } })* }; } message_types! { 1 => Error, 11 => ListApplications, 12 => ApplicationList, 13 => LaunchSession, 14 => SessionLaunched, 15 => UpdateSession, 16 => SessionUpdated, 17 => ListSessions, 18 => SessionList, 19 => EndSession, 20 => SessionEnded, 21 => FetchApplicationImage, 22 => ApplicationImage, 30 => Attach, 31 => Attached, 32 => KeepAlive, 33 => SessionParametersChanged, 35 => Detach, 51 => VideoChunk, 52 => RequestVideoRefresh, 56 => AudioChunk, 60 => KeyboardInput, 61 => PointerEntered, 62 => PointerLeft, 63 => PointerMotion, 64 => PointerInput, 65 => PointerScroll, 66 => UpdateCursor, 67 => LockPointer, 68 => ReleasePointer, 69 => RelativePointerMotion, 70 => GamepadAvailable, 71 => GamepadUnavailable, 72 => GamepadMotion, 73 => GamepadInput, } /// Reads a header-prefixed message from a byte slice, and returns the number /// of bytes consumed. Returns ProtocolError::ShortBuffer if the buffer /// contains a partial message. 
pub fn decode_message(buf: &[u8]) -> Result<(MessageType, usize), ProtocolError> { if buf.len() < 10 { return Err(ProtocolError::ShortBuffer(10)); } let (msg_type, data_off, total_len) = { let mut hdr = octets::Octets::with_slice(&buf[..10]); let remaining = get_varint32(&mut hdr)? as usize; let prefix_off = hdr.off(); let msg_type = get_varint32(&mut hdr)?; let off = hdr.off(); (msg_type, off, prefix_off + remaining) }; if msg_type == 0 || total_len == 0 || total_len > MAX_MESSAGE_SIZE || data_off > total_len { return Err(ProtocolError::InvalidMessage); } else if data_off > buf.len() || total_len > buf.len() { return Err(ProtocolError::ShortBuffer(total_len)); } let padded_len = total_len.max(10); let msg = MessageType::decode(msg_type, padded_len, &buf[data_off..total_len])?; Ok((msg, padded_len)) } /// Writes a header-prefixed message to a byte slice, and returns the number /// of bytes used. Returns ProtocolError::ShortBuffer if the slice doesn't have /// enough capacity. pub fn encode_message(msg: &MessageType, buf: &mut [u8]) -> Result { let msg_type = msg.message_type(); let msg_len = u32::try_from(msg.encoded_len()).map_err(|_| ProtocolError::InvalidMessage)? 
as usize; let header_len = encode_header(msg_type, msg_len, buf)?; let total_len = header_len + msg_len; let mut msg_buf = &mut buf[header_len..]; msg.encode(&mut msg_buf)?; if total_len < 10 { buf[total_len..].fill(0); Ok(10) } else { Ok(total_len) } } fn encode_header(msg_type: u32, msg_len: usize, buf: &mut [u8]) -> Result { let msg_type_len = octets::varint_len(msg_type as u64); let prefix_len = octets::varint_len((msg_type_len + msg_len) as u64); let total_len = prefix_len + msg_type_len + msg_len; if total_len > MAX_MESSAGE_SIZE { return Err(ProtocolError::InvalidMessage); } else if total_len > buf.len() || buf.len() < 10 { return Err(ProtocolError::ShortBuffer(std::cmp::max(total_len, 10))); } let off = { let mut hdr = octets::OctetsMut::with_slice(buf); hdr.put_varint((msg_type_len + msg_len) as u64).unwrap(); hdr.put_varint(msg_type as u64).unwrap(); hdr.off() }; Ok(off) } // get_varint correctly handles u64 varints, but the protocol specifies u32. fn get_varint32(buf: &mut octets::Octets) -> Result { let x = match buf.get_varint() { Ok(x) => x, Err(_) => return Err(ProtocolError::InvalidMessage), }; u32::try_from(x).map_err(|_| ProtocolError::InvalidMessage) } #[cfg(test)] mod tests { use super::*; macro_rules! 
test_roundtrip { ($name:ident : $value:expr) => { #[test] fn $name() { let msg = $value.into(); let mut buf = [0; MAX_MESSAGE_SIZE]; let len = encode_message(&msg, &mut buf).unwrap(); let (decoded_msg, decoded_len) = decode_message(&buf).unwrap(); assert_eq!(msg, decoded_msg); assert_eq!(len, decoded_len); } }; } test_roundtrip!(test_roundtrip_detach: Detach {}); test_roundtrip!(test_roundtrip_error: Error { err_code: 1, error_text: "test".to_string(), }); test_roundtrip!(test_roundtrip_smallframe: VideoChunk { attachment_id: 0, session_id: 1, stream_seq: 1, seq: 2, chunk: 3, num_chunks: 4, data: bytes::Bytes::from(vec![9; 52]), timestamp: 1234, ..Default::default() }); test_roundtrip!(test_roundtrip_frame: VideoChunk { attachment_id: 0, session_id: 1, stream_seq: 1, seq: 2, chunk: 3, num_chunks: 4, data: bytes::Bytes::from(vec![9; 1200]), timestamp: 1234, hierarchical_layer: 0, ..Default::default() }); #[test] fn invalid_message_type() { let msg_type = 999; let msg_buf = [100_u8; 322]; let msg_len = msg_buf.len(); // Create a fake message with a msg_type of 999. let mut buf = [0; MAX_MESSAGE_SIZE]; let header_len = encode_header(msg_type, msg_len, &mut buf).expect("failed to encode fake message"); let total_len = header_len + msg_len; buf[header_len..total_len].copy_from_slice(&msg_buf); match decode_message(&buf) { Err(ProtocolError::InvalidMessageType(t, len)) => { assert_eq!(t, 999); assert_eq!(len, total_len); } v => panic!("expected InvalidMessageType, got {:?}", v), } } } ================================================ FILE: mm-protocol/src/messages.proto ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT syntax = "proto3"; package messages; // # The Magic Mirror Streaming Protocol (MMSP) version 0.3.0 // // This document describes a protocol for remote application streaming. 
Using // this protocol, a client can remotely launch and/or attach an application on a // server accessible only via network connection and display the output locally, // while sending input commands. // // ## Differences to traditional remote desktop protocols // // Individual sessions in the protocol concern applications, rather than desktop // environments. Although not part of the protocol itself, server // implementations are expected to render applications offscreen, rather than // displaying a central, shared desktop either in whole or part. This allows // multiple sessions to coexist without interfering with each other. // // Following from this, session parameters such as framerate and resolution are // set by the client, not the server. // // Note that nothing stops a session from containing an entire desktop // environment. The protocol makes no general distinction between desktop // environments and other applications, such as GPU-accelerated games. It does, // however, provide some built-in support for client-side cursor and // wireframe rendering, as well as clipboard operations, in order to improve the // remote desktop experience should the server and desktop support it. TODO: no // wireframe or clipboard support yet // // ## Protocol basics // // At a high level, the protocol consists of messages passed bidirectionally // over a QUIC connection. // // Servers and clients should use the `mm00` ALPN identifier. The number // will increase in future revisions of the protocol - see 'Protocol Versioning' // below. // // A message is an arbitrary-length byte blob, beginning with two unsigned // varints. The first varint holds the total length of the rest of the message, // including the bytes required to hold the next varint. The second is a message // type. The remaining bytes are a protobuf-encoded message matching the message // type and this specification. 
// // [ 1-5(A) bytes: prefixed length (N) ] // [ 1-5(B) bytes: message type ] // [ N - B bytes, protobuf-encoded ] // [ max(0, 10 - N - A) bytes, padding ] // // The total length of a message must not exceed 1MiB (1048576 bytes), and the // message type must fit in an unsigned 32-bit integer. Neither value may be // zero. Therefore, the minimum size for each varint is 1 byte, and the maximum // is 5 bytes. The length of the message itself may be zero bytes, if the // message type has no required fields. However, if a message would be less than // 10 bytes, it should be padded with zeroes to ten bytes before being written // to the stream. The length N should not include those bytes. // // The protocol generally uses a single QUIC bidirectional stream for each // session attachment, and describes in the documentation for each message type // which stream it should use. The protocol also optionally makes use of the // [QUIC DATAGRAM extension][quic_datagram] extension, in particular for video // and audio frames. In the case that a message is sent in a datagram frame, the // max length must not exceed QUIC's maximum datagram size. // // QUIC streams are, by their nature, ordered, so messages sent in sequence in a // stream may be considered ordered. However, client and server messages are not // considered ordered with respect to each other, and messages sent as datagrams // are not inherently considered ordered with respect to any stream messages. // Where ordering is important, the protocol includes hints for the client and // server. // // Unless specified in the documentation below, all message fields are optional, // and their absence (in the form of empty values) should be handled gracefully // by the receiver. A field marked as required may not be empty. 
// // ## Protocol Versioning // // The protocol follows Semantic Versioning rules, as defined by this document: // // // As such, servers should support clients using the same major version of the // protocol, and vice versa, with the exception of major version 0 (the current // version), for which these rules are relaxed. // // Compatibility considers the QUIC features used, protobuf wire compatibility // (such as changes to field tags, but not field or message naming), and // required/optional semantics of messages as documented (in particular, adding // a field that is documented as required is a breaking change). // // # Message types // // ## Common types // // Some protobuf messages are reused in multiple messages below. // ### Timestamp // // Represents an instant independent of time zone or local calendar, represented // as the sum of seconds and nanoseconds since the UNIX epoch of January 1st, // 1970. message Timestamp { int64 seconds = 1; // Required. int64 nanos = 2; // Required. } // ### Size, Extent // // A `Size` is a width, height tuple, mainly used to describe areas. An `Extent` // includes a starting position. The coordinate space depends on where these // structs are used, but should always be oriented with [0, 0] in the top left // position. message Size { uint32 width = 1; // Required. uint32 height = 2; // Required. } message Extent { uint32 x = 1; uint32 y = 2; uint32 width = 3; // Required. uint32 height = 4; // Required. } // ### Pixel Scale // // Represents a rational number, used in the context of HiDPI displays. // Fractions less than one are not allowed. For example, a pixel density of 1.5 // would be represented as 3/2. message PixelScale { uint32 numerator = 1; // Required. uint32 denominator = 2; // Required. } // ### Virtual Output Params // // Represents the configuration of a virtual display, which is required to // launch a session. message VirtualDisplayParameters { Size resolution = 1; // Required. 
uint32 framerate_hz = 2; // Required. PixelScale ui_scale = 3; // Required. } // ### Attachment type // // This refers to the manner of attachment. enum AttachmentType { ATTACHMENT_TYPE_UNKNOWN = 0; ATTACHMENT_TYPE_OPERATOR = 1; ATTACHMENT_TYPE_VIEWER = 2; } // ### Video codec // // This refers to the codec used for a video stream. enum VideoCodec { VIDEO_CODEC_UNKNOWN = 0; VIDEO_CODEC_H264 = 1; VIDEO_CODEC_H265 = 2; VIDEO_CODEC_AV1 = 3; } // ### Video profile // // This refers to the profile used for a video stream. Profiles are fully // defined in the output section, below. enum VideoProfile { VIDEO_PROFILE_UNKNOWN = 0; VIDEO_PROFILE_HD = 1; VIDEO_PROFILE_HDR10 = 2; } // ### Audio codec // // This refers to the codec used for an audio stream. enum AudioCodec { AUDIO_CODEC_UNKNOWN = 0; AUDIO_CODEC_OPUS = 1; } // ### Audio channels // // This defines a map of channels to speaker positions. message AudioChannels { enum Channel { CHANNEL_MONO = 0; CHANNEL_FRONT_LEFT = 1; CHANNEL_FRONT_RIGHT = 2; CHANNEL_FRONT_CENTER = 3; CHANNEL_REAR_CENTER = 4; CHANNEL_REAR_LEFT = 5; CHANNEL_REAR_RIGHT = 6; CHANNEL_LFE = 7; CHANNEL_FRONT_LEFT_OF_CENTER = 8; CHANNEL_FRONT_RIGHT_OF_CENTER = 9; CHANNEL_SIDE_LEFT = 10; CHANNEL_SIDE_RIGHT = 11; } repeated Channel channels = 1; } // ### FEC Scheme // // Indicates a Forward Error Correction scheme used to protect packets, // and contains any data needed to locate a chunk within a packet. // // Definitions for the concepts can be found in RFCs 3452 and 5052. message FECMetadata { enum FECScheme { FEC_SCHEME_UNKNOWN = 0; // Uses the RaptorQ scheme defined in RFC 6630. FEC_SCHEME_RAPTORQ = 6; } // Required. Indicates the scheme used. FECScheme fec_scheme = 1; // Required. Contains the scheme-specific serialized Payload ID. bytes fec_payload_id = 2; // Required. Contains the scheme-specific serialized Object Transmission // Information (OTI). bytes fec_oti = 3; } // ### Gamepad // // A gamepad ID and metadata. 
message Gamepad { enum GamepadLayout { GAMEPAD_LAYOUT_UNKNOWN = 0; GAMEPAD_LAYOUT_GENERIC_DUAL_STICK = 1; GAMEPAD_LAYOUT_SONY_DUALSHOCK = 2; } uint64 id = 1; // Required. GamepadLayout layout = 2; // Required. } // ### Application Image Format // // Distinguishes between different images associated with an application. enum ApplicationImageFormat { APPLICATION_IMAGE_FORMAT_UNKNOWN = 0; // A roughly 400x200 image for display in a list of applications. APPLICATION_IMAGE_FORMAT_HEADER = 1; } // ## Errors and exceptions // ### 001 - Error // // This message may be sent by a server or client at any time on any stream. message Error { enum ErrorCode { ERROR_UNKNOWN = 0; // Used to indicate an unrecoverable error on the server. ERROR_SERVER = 10; // Used to indicate a protocol violation. ERROR_PROTOCOL = 20; ERROR_PROTOCOL_UNEXPECTED_MESSAGE = 21; ERROR_PROTOCOL_INCORRECT_STREAM = 22; ERROR_PROTOCOL_UNKNOWN_MESSAGE_TYPE = 23; ERROR_TIMEOUT = 24; ERROR_APPLICATION_NOT_FOUND = 25; ERROR_APPLICATION_NO_IMAGE = 26; // Used to indicate that the server refuses to launch a session. ERROR_SESSION_LAUNCH_FAILED = 30; ERROR_SESSION_LAUNCH_REFUSED = 31; // Used to indicate the session update couldn't be applied. ERROR_SESSION_UPDATE_FAILED = 32; // Used to indicate that the server refuses to allow the client to attach // to the session. ERROR_ATTACHMENT_REFUSED = 40; ERROR_ATTACHMENT_PARAMS_NOT_SUPPORTED = 41; // Used to indicate that the session has ended. ERROR_SESSION_ENDED = 50; ERROR_SESSION_ENDED_BY_CLIENT = 51; ERROR_SESSION_ENDED_APPLICATION_EXIT = 52; // Used for several session operations. ERROR_SESSION_NOT_FOUND = 60; ERROR_SESSION_INVALID_STATE = 61; ERROR_SESSION_PARAMS_NOT_SUPPORTED = 62; // Used to indicate a failed authentication attempt or ignored challenge. ERROR_AUTHENTICATION_FAILED = 100; // Used to indicate missing or insufficient credentials on another request. ERROR_NOT_ALLOWED = 101; } ErrorCode err_code = 1; // Required. 
string error_text = 3; } // ## Sessions and attachments // // A session represents a running application on the server. Creating a session // launches the application in the background. After the client *attaches* to // the session, then and only then must the server start sending video and audio // frames. These frames may either be on the attachment stream or sent // separately as QUIC datagrams. // // If supported by the server and application, sessions may have multiple // attachments, grouped into "operators" and "viewers". // // ### Render vs. streaming resolution // // Sessions are defined by a render resolution (with framerate and scale, // collectively referred to as the virtual display parameters), while individual // attachments are defined by a streaming resolution. The former results in the // resolution of the output texture the application renders to, while the latter // refers to the dimensions of the compressed video stream. // // Servers must support streaming at the exact render resolution, but they may // also optionally support different render and streaming resolutions. The most // common use case for this would be to render at a "super resolution", ie an // integer multiple of the streaming resolution, to improve quality in // environments with limited bandwidth, or to support "preview" attachments // which stream at a very low resolution. // // Servers must either obey the requested render resolution or reject the // corresponding `013 - Launch Session` or `015 - Update Session` message with // an error. Similarly, servers must either obey the requested streaming // resolution or reject the corresponding `030 - Attach` message. // // Servers must always emit encoded frames at the virtual display framerate. // ### Resolution changes // // Servers may choose to update the render resolution of a session at any time, // for example at the request of a client, or in the case that an app requests a // new resolution. 
Servers must inform existing attachments of the new // resolution using the `033 - Session Parameters Changed` message. // Additionally, if the streaming resolution of existing attachments is no // longer compatible with the new resolution, the server may indicate that in // the message. // // ### HiDPI passthrough // // Clients on screens with a pixel density higher than one may inform the server // at session creation time, or request a change to an existing session with // `015 - Update Session`. In any case, the render resolution specified is still // the final resolution, not the "logical" resolution. For example, a client // requesting a `render_resolution` of 2560x1600 with a UI scale of 2 would // still result in a render resolution of 2560x1600; the UI scale should be // passed as a hint to the application in whatever platform-specific way makes // sense. This is important because many applications are able to automatically // scale UI elements or make other user-experience improvements subject to UI // scale. // // ### Quality preset // // Clients can use the `quality_preset` field of the `030 - Attach` message to // tune the quality of the stream, which is inversely related to the bandwidth // usage. The value ranges from 1 to 10, with 1 indicating that the client // wishes the server to optimize for the the lowest possible bandwidth usage, // and 10 indicating that the client wishes the server to optimize for the // highest possible quality. How these values are interpreted is determined by // the server. // // ### Concurrent attachments // // Servers may support multiple concurrent attachments from different clients, // for example to support secondary "viewer" attachments. If the parameters of // the attachments differ, the server may choose to encode multiple streams at // different resolutions, or it may simply choose one (the operator's attachment // parameters should take precedence) and use that for all attachments. 
// TODO: attachments should probably be distinct for audio and video, so that // reattaching doesn't cause audio to skip // ### 011 - List Applications // // This message, which must originate from the client on a new stream, requests // a list of available applications to launch as sessions. The server must // either respond with an `012 - Application List` message or an `001 - Error` // message on the same stream. message ListApplications {} // ### 012 - Application List // // This message, which must originate from the server on the same stream as a // corresponding `011 - List Applications` message, indicates the list of // available applications to launch as sessions. message ApplicationList { message Application { string id = 1; // Required. Must be unique. string description = 2; // A list of path components, used to group applications for display. repeated string folder = 3; // If set, the image can be fetched with a `021 - Fetch Application Image` // message. repeated ApplicationImageFormat images_available = 4; } repeated Application list = 1; } // ### 013 - Launch Session // // This message, which must originate from the client on a new stream, requests // that the server launch the application specified by `id`. The id should match // the id of an application returned by `012 - Application List`. // // The server must either launch a session, replying with `014 - Session // Launched` once the session has started and is available to attach, or send an // `001 - Error` message on the same stream indicating why it refuses to do so. message LaunchSession { // Required; must match the id of an application returned in "12 - Application // List". string application_id = 1; VirtualDisplayParameters display_params = 10; // Required. // Any gamepads that should be available at the start of the session. This is // sometimes important for applications that don't correctly support // hotplugged devices. 
// // These gamepads should be considered permanently connected, and // GamepadUnavailable events should be ignored for them. repeated Gamepad permanent_gamepads = 20; } // ### 014 - Session Launched // // This message, which must originate from the server on the same stream as the // corresponding `013 - Launch Session` message, indicates that the session has // successfully launched and may be attached. message SessionLaunched { uint64 id = 1; // Required. // Required. Must include at least the `render_resolution` specified in the // corresponding `013 - Launch Session` message. repeated Size supported_streaming_resolutions = 10; // TODO supported_sample_rate? } // ### 015 - Update Session // // This message, which must originate from the client on a new stream, requests // that the server update the parameters of a running session. An ommitted value // indicates that the existing setting should remain. The server must respond // with either `016 - Session Updated` or `001 - Error` on the same stream. message UpdateSession { uint64 session_id = 1; // Required. VirtualDisplayParameters display_params = 10; } // ### 016 - Session Updated // // This message, which must originate from the server on the same stream as the // corresponding `015 - Update Session` message, indicates that the requested // update was successfully applied. message SessionUpdated {} // ### 017 - List Sessions // // This message, which must originate from the client on a new stream, requests // a list of attachable sessions. The server must respond with either `018 - // Session List` or an `001 - Error` on the same stream. message ListSessions {} // ### 018 - Session List // // This message, which must originate from the server on the same stream as the // corresponding `017 - List Sessions` request, indicates a list of attachable // sessions to the client. message SessionList { message Session { uint64 session_id = 1; // Required. string application_id = 2; // Required. 
Timestamp session_start = 3; // Required. VirtualDisplayParameters display_params = 10; // Required. // Required. Must include at least the `render_resolution` of the session. repeated Size supported_streaming_resolutions = 13; // Required if any were set in the original `013 - Launch Session` event. repeated Gamepad permanent_gamepads = 20; // TODO attachable type? // TODO existing attachments? } repeated Session list = 1; } // ### 019 - End Session // // This message, which must originate from the client on a new stream, requests // that the server end the named session and detach all clients. // // If a server chooses to comply, it should send `001 - Error` messages to all // other attached clients (with ERR_SESSION_ENDED_BY_CLIENT), and an `020 - // Session Ended` message on this stream. Otherwise, it should send an `001 - // Error` message on this stream. message EndSession { uint64 session_id = 1; // Required. } // ### 020 - Session Ended. // // This message, which must originate from the server on the same stream as the // corresponding `019 - End Session` message, confirms that the session has been // ended. message SessionEnded {} // ### 021 - Fetch Application Image // // This message, which must originate from the client on a new stream, requests // image metadata for an application. The Server must respond with either an // `022 - Application Image` message or an `001 - Error` message on the same // stream. message FetchApplicationImage { string application_id = 1; // Required. ApplicationImageFormat format = 2; // Required. } // ### 022 - Application Image // // This message, which must originate from the server on the same stream as the // corresponding `021 - Fetch Application Image` message, sends the requested // image data to the client. message ApplicationImage { // Required. Must be a complete PNG file and less than 1048576 bytes. Either // restriction may be lifted or in the future. 
bytes image_data = 1; } // ### 030 - Attach // // This message, which must originate from the client on a new stream, requests // that the server attach the client to the named session. Upon receipt of this // request, the server must either refuse the attachment with an `001 - Error` // message, or send an `031 - Attached` message on the same stream and start // sending video and audio packets to the client. // // Ommitted fields indicate that the server should choose the parameters. // // The server may choose to reject the attachment for any reason, including but // not limited to: // // - The output parameters, such as resolution or codec, are invalid or not // supported. // - The server already has a client attached to that session, and wishes to // limit the number of attachments (or doesn't support multiple attachments). // - The authentication so far provided doesn't grant the client access to that // session with that attachment type. message Attach { uint64 session_id = 1; // Required. AttachmentType attachment_type = 2; // Required. string client_name = 3; VideoCodec video_codec = 10; Size streaming_resolution = 11; VideoProfile video_profile = 12; uint32 quality_preset = 13; // Must be in the range 1-10. AudioCodec audio_codec = 15; AudioChannels channels = 16; uint32 sample_rate_hz = 17; } // ### 031 - Attached // // This message, which must originate from the server on the same stream as the // original `030 - Attach` message, indicates that the // server accepts the client and will begin streaming with the client's // requested parameters. The parameters must match the parameters sent in the // original `030 - Attach` message, or represent the server-chosen default if // they were ommitted. message Attached { uint64 session_id = 1; // Required. uint64 attachment_id = 2; // Required. VideoCodec video_codec = 10; // Required. Size streaming_resolution = 11; // Required. VideoProfile video_profile = 12; // Required. 
uint32 quality_preset = 13; // Required. AudioCodec audio_codec = 15; // Required. AudioChannels channels = 16; // Required. uint32 sample_rate_hz = 17; // Required. } // ### 032 - Keep Alive // // This message, which must originate from the client on the stream where the // original `030 - Attach` message was sent, indicates that the client // is still attached. The server may take the absence of a regular `Keep Alive` // message to indicate that the client has gone away should be considered // detached. message KeepAlive {} // ### 033 - Session Parameters Changed // // This message, which must originate from the server on the same stream as the // original `030 - Attach` message, indicates that the parameters of the // attached session have changed. If `reattach_required` is set to true, the // client should consider the attachment to be ended and reattach with new // parameters. message SessionParametersChanged { bool reattach_required = 1; VirtualDisplayParameters display_params = 10; // Required. Must include at least the `render_resolution` of the session. repeated Size supported_streaming_resolutions = 13; } // ### 035 - Detach // // This message, which must originate from the client on the stream where the // original `030 - Attach` message was sent, indicates that the client // wishes to detach and end streaming. Upon receipt of this message, the server // must stop streaming frames or accepting input on the attachment stream. message Detach {} // ## Output // // This section pertains to the application output, streamed from server to // client. // // Output packets, whether audio or video, are always part of a session, an // attachment, and a stream. A session may have multiple attachments, and an // attachment may periodically restart its audio or video stream, resulting in a // new stream. As packets may be too large to send in one datagram, they may be // chunked by the server. 
Therefore, a fourth identifier, a packet sequence // number, is used to group chunks in a sequence of potentially unordered // datagrams. // // All four identifiers (session, attachment, stream, and packet) should be // considered opaque to the client. However, the stream and packet sequence // numbers should only increase monotonically as new packets and new streams are // created. See the section below for more detail. // // The contents of each packet are opaque, and depend on the codec being used. // // Servers should only send packets for one video and one audio stream for one // attachment at a time. // // ### Datagram support // // If both server and client support the QUIC Datagram extension (RFC 9221), // then output packets should be sent as datagrams. If either client or server // do not support datagrams, the chunks must be sent on the same stream as the // original `030 - Attach` message was sent. // // Since datagrams are not associated with any particular QUIC stream, the // `session_id` and `attachment_id` fields of the below messages may be // necessary to disambiguate received chunks. However, to reduce overhead, a // server may omit both fields if sending chunks on the original attachment // stream, rather than as datagrams. // // ### Multiple attachments // // To determine video stream parameters in the case of multiple concurrent // attachments to the same session, operator streams should take precedence. // // ### Video compression // // The following apply to all supported video codecs: // // - The server must tag the video bitstream with resolution, framerate, and // YCbCr color space/range using whatever mechanism is supported by the codec // (for example, PPS/VUI frames in H.264). Clients should use this // information to verify that the parameters match the requested attachment // parameters. 
// - The server must use YCbCr 4:2:0 chroma subsampling for the compressed // stream (this is sometimes called YUV420P, and is the default for most // implementations of H264, H265, and AV1). // - For VIDEO_PROFILE_HD, a bit depth of 8, along with the Rec.709 color space // and limited range must be used. For H.264, H.265, and AV1, this // corresponds to `colour_primaries`, `transfer_characteristics`, and // `matrix_coeffs` all equal to 1, and the `video_full_range_flag` set to 0 // (named `color_range` for AV1). // - For VIDEO_PROFILE_HDR10, a bit depth of 10, along with the Rec. 2100 color // space and limited range must be used. For H.264, H.265, and AV1, this // corresponds to `colour_primaries` and `matrix_coeffs` equal to 9, // `transfer_characteristics` equal to 16, and the `video_full_range_flag` // set to 0 (named `color_range` for AV1). The server should additionally use // SEI headers (or metadata OBUs for AV1) to communicate HDR metadata such as // mastering display color volume (MDCV) and content light level (CLL) // information. // - The server may reuse an existing compression context for a new attachment, // but in this case the stream must be resumable by the client within a // reasonable time frame. For H.265, for example, this means sending headers // with every keyframe, and a keyframe immediately after the attachment // begins. // // ### Audio compression // // The following apply to all supported audio codecs: // // - The server must use a 10ms or smaller packet size. // - Audio streams must use a sample rate of between 16kHz and 48kHz. // ### 051 - Video Chunk // // This message, which must originate from the server as a datagram or on the // same stream as the original `030 - Attach` message, contains a part of a // video packet. 
//
// Much of the metadata associated with the chunk applies to all chunks of a
// given packet, but must be repeated on each chunk such that a client
// receiving the first (potentially out-of-order) chunk has sufficient
// information to deal with it.
message VideoChunk {
  // Required unless sent on the same stream as the original attach message.
  uint64 session_id = 1;
  uint64 attachment_id = 2;

  // Required. Represents the ordering of packets in a stream and the
  // association of packets to a video stream.
  uint64 stream_seq = 10;
  uint64 seq = 11;

  // Required unless an FEC scheme is used. Taken together, these represent the
  // placement of a chunk within a packet.
  //
  // If `fec_metadata` is set, both these fields must be unset.
  uint32 chunk = 12;
  uint32 num_chunks = 13;

  // If the encoder is using hierarchical coding (sometimes called SVC), this
  // field indicates the layer that the packet belongs to.
  uint32 hierarchical_layer = 16;

  // Contains FEC metadata to locate the chunk within the overall packet.
  FECMetadata fec_metadata = 15;

  // Required. A millisecond timestamp with an arbitrary epoch, used to
  // synchronize audio and video streams.
  uint64 timestamp = 20;

  // Required. The chunk of the video packet, or, if an FEC scheme is used, a
  // single symbol from the stream of symbols.
  bytes data = 99;
}

// ### 052 - Request Video Refresh
//
// This message, which must be sent by the client on the same stream as the
// original `030 - Attach` message, requests that the server perform an IDR
// refresh in the current video stream as soon as possible. The server should
// send stream headers and a full keyframe as soon as possible, unless the
// indicated stream is ended or restarted.
message RequestVideoRefresh {
  uint64 stream_seq = 1;
}

// ### 056 - Audio Chunk
//
// This message, which must originate from the server as a datagram or on the
// same stream as the original `030 - Attach` message, contains a part of an
// audio packet.
message AudioChunk {
  // Required unless sent on the same stream as the original attach message.
  uint64 session_id = 1;
  uint64 attachment_id = 2;

  // Required. Represents the ordering of packets in a stream and the
  // association of packets to an audio stream.
  uint64 stream_seq = 10;
  uint64 seq = 11;

  // Required unless an FEC scheme is used. Taken together, these represent the
  // placement of a chunk within a packet.
  //
  // If `fec_metadata` is set, both these fields must be unset.
  uint32 chunk = 12;
  uint32 num_chunks = 13;

  // Contains FEC metadata to locate the chunk within the overall packet.
  FECMetadata fec_metadata = 15;

  // Required. A millisecond timestamp with an arbitrary epoch, used to
  // synchronize audio and video streams.
  uint64 timestamp = 20;

  // Required. The chunk of the audio packet, or, if an FEC scheme is used, a
  // single symbol from the stream of symbols.
  bytes data = 99;
}

// ## Input
//
// Input messages are used by the client to indicate user interaction, whether
// it be via a keyboard, mouse, gamepad, or some other input. Input is always
// scoped to an attachment and sent on the attachment stream.
//
// ### Relative vs absolute cursor motion
//
// Clients are responsible for sending both absolute and relative pointer motion
// events. The two event types are unrelated and do not compound; the former
// represents the visible location of the cursor, while the latter represents
// raw motion vectors from the device.
//
// Absolute motion is indicated by `063 - Pointer Motion` messages, while
// relative motion is indicated by `069 - Relative Pointer Motion` messages.
//
// Absolute motion events are always necessary. Clients may choose to send
// relative motion events only when the cursor is locked by a `067 - Lock
// Pointer` event, until the cursor is released by a corresponding `068 -
// Release Pointer` event.
// ### 060 - Keyboard Input
//
// This message, which must originate from the client on the same stream as the
// original `030 - Attach` message, represents keyboard input from the user.
message KeyboardInput {
  enum KeyState {
    KEY_STATE_UNKNOWN = 0;
    KEY_STATE_PRESSED = 1;
    KEY_STATE_REPEAT = 2;
    KEY_STATE_RELEASED = 3;
  }

  // These map to the keycodes from the W3C "UI Events" specification. It
  // represents the key location, irrespective of keyboard layout or character
  // output.
  //
  // Media and remote control keys are omitted.
  //
  // https://w3c.github.io/uievents-code/#code-value-tables
  enum Key {
    KEY_UNKNOWN = 0;
    KEY_BACKQUOTE = 1;
    KEY_BACKSLASH = 2;
    KEY_BRACKET_LEFT = 3;
    KEY_BRACKET_RIGHT = 4;
    KEY_COMMA = 5;
    KEY_DIGIT_0 = 10;
    KEY_DIGIT_1 = 11;
    KEY_DIGIT_2 = 12;
    KEY_DIGIT_3 = 13;
    KEY_DIGIT_4 = 14;
    KEY_DIGIT_5 = 15;
    KEY_DIGIT_6 = 16;
    KEY_DIGIT_7 = 17;
    KEY_DIGIT_8 = 18;
    KEY_DIGIT_9 = 19;
    KEY_EQUAL = 20;
    KEY_INTL_BACKSLASH = 21;
    KEY_INTL_RO = 22;
    KEY_INTL_YEN = 23;
    KEY_A = 30;
    KEY_B = 31;
    KEY_C = 32;
    KEY_D = 33;
    KEY_E = 34;
    KEY_F = 35;
    KEY_G = 36;
    KEY_H = 37;
    KEY_I = 38;
    KEY_J = 39;
    KEY_K = 40;
    KEY_L = 41;
    KEY_M = 42;
    KEY_N = 43;
    KEY_O = 44;
    KEY_P = 45;
    KEY_Q = 46;
    KEY_R = 47;
    KEY_S = 48;
    KEY_T = 49;
    KEY_U = 50;
    KEY_V = 51;
    KEY_W = 52;
    KEY_X = 53;
    KEY_Y = 54;
    KEY_Z = 55;
    KEY_MINUS = 60;
    KEY_PERIOD = 61;
    KEY_QUOTE = 62;
    KEY_SEMICOLON = 63;
    KEY_SLASH = 64;
    KEY_ALT_LEFT = 65;
    KEY_ALT_RIGHT = 66;
    KEY_BACKSPACE = 67;
    KEY_CAPS_LOCK = 68;
    KEY_CONTEXT_MENU = 69;
    KEY_CONTROL_LEFT = 70;
    KEY_CONTROL_RIGHT = 71;
    KEY_ENTER = 72;
    KEY_META_LEFT = 73;
    KEY_META_RIGHT = 74;
    KEY_SHIFT_LEFT = 75;
    KEY_SHIFT_RIGHT = 76;
    KEY_SPACE = 77;
    KEY_TAB = 78;
    KEY_CONVERT = 79;
    KEY_KANA_MODE = 80;
    KEY_LANG_1 = 81;
    KEY_LANG_2 = 82;
    KEY_LANG_3 = 83;
    KEY_LANG_4 = 84;
    KEY_LANG_5 = 85;
    KEY_NON_CONVERT = 86;
    KEY_DELETE = 87;
    KEY_END = 88;
    KEY_HELP = 89;
    KEY_HOME = 90;
    KEY_INSERT = 91;
    KEY_PAGE_DOWN = 92;
    KEY_PAGE_UP = 93;
    KEY_ARROW_DOWN = 94;
    KEY_ARROW_LEFT = 95;
    KEY_ARROW_RIGHT = 96;
    KEY_ARROW_UP = 97;
    KEY_NUM_LOCK = 100;
    KEY_NUMPAD_0 = 101;
    KEY_NUMPAD_1 = 102;
    KEY_NUMPAD_2 = 103;
    KEY_NUMPAD_3 = 104;
    KEY_NUMPAD_4 = 105;
    KEY_NUMPAD_5 = 106;
    KEY_NUMPAD_6 = 107;
    KEY_NUMPAD_7 = 108;
    KEY_NUMPAD_8 = 109;
    KEY_NUMPAD_9 = 110;
    KEY_NUMPAD_ADD = 111;
    KEY_NUMPAD_BACKSPACE = 112;
    KEY_NUMPAD_CLEAR = 113;
    KEY_NUMPAD_CLEAR_ENTRY = 114;
    KEY_NUMPAD_COMMA = 115;
    KEY_NUMPAD_DECIMAL = 116;
    KEY_NUMPAD_DIVIDE = 117;
    KEY_NUMPAD_ENTER = 118;
    KEY_NUMPAD_EQUAL = 119;
    KEY_NUMPAD_HASH = 120;
    KEY_NUMPAD_MEMORY_ADD = 121;
    KEY_NUMPAD_MEMORY_CLEAR = 122;
    KEY_NUMPAD_MEMORY_RECALL = 123;
    KEY_NUMPAD_MEMORY_STORE = 124;
    KEY_NUMPAD_MEMORY_SUBTRACT = 125;
    KEY_NUMPAD_MULTIPLY = 126;
    KEY_NUMPAD_PAREN_LEFT = 127;
    KEY_NUMPAD_PAREN_RIGHT = 128;
    KEY_NUMPAD_SUBTRACT = 129;
    KEY_ESCAPE = 200;
    KEY_F1 = 201;
    KEY_F2 = 202;
    KEY_F3 = 203;
    KEY_F4 = 204;
    KEY_F5 = 205;
    KEY_F6 = 206;
    KEY_F7 = 207;
    KEY_F8 = 208;
    KEY_F9 = 209;
    KEY_F10 = 210;
    KEY_F11 = 211;
    KEY_F12 = 212;
    KEY_FN = 213;
    KEY_FN_LOCK = 214;
    KEY_PRINT_SCREEN = 215;
    KEY_SCROLL_LOCK = 216;
    KEY_PAUSE = 217;
    KEY_HIRAGANA = 218;
    KEY_KATAKANA = 219;
  }

  Key key = 1;        // Required. The physical key that was pressed.
  KeyState state = 2; // Required.

  // A unicode code point for text input, required unless the keypress would
  // not result in a character.
  //
  // This may be completely unrelated to the physical key, depending on the
  // software keyboard layout on the client side.
  uint32 char = 3;
}

// ### 061 - Pointer Entered
//
// This message, which must be sent by the client on the same stream as the
// original `030 - Attach` message, indicates that the Pointer has entered
// the window area.
message PointerEntered {}

// ### 062 - Pointer Left
//
// This message, which must be sent by the client on the same stream as the
// original `030 - Attach` message, indicates that the Pointer has left the
// window area.
message PointerLeft {}

// ### 063 - Pointer Motion
//
// This message, which must be sent by the client on the same stream as the
// original `030 - Attach` message, indicates that the Pointer has moved to a
// new position.
//
// The coordinates should be in the space defined by the `streaming_resolution`
// field of the `030 - Attach` message.
message PointerMotion {
  double x = 1; // Required.
  double y = 2; // Required.
}

// ### 064 - Pointer Input
//
// This message, which must be sent by the client on the same stream as the
// original `030 - Attach` message, indicates a Pointer button event.
message PointerInput {
  enum ButtonState {
    BUTTON_STATE_UNKNOWN = 0;
    BUTTON_STATE_PRESSED = 1;
    BUTTON_STATE_RELEASED = 2;
  }

  enum Button {
    BUTTON_UNKNOWN = 0;
    BUTTON_LEFT = 1;
    BUTTON_MIDDLE = 2;
    BUTTON_RIGHT = 3;
    BUTTON_BACK = 4;
    BUTTON_FORWARD = 5;
  }

  Button button = 1;     // Required.
  ButtonState state = 2; // Required.
  double x = 3;          // Required.
  double y = 4;          // Required.
}

// ### 065 - Pointer Scroll
//
// This message, which must be sent by the client on the same stream as the
// original `030 - Attach` message, indicates that the user has scrolled,
// either using the mouse wheel, a touchpad, or some other mechanism.
//
// The scroll_type determines how the values of x and y are interpreted.
// `CONTINUOUS` indicates a vector in pixels, in the coordinate space defined
// by the `resolution` parameter of the `VirtualDisplayParams` set on the
// session. Discrete indicates individual steps, for example on a clicky
// scroll wheel.
//
// In both cases, positive values indicate that the scrolled content should
// move right and down, revealing more content to the top and left.
message PointerScroll {
  enum ScrollType {
    SCROLL_TYPE_UNKNOWN = 0;
    SCROLL_TYPE_CONTINUOUS = 1;
    SCROLL_TYPE_DISCRETE = 2;
  }

  double x = 1;
  double y = 2;
  ScrollType scroll_type = 3;
}

// ### 066 - Update Cursor
//
// This message, which must be sent by the server on the same stream as the
// original `030 - Attach` message, indicates that the cursor image has changed
// and the client should use the new one when the cursor is over the window.
message UpdateCursor {
  // Corresponds to the W3C UI specification.
  //
  // https://www.w3.org/TR/css-ui-3/#cursor
  enum CursorIcon {
    CURSOR_ICON_UNKNOWN = 0;
    CURSOR_ICON_AUTO = 1;
    CURSOR_ICON_DEFAULT = 2;
    CURSOR_ICON_NONE = 3;
    CURSOR_ICON_CONTEXT_MENU = 4;
    CURSOR_ICON_HELP = 5;
    CURSOR_ICON_POINTER = 6;
    CURSOR_ICON_PROGRESS = 7;
    CURSOR_ICON_WAIT = 8;
    CURSOR_ICON_CELL = 9;
    CURSOR_ICON_CROSSHAIR = 10;
    CURSOR_ICON_TEXT = 11;
    CURSOR_ICON_VERTICAL_TEXT = 12;
    CURSOR_ICON_ALIAS = 13;
    CURSOR_ICON_COPY = 14;
    CURSOR_ICON_MOVE = 15;
    CURSOR_ICON_NO_DROP = 16;
    CURSOR_ICON_NOT_ALLOWED = 17;
    CURSOR_ICON_GRAB = 18;
    CURSOR_ICON_GRABBING = 19;
    CURSOR_ICON_E_RESIZE = 20;
    CURSOR_ICON_N_RESIZE = 21;
    CURSOR_ICON_NE_RESIZE = 22;
    CURSOR_ICON_NW_RESIZE = 23;
    CURSOR_ICON_S_RESIZE = 24;
    CURSOR_ICON_SE_RESIZE = 25;
    CURSOR_ICON_SW_RESIZE = 26;
    CURSOR_ICON_W_RESIZE = 27;
    CURSOR_ICON_EW_RESIZE = 28;
    CURSOR_ICON_NS_RESIZE = 29;
    CURSOR_ICON_NESW_RESIZE = 30;
    CURSOR_ICON_NWSE_RESIZE = 31;
    CURSOR_ICON_COL_RESIZE = 32;
    CURSOR_ICON_ROW_RESIZE = 33;
    CURSOR_ICON_ALL_SCROLL = 34;
    CURSOR_ICON_ZOOM_IN = 35;
    CURSOR_ICON_ZOOM_OUT = 36;
  }

  // Required.
  CursorIcon icon = 1;

  // The cursor image, encoded as a PNG file. If set, the client should use
  // this and use the icon field solely as a fallback.
  bytes image = 2;

  // Relates to the image, if set.
  uint32 hotspot_x = 3;
  uint32 hotspot_y = 4;
}

// ### 067 - Lock Pointer
//
// This message, which must originate from the server on the same stream as the
// original `030 - Attach` message, indicates that the pointer should be locked
// to the given location.
//
// The coordinates should be in the space defined by the `streaming_resolution`
// field of the `030 - Attach` message.
message LockPointer {
  double x = 1;
  double y = 2;
}

// ### 068 - Release Pointer
//
// This message, which must originate from the server on the same stream as the
// original `030 - Attach` message, indicates the pointer should be no longer
// be locked.
message ReleasePointer {}

// ### 069 - Relative Pointer Motion
//
// This message, which must originate from the client on the same stream as the
// original `030 - Attach` message, indicates that the Pointer has moved.
//
// The vector should be in the space defined by the `streaming_resolution`
// field of the `030 - Attach` message.
message RelativePointerMotion {
  double x = 1; // Required.
  double y = 2; // Required.
}

// ### 070 - Gamepad Available
//
// This message, which must originate from the client on the same stream as the
// original `030 - Attach` message, indicates that a gamepad is available
// on the client.
message GamepadAvailable {
  // Required. The ID should remain stable if the gamepad is unplugged and
  // replugged.
  Gamepad gamepad = 1;
}

// ### 071 - Gamepad Unavailable
//
// This message, which must originate from the client on the same stream as the
// original `030 - Attach` message, indicates that a gamepad is no longer
// available, for example because it was unplugged.
message GamepadUnavailable {
  uint64 id = 1; // Required.
}

// ### 072 - Gamepad Motion
//
// This message, which must originate from the client on the same stream as the
// original `030 - Attach` message, indicates movement on a joystick or trigger.
message GamepadMotion {
  enum GamepadAxis {
    GAMEPAD_AXIS_UNKNOWN = 0;

    // The left and right joysticks on a standard two-stick gamepad.
    GAMEPAD_AXIS_LEFT_X = 1;
    GAMEPAD_AXIS_LEFT_Y = 2;
    GAMEPAD_AXIS_RIGHT_X = 3;
    GAMEPAD_AXIS_RIGHT_Y = 4;

    // The soft triggers on a standard two-stick gamepad, usually called
    // L2 and R2.
    GAMEPAD_AXIS_LEFT_TRIGGER = 5;
    GAMEPAD_AXIS_RIGHT_TRIGGER = 6;
  }

  uint64 gamepad_id = 1; // Required.
  GamepadAxis axis = 2;  // Required.

  // Required, with a value from -1.0 (towards the top of the gamepad) to 1.0
  // (towards the bottom of the gamepad). Zero always represents the resting
  // position, and triggers will therefore usually range from 0.0 to 1.0
  // (fully pressed).
  double value = 3;
}

// ### 073 - Gamepad Input
//
// This message, which must originate from the client on the same stream as the
// original `030 - Attach` message, indicates input from a gamepad button.
message GamepadInput {
  enum GamepadButtonState {
    GAMEPAD_BUTTON_STATE_UNKNOWN = 0;
    GAMEPAD_BUTTON_STATE_PRESSED = 1;
    GAMEPAD_BUTTON_STATE_RELEASED = 2;
  }

  enum GamepadButton {
    GAMEPAD_BUTTON_UNKNOWN = 0;
    GAMEPAD_BUTTON_DPAD_LEFT = 1;
    GAMEPAD_BUTTON_DPAD_RIGHT = 2;
    GAMEPAD_BUTTON_DPAD_UP = 3;
    GAMEPAD_BUTTON_DPAD_DOWN = 4;

    // X on a DualShock/DualSense, A on an Xbox gamepad, and B on a Nintendo
    // gamepad.
    GAMEPAD_BUTTON_SOUTH = 5;
    GAMEPAD_BUTTON_EAST = 6;
    GAMEPAD_BUTTON_NORTH = 7;
    GAMEPAD_BUTTON_WEST = 8;

    // The right and left shoulder buttons, usually called L1 and R1.
    GAMEPAD_BUTTON_SHOULDER_LEFT = 9;
    GAMEPAD_BUTTON_SHOULDER_RIGHT = 10;

    // The left and right joystick buttons, usually called L3 and R3.
    GAMEPAD_BUTTON_JOYSTICK_LEFT = 11;
    GAMEPAD_BUTTON_JOYSTICK_RIGHT = 12;

    // Assorted buttons on the face of the gamepad.
    GAMEPAD_BUTTON_START = 13;
    GAMEPAD_BUTTON_SELECT = 14;
    GAMEPAD_BUTTON_LOGO = 15;
    GAMEPAD_BUTTON_SHARE = 16;

    // Occasionally, gamepads will have another two buttons next to the NESW
    // buttons.
GAMEPAD_BUTTON_C = 17; GAMEPAD_BUTTON_Z = 18; // Very rarely, gamepads will have another set of buttons rather than // triggers. GAMEPAD_BUTTON_TRIGGER_LEFT = 19; GAMEPAD_BUTTON_TRIGGER_RIGHT = 20; } uint64 gamepad_id = 1; // Required. GamepadButton button = 2; // Required. GamepadButtonState state = 3; // Required } ================================================ FILE: mm-protocol/src/timestamp.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT use std::time; use crate::{ProtocolError, Timestamp}; impl TryFrom for std::time::SystemTime { type Error = ProtocolError; fn try_from(value: Timestamp) -> Result { if value.seconds <= 0 || value.nanos < 0 { return Err(ProtocolError::InvalidMessage); } std::time::SystemTime::UNIX_EPOCH .checked_add(time::Duration::from_secs(value.seconds as u64)) .and_then(|ts| ts.checked_add(time::Duration::from_nanos(value.nanos as u64))) .ok_or(ProtocolError::InvalidMessage) } } impl From for Timestamp { fn from(value: time::SystemTime) -> Self { let d = value.duration_since(time::UNIX_EPOCH).unwrap(); Self { seconds: d.as_secs() as i64, nanos: d.subsec_nanos() as i64, } } } ================================================ FILE: mm-server/Cargo.toml ================================================ # Copyright 2024 Colin Marc # # SPDX-License-Identifier: BUSL-1.1 [package] name = "mm-server" version = "0.8.4" edition = "2021" publish = false [[bin]] name = "mmserver" path = "src/main.rs" [dependencies] anyhow = "1" audiopus_sys = { version = "0.2", features = ["static"] } boring = "4" byteorder = "1" bytes = "1" clap = { version = "4", features = ["derive"] } clone3 = "0.2" converge = "0.0.5" crossbeam-channel = "0.5" cstr = "0.2" ctrlc = "3" cursor-icon = "1" dasp = { version = "0.11", features = [ "signal", "interpolate", "interpolate-sinc", "ring_buffer", ] } drm = "0.14" drm-fourcc = "2" either = "1" git-version = "0.3" glam = "0.24" hashbrown = "0.15" image = { version 
= "0.25", default-features = false, features = ["png"] } ip_rfc = "0.1" lazy_static = "1.4" libc = "0.2" libloading = "0.8" listenfd = "1" mdns-sd = "0.11" mio = { version = "1", features = ["net", "os-ext", "os-poll"] } mio-timerfd = "0.2" mktemp = "0.5" mm-protocol = { path = "../mm-protocol" } nix = { version = "0.29", features = ["net", "socket", "uio"] } num_enum = "0.7" octets = "0.2" oneshot = { version = "0.1", default-features = false, features = ["std"] } opus = "0.3" paste = "1" parking_lot = "0.12" pathsearch = "0.2" quiche = { version = "0.23", features = ["boringssl-boring-crate"] } rand = "0.8" raptorq = "2.0" rcgen = "0.12" regex = "1" ring = "0.17" scopeguard = "1.2" serde = "1" serde_json = "1" simple_moving_average = { version = "1" } slotmap = "1" thiserror = "1" threadpool = "1" tiny_id = "0.1" toml = "0.8" tracing = "0.1" tracing-subscriber = { version = "0.3", features = ["env-filter"] } tracing-tracy = { version = "0.11", default-features = false } tracy-client = { version = "0.17", default-features = false } uds = "0.4" uuid = "1" wayland-protocols = { version = "0.32", features = [ "server", "staging", "unstable", ] } wayland-scanner = "0.31" wayland-server = { version = "0.31", features = ["log"] } x11rb = { version = "0.13", features = ["composite"] } [dependencies.ash] git = "https://github.com/ash-rs/ash" rev = "92084df65f52aa15b704279fb6d8d26a3ee71809" [dependencies.fuser] git = "https://github.com/colinmarc/fuser" rev = "643facdc1bcc9a3b11d7a88ebfaaaa045c3596c1" default-features = false [dependencies.pulseaudio] git = "https://github.com/colinmarc/pulseaudio-rs" rev = "70ddb748f20ceecc20e963e571188124aeb30186" [dependencies.rustix] version = "1.0" features = [ "core", "event", "fs", "mm", "mount", "net", "pipe", "time", "thread", "stdio", "system", "process", ] [dependencies.southpaw] git = "https://github.com/colinmarc/southpaw" rev = "e022f2066b300c9600d69bac73e7d8ef7e19f08c" [build-dependencies] system-deps = "6" xkbcommon = { 
version = "0.7", default-features = false } [build-dependencies.slang] git = "https://github.com/colinmarc/slang-rs" rev = "075daa4faa8d1ab6d7bfbb5293812b087a527207" # Uses SLANG_DIR if set, otherwise builds slang from source features = ["from-source"] [package.metadata.system-deps] libavcodec = { version = "6", feature = "ffmpeg_encode" } [features] default = [] tracy = [ "tracy-client/enable", "tracy-client/broadcast", "tracing-tracy/enable", ] [dev-dependencies] pretty_assertions = "*" test-log = { version = "*", features = ["trace"] } [patch.crates-io] mio-timerfd = { git = "https://github.com/colinmarc/mio-timerfd.git" } ================================================ FILE: mm-server/build.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use std::{ffi::CString, path::PathBuf}; use xkbcommon::xkb; extern crate slang; fn main() { system_deps::Config::new().probe().unwrap(); let mut session = slang::GlobalSession::new(); let out_dir = std::env::var("OUT_DIR") .map(std::path::PathBuf::from) .expect("OUT_DIR not set"); compile_shader( &mut session, "src/session/video/composite.slang", out_dir.join("shaders/composite_vert.spv").to_str().unwrap(), "vert", slang::Stage::Vertex, [], ); compile_shader( &mut session, "src/session/video/composite.slang", out_dir.join("shaders/composite_frag.spv").to_str().unwrap(), "frag", slang::Stage::Fragment, [], ); compile_shader( &mut session, "src/session/video/convert.slang", out_dir .join("shaders/convert_multiplanar.spv") .to_str() .unwrap(), "main", slang::Stage::Compute, [], ); compile_shader( &mut session, "src/session/video/convert.slang", out_dir .join("shaders/convert_semiplanar.spv") .to_str() .unwrap(), "main", slang::Stage::Compute, [("SEMIPLANAR", "1")], ); // We need a keymap for the compositor, but it shouldn't affect much, since we // operate generally with physical keycodes and so do games. 
If this proves // limiting, we could allow the configuration of other virtual keyboards. let xkb_ctx = xkb::Context::new(0); save_keymap( &xkb_ctx, out_dir.join("keymaps/iso_us.txt").to_str().unwrap(), "", "pc105", "us", "", None, ); } fn compile_shader<'a>( session: &mut slang::GlobalSession, in_path: &str, out_path: &str, entry_point: &str, stage: slang::Stage, defines: impl IntoIterator, ) { std::fs::create_dir_all(PathBuf::from(out_path).parent().unwrap()) .expect("failed to create output directory"); let mut compile_request = session.create_compile_request(); compile_request .add_search_path("../shader-common") .set_codegen_target(slang::CompileTarget::Spirv) .set_optimization_level(slang::OptimizationLevel::Maximal) .set_target_profile(session.find_profile("glsl_460")); for (name, value) in defines { compile_request.add_preprocessor_define(name, value); } let entry_point = compile_request .add_translation_unit(slang::SourceLanguage::Slang, None) .add_source_file(in_path) .add_entry_point(entry_point, stage); let shader_bytecode = compile_request .compile() .expect("Shader compilation failed."); std::fs::write(out_path, shader_bytecode.get_entry_point_code(entry_point)) .expect("failed to write shader bytecode to file"); println!("cargo::rerun-if-changed={}", in_path); } fn save_keymap( ctx: &xkb::Context, out_path: &str, rules: &str, model: &str, layout: &str, variant: &str, options: Option<&str>, ) { std::fs::create_dir_all(PathBuf::from(out_path).parent().unwrap()) .expect("failed to create output directory"); let keymap = xkb::Keymap::new_from_names( ctx, rules, model, layout, variant, options.map(|s| s.to_string()), xkb::KEYMAP_COMPILE_NO_FLAGS, ) .expect("failed to create keymap"); let s = keymap.get_as_string(xkb::FORMAT_TEXT_V1); std::fs::write(out_path, CString::new(s).unwrap().to_bytes_with_nul()) .expect("failed to write keymap bytes to file"); } ================================================ FILE: mm-server/deny.toml 
================================================ [licenses] allow = [ "MIT", "Apache-2.0", "Apache-2.0 WITH LLVM-exception", "BSD-2-Clause", "BSD-3-Clause", "Zlib", "ISC", "MPL-2.0", "Unicode-3.0", "Unicode-DFS-2016", ] confidence-threshold = 0.8 [licenses.private] ignore = true [advisories] ignore = ["RUSTSEC-2024-0436"] ================================================ FILE: mm-server/src/codec.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use std::sync::Arc; use anyhow::anyhow; use mm_protocol as protocol; use crate::vulkan::VkContext; /// A codec used for an attachment video stream. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum VideoCodec { H264, H265, Av1, } /// A codec used for an attachment audio stream. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum AudioCodec { Opus, } impl TryFrom for VideoCodec { type Error = anyhow::Error; fn try_from(codec: protocol::VideoCodec) -> anyhow::Result { match codec { protocol::VideoCodec::Unknown => Err(anyhow!("codec unset")), protocol::VideoCodec::H264 => Ok(Self::H264), protocol::VideoCodec::H265 => Ok(Self::H265), protocol::VideoCodec::Av1 => Ok(Self::Av1), } } } impl From for protocol::VideoCodec { fn from(codec: VideoCodec) -> Self { match codec { VideoCodec::H264 => protocol::VideoCodec::H264, VideoCodec::H265 => protocol::VideoCodec::H265, VideoCodec::Av1 => protocol::VideoCodec::Av1, } } } impl TryFrom for AudioCodec { type Error = anyhow::Error; fn try_from(codec: protocol::AudioCodec) -> anyhow::Result { match codec { protocol::AudioCodec::Unknown => Err(anyhow!("codec unset")), protocol::AudioCodec::Opus => Ok(Self::Opus), } } } impl From for protocol::AudioCodec { fn from(codec: AudioCodec) -> Self { match codec { AudioCodec::Opus => protocol::AudioCodec::Opus, } } } pub fn probe_codec(_vk: Arc, codec: VideoCodec) -> bool { match codec { VideoCodec::H264 if _vk.device_info.supports_h264 => true, VideoCodec::H265 if 
_vk.device_info.supports_h265 => true, _ => false, } } ================================================ FILE: mm-server/src/color.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 #![allow(dead_code)] use mm_protocol as protocol; /// A combination of color primaries, white point, and transfer function. We /// generally ignore white point, since we deal only with colorspaces using the /// D65 white point. #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum ColorSpace { /// Uses BT.709 primaries and the sRGB transfer function. Srgb, /// Uses BT.709 primaries and a linear transfer function. Usually encoded as /// a float with negative values and values above 1.0 used to represent the /// extended space. LinearExtSrgb, /// Uses BT.2020 primaries and the ST2084 (PQ) transfer function. Hdr10, } impl ColorSpace { pub fn from_primaries_and_tf( primaries: Primaries, transfer_function: TransferFunction, ) -> Option { match (primaries, transfer_function) { (Primaries::Srgb, TransferFunction::Srgb) => Some(ColorSpace::Srgb), (Primaries::Srgb, TransferFunction::Linear) => Some(ColorSpace::LinearExtSrgb), (Primaries::Bt2020, TransferFunction::Pq) => Some(ColorSpace::Hdr10), _ => None, } } } // A configuration for a compressed video bitstream. #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum VideoProfile { // Uses a bit depth of 8, BT.709 primaries and transfer function. Hd, // Uses a bit depth of 10, BT.2020 primaries and the ST2084 (PQ) transfer function. 
Hdr10, } impl TryFrom for VideoProfile { type Error = String; fn try_from(profile: protocol::VideoProfile) -> Result { match profile { protocol::VideoProfile::Hd => Ok(VideoProfile::Hd), protocol::VideoProfile::Hdr10 => Ok(VideoProfile::Hdr10), _ => Err("invalid video profile".into()), } } } impl From for protocol::VideoProfile { fn from(profile: VideoProfile) -> Self { match profile { VideoProfile::Hd => protocol::VideoProfile::Hd, VideoProfile::Hdr10 => protocol::VideoProfile::Hdr10, } } } #[derive(Debug, Clone, Copy)] pub enum TransferFunction { Linear, Srgb, Pq, } #[derive(Debug, Clone, Copy)] pub enum Primaries { Srgb, Bt2020, } ================================================ FILE: mm-server/src/config.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use std::{ collections::BTreeMap, ffi::{OsStr, OsString}, net::ToSocketAddrs, num::NonZeroU32, path::{Component, Path, PathBuf}, time, }; use anyhow::{bail, Context}; use lazy_static::lazy_static; use regex::Regex; use tracing::trace; lazy_static! { static ref ID_RE: Regex = Regex::new(r"\A[a-z][a-z0-9-_]{0,256}\z").unwrap(); static ref DESCRIPTION_RE: Regex = Regex::new(r"\A[A-Za-z0-9-_:() ]{0,256}\z").unwrap(); static ref PATH_COMPONENT_RE: Regex = Regex::new(r"\A[A-Za-z0-9-_ ]{0,64}\z").unwrap(); static ref DEFAULT_CFG: parsed::Config = toml::from_str(include_str!("../../mmserver.default.toml")).unwrap(); } const MAX_APP_PATH_COMPONENTS: usize = 8; pub const MAX_IMAGE_SIZE: u64 = 1024 * 1024; /// Serde representations of the configuration files. 
mod parsed { use std::{collections::BTreeMap, num::NonZeroU32, path::PathBuf}; use converge::Converge; use serde::Deserialize; #[derive(Debug, Clone, PartialEq)] pub(super) enum NonZeroOrInf { Value(NonZeroU32), Infinity, } impl<'de> Deserialize<'de> for NonZeroOrInf { fn deserialize(deserializer: D) -> Result where D: serde::Deserializer<'de>, { #[derive(Deserialize)] #[serde(untagged, expecting = "a positive integer or \"inf\"")] enum Variant { Value(NonZeroU32), Infinity(f64), } match Deserialize::deserialize(deserializer)? { Variant::Value(n) => Ok(NonZeroOrInf::Value(n)), Variant::Infinity(f) => { if f.is_infinite() { Ok(NonZeroOrInf::Infinity) } else { Err(serde::de::Error::invalid_value( serde::de::Unexpected::Float(f), &"a positive integer or \"inf\"", )) } } } } } #[derive(Debug, Clone, PartialEq, Deserialize, Converge)] pub(super) struct Config { pub(super) include_apps: Option>, pub(super) apps: Option>, pub(super) data_home: Option, #[converge(nest)] pub(super) server: Option, #[converge(nest)] pub(super) default_app_settings: Option, } #[derive(Debug, Clone, PartialEq, Deserialize, Converge)] #[serde(deny_unknown_fields)] pub(super) struct ServerConfig { pub(super) bind: Option, pub(super) bind_systemd: Option, pub(super) tls_cert: Option, pub(super) tls_key: Option, pub(super) worker_threads: Option, pub(super) max_connections: Option, pub(super) mdns: Option, pub(super) mdns_hostname: Option, pub(super) mdns_instance_name: Option, pub(super) video_fec_ratios: Option>, } #[derive(Debug, Clone, PartialEq, Deserialize, Converge)] #[serde(deny_unknown_fields)] pub(super) struct DefaultAppSettings { pub(super) xwayland: Option, pub(super) force_1x_scale: Option, pub(super) session_timeout: Option, pub(super) isolate_home: Option, pub(super) tmp_home: Option, } #[derive(Debug, Clone, PartialEq, Deserialize)] #[serde(deny_unknown_fields)] pub(super) struct AppConfig { pub(super) app_path: Option, pub(super) description: Option, pub(super) header_image: 
Option, pub(super) command: Vec, pub(super) environment: Option>, pub(super) xwayland: Option, pub(super) force_1x_scale: Option, pub(super) session_timeout: Option, pub(super) isolate_home: Option, pub(super) shared_home_name: Option, pub(super) tmp_home: Option, } } #[derive(Debug, Clone, PartialEq)] pub struct Config { pub server: ServerConfig, pub apps: BTreeMap, pub data_home: PathBuf, pub bug_report_dir: Option, } #[derive(Debug, Clone, PartialEq)] pub struct ServerConfig { pub bind: String, pub bind_systemd: bool, pub tls_cert: Option, pub tls_key: Option, pub worker_threads: NonZeroU32, pub max_connections: Option, pub mdns: bool, pub mdns_hostname: Option, pub mdns_instance_name: Option, pub video_fec_ratios: Vec, } #[derive(Debug, Clone, PartialEq)] pub struct AppConfig { pub description: Option, pub path: Vec, pub header_image: Option, pub command: Vec, pub env: BTreeMap, pub xwayland: bool, pub force_1x_scale: bool, pub session_timeout: Option, pub home_isolation_mode: HomeIsolationMode, } #[derive(Debug, Clone, PartialEq, Eq)] pub enum HomeIsolationMode { Unisolated, Tmpfs, Permanent(PathBuf), } impl Config { pub fn new(path: Option<&PathBuf>, includes: &[PathBuf]) -> anyhow::Result { let file = path .map(|p| p.to_owned()) .or_else(locate_default_config_file); let cfg = if let Some(file) = file { let content = std::fs::read_to_string(&file)?; let parsed: parsed::Config = toml::from_str(&content) .context(format!("parsing configuration file {}", file.display()))?; Some(parsed) } else { None }; let this = Self::build(cfg, includes)?; this.validate()?; Ok(this) } fn build(cfg: Option, includes: &[PathBuf]) -> anyhow::Result { // This is the parsed mmserver.defaults.toml. let defaults = DEFAULT_CFG.clone(); let input = if let Some(cfg) = cfg { // Merge the default config with the input config, giving the input // precedence. 
use converge::Converge; cfg.converge(defaults) } else { defaults }; let data_home = input.data_home.or_else(|| { if let Ok(xdg_data_home) = std::env::var("XDG_DATA_HOME") { Some(Path::new(&xdg_data_home).join("mmserver")) } else if let Ok(home) = std::env::var("HOME") { Some( Path::new(&home) .join(".local") .join("share") .join("mmserver"), ) } else { None } }); let data_home = data_home.ok_or(anyhow::anyhow!( "failed to determine `data_home`. Set it explicitly or set one of $HOME or \ $XDG_DATA_HOME" ))?; // We only unwrap values that should have been set in the default // config. This is verified by a test. let server = input.server.unwrap(); let default_app_settings = input.default_app_settings.unwrap(); let mut this = Config { server: ServerConfig { bind: server.bind.unwrap(), bind_systemd: server.bind_systemd.unwrap(), tls_cert: server.tls_cert, tls_key: server.tls_key, worker_threads: server.worker_threads.unwrap(), max_connections: match server.max_connections.unwrap() { parsed::NonZeroOrInf::Value(n) => Some(n), parsed::NonZeroOrInf::Infinity => None, }, mdns: server.mdns.unwrap(), mdns_hostname: server.mdns_hostname, mdns_instance_name: server.mdns_instance_name, video_fec_ratios: server.video_fec_ratios.unwrap(), }, data_home: data_home.clone(), apps: BTreeMap::new(), // Handled below. bug_report_dir: None, // This is only set from the command line. }; // Collect additional app definitions from app_dirs. let cfg_includes = input.include_apps.unwrap_or_default(); let includes = cfg_includes.iter().chain(includes); let apps = input.apps.unwrap_or_default(); let additional_apps = includes .map(|p| collect_includes(p).context(format!("searching {}", p.display()))) .collect::, _>>()? 
.into_iter() .flatten(); for (id, app) in apps.into_iter().chain(additional_apps) { if this.apps.contains_key(&id) { bail!("duplicate app name: {}", id); } let app = validate_app(&id, app, &default_app_settings, &data_home) .context(format!("failed to load app config for '{}'", id))?; this.apps.insert(id, app); } trace!("using config: {:#?}", this); Ok(this) } /// Performs high-level validation on the final configuration. fn validate(&self) -> anyhow::Result<()> { if self.apps.is_empty() { bail!("at least one application must be defined"); } for (name, app) in &self.apps { if app.command.is_empty() { bail!("empty command for application {name:?}"); } } let addr = self .server .bind .to_socket_addrs() .map(|mut addrs| addrs.next().unwrap()) .map_err(|_| anyhow::anyhow!("invalid address \"{}\"", self.server.bind))?; // Check that TLS is set up (for non-private addresses). let ip = addr.ip(); let tls_required = (ip_rfc::global(&ip) || ip.is_unspecified()) && (self.server.tls_cert.is_none() || self.server.tls_key.is_none()); if tls_required && (self.server.tls_cert.is_none() || self.server.tls_key.is_none()) { bail!("TLS required for non-private address \"{}\"", addr); } // Validate that the TLS cert and key exist. match self.server.tls_cert { Some(ref cert) if !cert.exists() => { bail!("TLS certificate not found at {}", cert.display()); } _ => {} } match self.server.tls_key { Some(ref key) if !key.exists() => { bail!("TLS private key not found at {}", key.display()); } _ => {} } Ok(()) } } impl Default for Config { fn default() -> Self { Config::build(None, &[]).expect("failed to build default config") } } fn collect_includes(p: impl AsRef) -> anyhow::Result> { let mut res = Vec::new(); let p = p.as_ref(); if !p.is_dir() { return Ok(vec![include_file(p)?]); } for entry in p.read_dir()? 
{ let entry = entry?; match entry.file_type() { Ok(t) if t.is_file() => { let path = entry.path(); let ext = path.extension().and_then(OsStr::to_str); if matches!(ext, Some("toml") | Some("json")) { res.push(include_file(&path).context(format!("reading {}", path.display()))?) } } _ => continue, } } Ok(res) } fn include_file(p: impl AsRef) -> anyhow::Result<(String, parsed::AppConfig)> { let p = p.as_ref(); let name = p .file_stem() .and_then(OsStr::to_str) .ok_or_else(|| anyhow::anyhow!("invalid file name"))?; let content = std::fs::read_to_string(p)?; let app = match p.extension().and_then(OsStr::to_str) { Some("toml") => toml::from_str(&content)?, Some("json") => serde_json::from_str(&content)?, _ => bail!("invalid file extension"), }; Ok((name.to_owned(), app)) } fn locate_default_config_file() -> Option { const BASENAME: &str = "/etc/magic-mirror/mmserver"; for ext in &["toml", "json"] { let path = PathBuf::from(BASENAME).with_extension(ext); if path.exists() { return Some(path); } } None } fn validate_app( id: &str, app: parsed::AppConfig, defaults: &parsed::DefaultAppSettings, data_home: &Path, ) -> anyhow::Result { if !ID_RE.is_match(id) { bail!("invalid name: {}", id); } if app .description .as_ref() .is_some_and(|desc| !DESCRIPTION_RE.is_match(desc)) { bail!("invalid description: {}", app.description.unwrap()) } let path = match app.app_path { None => Vec::new(), Some(p) => validate_app_path(p)?, }; if let Some(p) = &app.header_image { let len = p.metadata()?.len(); if len > MAX_IMAGE_SIZE { bail!( "image is {} bytes, over the maximum of {}: {}", len, MAX_IMAGE_SIZE, p.display() ); } } let session_timeout = match app .session_timeout .or(defaults.session_timeout.clone()) .unwrap() { parsed::NonZeroOrInf::Value(v) => Some(time::Duration::from_secs(v.get() as u64)), parsed::NonZeroOrInf::Infinity => None, }; let isolate_home = app.isolate_home.or(defaults.isolate_home).unwrap(); let tmp_home = app.tmp_home.or(defaults.tmp_home).unwrap(); let 
home_isolation_mode = match (isolate_home, tmp_home) { (false, true) => bail!("if isolate_home = false, tmp_home must also be false"), (false, false) => HomeIsolationMode::Unisolated, (true, true) => HomeIsolationMode::Tmpfs, (true, false) => { if let Some(s) = app.shared_home_name { if !ID_RE.is_match(&s) { bail!("invalid shared_home_name: {s}",) } HomeIsolationMode::Permanent(data_home.join("homes").join(s)) } else { HomeIsolationMode::Permanent(data_home.join("homes").join(id)) } } }; Ok(AppConfig { path, description: app.description, header_image: app.header_image, command: app.command.into_iter().map(OsString::from).collect(), env: app .environment .unwrap_or_default() .into_iter() .map(|(k, v)| (OsString::from(k), OsString::from(v))) .collect(), xwayland: app.xwayland.or(defaults.xwayland).unwrap(), force_1x_scale: app.force_1x_scale.or(defaults.force_1x_scale).unwrap(), session_timeout, home_isolation_mode, }) } fn validate_app_path(p: String) -> anyhow::Result> { let components = Path::new(&p).components(); let mut out = Vec::new(); for component in components { if let Some(s) = validate_app_path_component(component) { out.push(s); } else { bail!("invalid path compontent: {:?}", component.as_os_str()); } } if out.len() > MAX_APP_PATH_COMPONENTS { bail!("app_path has too many components"); } Ok(out) } fn validate_app_path_component(component: Component) -> Option { match component { Component::Normal(s) => { let comp = s.to_str()?; if !PATH_COMPONENT_RE.is_match(comp) { None } else { Some(comp.trim().to_owned()) } } _ => None, } } #[cfg(test)] mod test { use pretty_assertions::assert_eq; use super::*; lazy_static! 
{ static ref EXAMPLE_APP: AppConfig = AppConfig { path: Vec::new(), description: None, header_image: None, command: vec!["echo".to_owned().into(), "hello".to_owned().into()], env: Default::default(), xwayland: true, force_1x_scale: false, session_timeout: Some(time::Duration::from_secs(3600)), home_isolation_mode: HomeIsolationMode::Unisolated, }; } fn config_from_str(s: &str) -> anyhow::Result { let input: parsed::Config = toml::from_str(s)?; Config::build(Some(input), &[]) } #[test] fn test_default() { let mut config = Config::default(); config .apps .insert("example".to_string(), EXAMPLE_APP.clone()); config.validate().expect("default config is valid"); assert_eq!(config.server.bind, "localhost:9599"); } #[test] fn test_only_app() { let config = config_from_str( r#" [apps.example] command = ["echo", "hello"] isolate_home = false "#, ) .unwrap(); config.validate().expect("empty config is valid"); let mut expected = Config::default(); expected .apps .insert("example".to_string(), EXAMPLE_APP.clone()); assert_eq!(config, expected); } #[test] fn tls_required_for_global_addr() { let config = config_from_str( r#" [server] bind = "8.8.8.8:9599" [apps.example] command = ["echo", "hello"] "#, ) .unwrap(); eprintln!("{:?}", config.server); match config.validate() { Err(e) => { assert_eq!( e.to_string(), "TLS required for non-private address \"8.8.8.8:9599\"" ) } _ => panic!("expected error"), } } #[test] fn tls_required_for_unspecified() { let config = config_from_str( r#" [server] bind = "[::]:9599" [apps.example] command = ["echo", "hello"] "#, ) .unwrap(); match config.validate() { Err(e) => { assert_eq!( e.to_string(), "TLS required for non-private address \"[::]:9599\"" ) } _ => panic!("expected error"), } } #[test] fn tls_not_required_for_tailscale() { let config = config_from_str( r#" [server] bind = "100.64.123.45:9599" [apps.example] command = ["echo", "hello"] "#, ) .unwrap(); config .validate() .expect("TLS not required for shared NAT address"); } #[test] fn 
app_paths() {
        assert!(validate_app_path("foo!".into()).is_err());
        assert!(validate_app_path("C:\\\\foo\\bar".into()).is_err());

        let expected: Vec<String> = vec!["Foo Bar".into(), "Baz".into(), "Qux".into()];
        assert_eq!(
            expected,
            validate_app_path("Foo Bar/ Baz/Qux ".into()).unwrap()
        )
    }
}


================================================
FILE: mm-server/src/container/ipc.rs
================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

use std::os::fd::{AsFd, AsRawFd, FromRawFd, OwnedFd};
use std::{io, time};

use rustix::event::{eventfd, poll, EventfdFlags, PollFd, PollFlags};
use rustix::io::{read, write, Errno};

/// An IPC barrier using eventfd(2).
pub struct EventfdBarrier {
    a: OwnedFd,
    b: OwnedFd,
    // Distinguishes the two halves of the pair, so each side signals one fd
    // and waits on the other.
    other: bool,
}

impl EventfdBarrier {
    /// Creates the two connected halves of the barrier.
    pub fn new() -> io::Result<(Self, Self)> {
        let a = eventfd(0, EventfdFlags::NONBLOCK)?;
        let b = eventfd(0, EventfdFlags::NONBLOCK)?;
        let a2 = a.try_clone()?;
        let b2 = b.try_clone()?;

        Ok((
            Self { a, b, other: false },
            Self {
                a: a2,
                b: b2,
                other: true,
            },
        ))
    }

    // Waits at the barrier, timing out after the given duration.
    pub fn sync(&self, timeout: time::Duration) -> rustix::io::Result<()> {
        // The two sides perform the signal/wait steps in opposite order, so
        // both return only once both have arrived.
        if self.other {
            wait_eventfd(&self.a, timeout)?;
            signal_eventfd(&self.b)?;
        } else {
            signal_eventfd(&self.a)?;
            wait_eventfd(&self.b, timeout)?;
        }

        Ok(())
    }
}

/// Creates an IPC channel for sending a file descriptor.
pub fn fd_oneshot() -> io::Result<(FdSender, FdReceiver)> {
    let (sender, receiver) = uds::UnixSeqpacketConn::pair()?;
    Ok((FdSender(sender), FdReceiver(receiver)))
}

pub struct FdSender(uds::UnixSeqpacketConn);

impl FdSender {
    /// Sends `fd` over the channel, consuming this end.
    pub fn send_timeout(self, fd: OwnedFd, timeout: time::Duration) -> io::Result<()> {
        self.0.set_write_timeout(Some(timeout))?;

        let raw_fd = fd.as_raw_fd();
        self.0.send_fds(&[], &[raw_fd])?;

        // The FD gets dropped here, along with our end of the connection.
        Ok(())
    }
}

pub struct FdReceiver(uds::UnixSeqpacketConn);

impl FdReceiver {
    /// Receives a single file descriptor, consuming this end.
    pub fn recv_timeout(self, timeout: time::Duration) -> io::Result<OwnedFd> {
        self.0.set_read_timeout(Some(timeout))?;

        let mut fds = [-1];
        self.0.recv_fds(&mut [], &mut fds)?;
        // A message that carried no fd leaves the sentinel -1 in place.
        if fds[0] <= 0 {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                "unexpected message received",
            ));
        }

        let fd = unsafe { OwnedFd::from_raw_fd(fds[0]) };
        Ok(fd)
    }
}

// Writes 1 to the eventfd, retrying on EINTR.
fn signal_eventfd(fd: impl AsFd) -> rustix::io::Result<()> {
    loop {
        match write(&fd, &1_u64.to_ne_bytes()).map(|_| ()) {
            Err(Errno::INTR) => continue,
            v => return v,
        }
    }
}

// Polls the eventfd until it's readable (or the timeout expires), then drains
// the counter.
fn wait_eventfd(fd: impl AsFd, timeout: time::Duration) -> rustix::io::Result<()> {
    let mut pollfd = [PollFd::new(&fd, PollFlags::IN)];
    let mut buf = [0; 8];

    let timespec = timeout.try_into().expect("invalid duration");
    loop {
        match poll(&mut pollfd, Some(&timespec)) {
            // poll(2) returns 0 on timeout.
            Ok(0) => return Err(Errno::TIMEDOUT),
            Ok(_) => return read(fd, &mut buf).map(|_| ()),
            Err(Errno::INTR) => continue,
            Err(e) => return Err(e),
        }
    }
}


================================================
FILE: mm-server/src/container/runtime.rs
================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

use std::{
    ffi::{CStr, CString, OsStr, OsString},
    fs::OpenOptions,
    io,
    os::{
        fd::{AsFd, AsRawFd as _, BorrowedFd, FromRawFd as _, OwnedFd},
        unix::process::CommandExt as _,
    },
    path::{Path, PathBuf},
    process::Command,
    str::FromStr as _,
    time,
};

use anyhow::{anyhow, bail, Context as _};
use pathsearch::{find_executable_in_path, unix::is_executable};
use rand::distributions::{Alphanumeric, DistString as _};
use rustix::{
    fs::{mkdirat, openat, symlinkat, FileType, Gid, Mode, OFlags, Uid, CWD as AT_FDCWD},
    io::{fcntl_dupfd_cloexec, write, Errno},
    mount::{
        fsconfig_create, fsconfig_set_flag, fsconfig_set_string, fsmount, fsopen, move_mount,
        open_tree, FsMountFlags, FsOpenFlags, MountAttrFlags, MoveMountFlags, OpenTreeFlags,
    },
    process::{getgid, getuid, set_parent_process_death_signal,
waitpid, Pid, Signal, WaitOptions},
    thread::{move_into_link_name_space, LinkNameSpaceType},
};
use tracing::debug;

use super::ipc;
use crate::config::HomeIsolationMode;

// In CPU-constrained testing environments, we sometimes need to wait
// to get scheduled.
#[cfg(test)]
const SYNC_TIMEOUT: time::Duration = time::Duration::from_secs(5);
#[cfg(not(test))]
const SYNC_TIMEOUT: time::Duration = time::Duration::from_secs(1);

/// A device node (or directory of nodes) to bind-mount into the container's
/// /dev, if it exists on the host.
#[derive(Debug, Clone, Copy)]
struct DevBindMount {
    path: &'static str,
    is_dir: bool,
}

const DEV_BIND_MOUNTS: &[DevBindMount] = &[
    DevBindMount {
        path: "/dev/null",
        is_dir: false,
    },
    DevBindMount {
        path: "/dev/zero",
        is_dir: false,
    },
    DevBindMount {
        path: "/dev/full",
        is_dir: false,
    },
    DevBindMount {
        path: "/dev/tty",
        is_dir: false,
    },
    DevBindMount {
        path: "/dev/random",
        is_dir: false,
    },
    DevBindMount {
        path: "/dev/urandom",
        is_dir: false,
    },
    DevBindMount {
        path: "/dev/dri",
        is_dir: true,
    },
    DevBindMount {
        path: "/dev/fuse",
        is_dir: false,
    },
    // Needed for NVIDIA proprietary drivers.
    DevBindMount {
        path: "/dev/nvidiactl",
        is_dir: false,
    },
    DevBindMount {
        path: "/dev/nvidia0",
        is_dir: false,
    },
    DevBindMount {
        path: "/dev/nvidia-modeset",
        is_dir: false,
    },
    DevBindMount {
        path: "/dev/nvidia-uvm",
        is_dir: false,
    },
    DevBindMount {
        path: "/dev/nvidia-uvm-tools",
        is_dir: false,
    },
    DevBindMount {
        path: "/dev/nvidia-caps",
        is_dir: true,
    },
];

// A stderr writer that bypasses std's buffered/locking stderr; usable after
// fork, where we must not allocate.
#[cfg(debug_assertions)]
struct UnbufferedStderr<'a>(BorrowedFd<'a>);

#[cfg(debug_assertions)]
impl std::fmt::Write for UnbufferedStderr<'_> {
    fn write_str(&mut self, s: &str) -> std::fmt::Result {
        write(self.0, s.as_bytes()).map_err(|_| std::fmt::Error)?;
        Ok(())
    }
}

// Debug logging for the pre-exec (post-fork) phase; compiles to nothing in
// release builds.
#[cfg(debug_assertions)]
macro_rules! preexec_debug {
    ($($arg:tt)+) => {
        #[allow(unused_imports)]
        use std::fmt::Write as _;

        let mut stderr = UnbufferedStderr(rustix::stdio::stderr());
        let _ = std::write!(stderr, "[PRE-EXEC] ");
        let _ = std::writeln!(stderr, $($arg)*);
    }
}

#[cfg(not(debug_assertions))]
macro_rules! preexec_debug {
    ($($arg:tt)*) => {};
}

// Unwraps a rustix result or exits the process. Panicking would allocate,
// which isn't safe after fork.
unsafe fn _must<T>(_op: &str, res: rustix::io::Result<T>) -> T {
    loop {
        match res {
            Ok(v) => return v,
            // NOTE(review): `res` is computed before the loop and never
            // changes, so an INTR result spins forever here; presumably
            // callers never observe INTR in practice — confirm upstream.
            Err(Errno::INTR) => continue,
            Err(_e) => {
                #[cfg(debug_assertions)]
                {
                    use std::fmt::Write as _;

                    let mut stderr = UnbufferedStderr(rustix::stdio::stderr());
                    let _ = std::writeln!(stderr, "[PRE-EXEC] {_op}: {_e}");
                    let _ = std::writeln!(stderr);
                }

                libc::_exit(1);
            }
        }
    }
}

// Calls a fallible function and exits (with a debug message) on failure.
macro_rules! must {
    ($n:ident( $($args:tt)* )) => {{
        let res = $n( $($args)* );
        _must(stringify!($n), res)
    }};
}

// NOTE(review): boxed-closure signature reconstructed from `setup_hook` below.
type SetupHook = Box<dyn FnOnce(&mut super::ContainerHandle) -> anyhow::Result<()>>;

/// A lightweight linux container. Currently we use the following namespaces:
///  - A mount namespace, to mount tmpfs on /dev, /tmp, /run, etc, and
///    potentially to isolate home as well. We don't pivot_root/chroot.
///  - A PID namespace, so that processes get cleaned up when a session ends.
///    Note that we currently don't use a "stub init" process to handle
///    reparenting or reaping, since we don't expect to spawn lots of
///    grandchild processes.
///  - A user namespace, to enable the above. We just map the current user to
///    itself.
///
/// IMPORTANT: This container is not a secure container. Under NO CIRCUMSTANCES
/// should you use it to run untrusted code. Any security benefits are purely
/// incidental; this is more about containing mess (I'm looking at you, Steam).
pub struct Container {
    child_cmd: Command,
    // Note: we don't use Command::env or Command::env_clear, because those
    // cause Command::exec to allocate, which we don't want to do after
    // forking.
    envs: Vec<CString>,
    tmp_stderr: Option<OwnedFd>,
    extern_home_path: Option<PathBuf>,
    intern_home_path: PathBuf,
    clear_home: bool,
    intern_run_path: PathBuf,
    extern_run_path: PathBuf,
    additional_bind_mounts: Vec<(PathBuf, PathBuf)>,
    internal_bind_mounts: Vec<(PathBuf, PathBuf, bool)>,
    // Stores a closure to run before unfreeze.
setup_hooks: Vec, uid: Uid, gid: Gid, } impl Container { pub fn new( mut args: Vec, home_isolation_mode: HomeIsolationMode, ) -> anyhow::Result { let exe_path = validate_exe(args.remove(0))?; let mut envs = Vec::new(); for key in [ "PATH", "USER", "SHELL", "EDITOR", "LANG", "LC_ALL", "LC_ADDRESS", "LC_IDENTIFICATION", "LC_MEASUREMENT", "LC_MONETARY", "LC_NAME", "LC_NUMERIC", "LC_PAPER", "LC_TELEPHONE", "LC_TIME", ] { if let Some(value) = std::env::var_os(key) { envs.push(make_putenv(key, value)); } } let uid = getuid(); let gid = getgid(); let intern_run_path: OsString = format!("/run/user/{}", uid.as_raw()).try_into().unwrap(); envs.push(make_putenv("XDG_RUNTIME_DIR", intern_run_path.clone())); let extern_run_path = std::env::temp_dir().join(format!( "mm.{}", Alphanumeric.sample_string(&mut rand::thread_rng(), 16), )); std::fs::create_dir_all(&extern_run_path)?; let intern_home_path: OsString = std::env::var_os("HOME").unwrap_or("/home/mm".into()); envs.push(make_putenv("HOME", intern_home_path.clone())); debug!(home_mode = ?home_isolation_mode, "using home mode"); let (extern_home_path, clear_home) = match home_isolation_mode { HomeIsolationMode::Unisolated => (None, false), HomeIsolationMode::Tmpfs => (None, true), HomeIsolationMode::Permanent(path) => { std::fs::create_dir_all(&path).context(format!( "failed to create home directory {}", path.display() ))?; (Some(path), true) } }; if clear_home && exe_path.starts_with(&intern_home_path) { bail!( "command {:?} will be unavailable in container (set isolate_home = false to avoid \ this error)", exe_path.display(), ); } let mut child_cmd = Command::new(exe_path); child_cmd.current_dir("/"); child_cmd.args(args); Ok(Self { child_cmd, envs, tmp_stderr: None, intern_home_path: intern_home_path.into(), extern_home_path, clear_home, intern_run_path: intern_run_path.into(), extern_run_path, additional_bind_mounts: Vec::new(), internal_bind_mounts: Vec::new(), setup_hooks: Vec::new(), uid, gid, }) } pub fn 
intern_run_path(&self) -> &Path { &self.intern_run_path } pub fn extern_run_path(&self) -> &Path { &self.extern_run_path } pub fn bind_mount(&mut self, src: impl AsRef, dst: impl AsRef) { self.additional_bind_mounts .push((src.as_ref().to_owned(), dst.as_ref().to_owned())); } pub fn internal_bind_mount(&mut self, src: impl AsRef, dst: impl AsRef) { self.internal_bind_mounts .push((src.as_ref().to_owned(), dst.as_ref().to_owned(), true)); } pub fn setup_hook( &mut self, f: impl FnOnce(&mut super::ContainerHandle) -> anyhow::Result<()> + 'static, ) { self.setup_hooks.push(Box::new(f)) } pub unsafe fn pre_exec(&mut self, f: impl FnMut() -> io::Result<()> + Send + Sync + 'static) { self.child_cmd.pre_exec(f); } pub fn set_env(&mut self, key: K, val: V) where K: AsRef, V: AsRef, { self.envs.push(make_putenv(key, val)) } pub fn set_stdout(&mut self, stdio: T) -> anyhow::Result<()> { let stdout = fcntl_dupfd_cloexec(&stdio, 0)?; self.child_cmd.stdout(stdout); Ok(()) } pub fn set_stderr(&mut self, stdio: T) -> anyhow::Result<()> { let stderr = fcntl_dupfd_cloexec(&stdio, 0)?; let tmp_stderr = fcntl_dupfd_cloexec(&stdio, 0)?; self.child_cmd.stderr(stderr); self.tmp_stderr = Some(tmp_stderr); Ok(()) } pub fn spawn(mut self) -> anyhow::Result { // Prepare bind mounts. let mut mounts = DEV_BIND_MOUNTS .iter() .map(|m| { Ok(( PathBuf::from_str(m.path).unwrap(), PathBuf::from_str(m.path).unwrap(), m.is_dir, None, )) }) .collect::>>()?; for (src, dst) in self.additional_bind_mounts.drain(..) { let is_dir = std::fs::metadata(&src) .context("failed to stat bind mount")? 
.is_dir(); mounts.push((src, dst, is_dir, None)) } let mut child_pidfd = -1; let mut args = clone3::Clone3::default(); args.flag_pidfd(&mut child_pidfd) .exit_signal(libc::SIGCHLD as _) .flag_newuser() .flag_newns() .flag_newpid(); debug!(cmd = ?self.child_cmd, "spawning child process"); let (barrier, child_barrier) = ipc::EventfdBarrier::new()?; // clone off a child process, which does some setup before execing the // app. let child_stderr = self.tmp_stderr.take(); let child_pid = match unsafe { args.call().context("clone3")? } { 0 => unsafe { self.child_after_fork(child_stderr.as_ref(), child_barrier, &mut mounts) }, pid => pid, }; let child_pidfd = unsafe { OwnedFd::from_raw_fd(child_pidfd) }; set_uid_map(child_pid, self.uid, self.gid).context("failed to set uid/gid map")?; // Wait for the child to signal that it's ready. barrier .sync(SYNC_TIMEOUT) .context("timed out waiting for forked child (phase 1)")?; let mut handle = super::ContainerHandle { pid: Pid::from_raw(child_pid).unwrap(), pidfd: child_pidfd, run_path: self.extern_run_path, }; for hook in self.setup_hooks.drain(..) { hook(&mut handle)?; } // Unfreeze the child. barrier .sync(SYNC_TIMEOUT) .context("timed out waiting for forked child (phase 2)")?; Ok(handle) } // Signal safety dictates what we can do here, and it's not a lot. The main // thing we avoid is allocations. unsafe fn child_after_fork( mut self, stderr: Option, barrier: ipc::EventfdBarrier, bind_mounts: &mut [(PathBuf, PathBuf, bool, Option)], ) -> ! where FD: AsFd, { // See above for how logging is implemented to avoid the possibility of // allocation. if let Some(fd) = &stderr { let _ = rustix::stdio::dup2_stderr(fd.as_fd()); // Replace stderr. } // Tell the kernel to SIGKILL us when our parent (mmserver) dies. this // is particularly important because we're PID 1, so the kernel won't // kill on SIGINT/SIGQUIT/etc if the child process doesn't have a signal // handler set up for them. 
        must!(set_parent_process_death_signal(Some(Signal::KILL)));

        preexec_debug!("starting container setup");

        // Mount /proc first.
        must!(mount_fs(
            c"proc",
            c"/proc",
            MountAttrFlags::MOUNT_ATTR_NOEXEC
                | MountAttrFlags::MOUNT_ATTR_NOSUID
                | MountAttrFlags::MOUNT_ATTR_NODEV,
            &[],
        ));

        // Collect detached mounts we want to bind-mount later. We can't
        // allocate a vec, so we fill in the Options in the passed-in vec
        // instead.
        preexec_debug!("collecting detached bind mounts");
        for (src_path, _, _, ref mut device_fd) in bind_mounts.iter_mut() {
            // Host paths that don't exist (e.g. NVIDIA nodes on non-NVIDIA
            // systems) are simply skipped.
            if src_path.exists() {
                let fd = must!(detach_mount(src_path,));
                *device_fd = Some(fd)
            }
        }

        // Grab a detached mount for the temporary dir we're going to mount as
        // XDG_RUNTIME_DIR.
        let detached_run_fd = must!(detach_mount(&self.extern_run_path,));

        // Grab a detached mount for home, if we're using one.
        let detached_home = self
            .extern_home_path
            .as_ref()
            .map(|p| must!(detach_mount(p)));

        // Mount /dev and a few other filesystems.
        must!(mount_fs(
            c"tmpfs",
            c"/dev",
            MountAttrFlags::MOUNT_ATTR_NOEXEC | MountAttrFlags::MOUNT_ATTR_STRICTATIME,
            &[(c"mode", c"0755")],
        ));

        must!(mount_fs(
            c"tmpfs",
            c"/dev/shm",
            MountAttrFlags::MOUNT_ATTR_NOEXEC
                | MountAttrFlags::MOUNT_ATTR_NOSUID
                | MountAttrFlags::MOUNT_ATTR_NODEV,
            &[(c"mode", c"1777"), (c"size", c"512m")],
        ));

        // TODO: this errors with EPERM.
        // must!(mount_fs(
        //     "mqueue",
        //     "/dev/mqueue",
        //     MountAttrFlags::MOUNT_ATTR_NOEXEC
        //         | MountAttrFlags::MOUNT_ATTR_NOSUID
        //         | MountAttrFlags::MOUNT_ATTR_NODEV,
        //     &[],
        // ));

        must!(mount_fs(
            c"devpts",
            c"/dev/pts",
            MountAttrFlags::MOUNT_ATTR_NOEXEC | MountAttrFlags::MOUNT_ATTR_NOSUID,
            &[
                (c"newinstance", c""),
                (c"ptmxmode", c"0666"),
                (c"mode", c"0620"),
                // TODO: do we need to add a tty group?
                // ("gid", "5"),
            ],
        ));

        // Symlink /dev/fd -> /proc/self/fd, etc.
        must!(symlinkat(c"/proc/self/fd", AT_FDCWD, c"/dev/fd"));
        must!(symlinkat(c"/proc/self/fd/0", AT_FDCWD, c"/dev/stdin"));
        must!(symlinkat(c"/proc/self/fd/1", AT_FDCWD, c"/dev/stdout"));
        must!(symlinkat(c"/proc/self/fd/2", AT_FDCWD, c"/dev/stderr"));

        // Prepare /dev/input.
        must!(mkdirat(
            AT_FDCWD,
            "/dev/input",
            Mode::from_bits(0o755).unwrap()
        ));

        must!(mount_fs(
            c"tmpfs",
            c"/run/user",
            MountAttrFlags::MOUNT_ATTR_NOSUID
                | MountAttrFlags::MOUNT_ATTR_NODEV
                | MountAttrFlags::MOUNT_ATTR_RELATIME,
            &[(c"mode", c"0700"), (c"size", c"1g")],
        ));

        must!(mount_fs(
            c"tmpfs",
            c"/tmp",
            MountAttrFlags::MOUNT_ATTR_NOSUID | MountAttrFlags::MOUNT_ATTR_NOATIME,
            &[(c"mode", c"0777"), (c"size", c"1g")],
        ));

        // When isolating home, hide the host's /home behind a tmpfs and
        // recreate the (internal) home directory inside it.
        if self.clear_home {
            must!(mount_fs(
                c"tmpfs",
                c"/home",
                MountAttrFlags::MOUNT_ATTR_NOSUID
                    | MountAttrFlags::MOUNT_ATTR_NOEXEC
                    | MountAttrFlags::MOUNT_ATTR_NOATIME,
                &[(c"mode", c"0777"), (c"size", c"1g")],
            ));

            must!(mkdirat(
                AT_FDCWD,
                &self.intern_home_path,
                Mode::from_bits(0o700).unwrap()
            ));
        }

        // Mount XDG_RUNTIME_DIR.
        preexec_debug!(
            "bind-mounting {} to {}",
            self.extern_run_path.display(),
            self.intern_run_path.display()
        );

        must!(mkdirat(AT_FDCWD, &self.intern_run_path, Mode::empty()));
        must!(reattach_mount(detached_run_fd, &self.intern_run_path));

        // Mount HOME.
        if let Some(fd) = detached_home {
            preexec_debug!(
                "bind-mounting {} to {}",
                self.extern_home_path.as_ref().unwrap().display(),
                self.intern_home_path.display()
            );

            must!(reattach_mount(fd, &self.intern_home_path));
        }

        // Attach detached bind mounts, now that the filesystem is prepared.
        for (_src_path, dst_path, is_dir, mount_fd) in bind_mounts {
            if let Some(detached_mount_fd) = mount_fd.take() {
                preexec_debug!(
                    "bind-mounting {} (outside) to {} (inside)",
                    _src_path.display(),
                    dst_path.display()
                );

                // Create the mount point first; files need touch, dirs mkdir.
                if *is_dir {
                    let _ = mkdirat(AT_FDCWD, &*dst_path, Mode::empty());
                } else {
                    must!(touch(&*dst_path, Mode::empty()));
                }

                must!(reattach_mount(detached_mount_fd, dst_path));
            }
        }

        preexec_debug!("finished initial setup, waiting for mmserver");

        // Sync with mmserver.
        must!(sync_barrier(&barrier));
        must!(sync_barrier(&barrier));

        // Finally, internal bind mounts. We do this after syncing with mmserver
        // in case mmserver wants us to bind-mount something it just mounted.
        for (src_path, dst_path, is_dir) in &self.internal_bind_mounts {
            preexec_debug!(
                "bind-mounting {} to {}",
                src_path.display(),
                dst_path.display()
            );

            let fd = must!(detach_mount(src_path));

            if *is_dir {
                let _ = mkdirat(AT_FDCWD, dst_path, Mode::empty());
            } else {
                must!(touch(dst_path, Mode::empty()));
            }

            must!(reattach_mount(fd, dst_path));
        }

        // TODO: Install seccomp handlers here.

        // We don't trust std::os::Command's env handling, because sometimes
        // it allocates.
        libc::clearenv();
        for v in &mut self.envs {
            libc::putenv(v.as_ptr() as *mut _);
        }

        // If successful, this never returns.
let _e = self.child_cmd.exec(); preexec_debug!("execve failed: {_e}"); libc::_exit(1); } } fn set_uid_map(child_pid: i32, uid: rustix::fs::Uid, gid: rustix::fs::Gid) -> anyhow::Result<()> { let uid = uid.as_raw(); let gid = gid.as_raw(); write( OpenOptions::new() .write(true) .open(format!("/proc/{}/setgroups", child_pid))?, b"deny", ) .context("failed to write setgroups=deny")?; write( OpenOptions::new() .write(true) .open(format!("/proc/{}/uid_map", child_pid)) .context("open failed")?, format!("{uid} {uid} 1\n").as_bytes(), ) .context("failed to write uid_map")?; write( OpenOptions::new() .write(true) .open(format!("/proc/{}/gid_map", child_pid)) .context("open failed")?, format!("{gid} {gid} 1\n").as_bytes(), ) .context("failed to write gid_map")?; Ok(()) } fn run_in_container(ns_pidfd: impl AsFd, stderr: Option>, f: F) -> io::Result<()> where F: FnOnce() -> io::Result<()>, { let child_pid = unsafe { libc::fork() }; if child_pid == -1 { return Err(io::Error::last_os_error()); } else if child_pid == 0 { unsafe { if let Some(fd) = &stderr { let _ = rustix::stdio::dup2_stderr(fd.as_fd()); // Replace stderr. 
} must!(set_parent_process_death_signal(Some(Signal::KILL))); must!(move_into_link_name_space( ns_pidfd.as_fd(), Some(LinkNameSpaceType::User) )); must!(move_into_link_name_space( ns_pidfd.as_fd(), Some(LinkNameSpaceType::Mount) )); if let Err(_e) = f() { preexec_debug!("run_in_container: {_e}"); libc::_exit(1); } libc::_exit(0); } } loop { match waitpid( Some(Pid::from_raw(child_pid).unwrap()), WaitOptions::empty(), ) { Ok(st) => match st { Some((_, st)) if st.as_raw() == 0 => return Ok(()), _ => return Err(io::Error::other("forked process exited with error")), }, Err(Errno::INTR) => continue, Err(e) => return Err(e.into()), } } } pub(super) fn fs_mount_into( ns_pidfd: impl AsFd, dst: impl AsRef, fsname: String, attr: MountAttrFlags, options: &[(&CStr, &CStr)], ) -> io::Result<()> { debug!("mounting {fsname} to {}", dst.as_ref().display()); let fsname = CString::new(fsname).unwrap(); let dst = CString::new(dst.as_ref().as_os_str().as_encoded_bytes()).unwrap(); run_in_container(ns_pidfd, None, move || { mount_fs(&fsname, &dst, attr, options)?; Ok(()) })?; Ok(()) } pub(super) fn fuse_mount_into( ns_pidfd: impl AsFd, dst: impl AsRef, fsname: String, st_mode: u32, ) -> io::Result { debug!("mounting {fsname} to {}", dst.as_ref().display()); let (fd_tx, fd_rx) = ipc::fd_oneshot()?; let uid = CString::new(format!("{}", getuid().as_raw())).unwrap(); let gid = CString::new(format!("{}", getgid().as_raw())).unwrap(); let rootmode = CString::new(format!("{st_mode:o}")).unwrap(); let is_dir = FileType::from_raw_mode(st_mode) == FileType::Directory; run_in_container(ns_pidfd, None, move || { let fuse_device_fd = openat( AT_FDCWD, "/dev/fuse", OFlags::RDWR | OFlags::CLOEXEC, Mode::empty(), )?; // Send the fd back to mmserver. fd_tx.send_timeout(fuse_device_fd.try_clone()?, SYNC_TIMEOUT)?; // format! allocates. 
// Render the raw fd as a decimal C string on the stack instead.
let mut fd_buf = [0_u8; 32];
let fd_str = {
    use std::io::Write;
    write!(
        &mut io::Cursor::new(&mut fd_buf[..]),
        "{}",
        fuse_device_fd.as_raw_fd()
    )?;

    CStr::from_bytes_until_nul(&fd_buf[..])
        .map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "invalid FD"))?
};

// Best-effort creation of the mountpoint (it may already exist).
if is_dir {
    let _ = mkdirat(AT_FDCWD, dst.as_ref(), Mode::from_raw_mode(st_mode));
} else {
    let _ = touch(dst.as_ref(), Mode::from_raw_mode(st_mode));
}

// Configure and attach the FUSE mount using the new mount API.
let fsfd = fsopen(c"fuse", FsOpenFlags::FSOPEN_CLOEXEC)?;
fsconfig_set_string(fsfd.as_fd(), c"fd", fd_str)?;
fsconfig_set_string(fsfd.as_fd(), c"user_id", &uid)?;
fsconfig_set_string(fsfd.as_fd(), c"group_id", &gid)?;
fsconfig_set_string(fsfd.as_fd(), c"rootmode", &rootmode)?;
fsconfig_create(fsfd.as_fd())?;

let mount_fd = fsmount(
    fsfd.as_fd(),
    FsMountFlags::FSMOUNT_CLOEXEC,
    MountAttrFlags::MOUNT_ATTR_NOEXEC
        | MountAttrFlags::MOUNT_ATTR_NOSUID
        | MountAttrFlags::MOUNT_ATTR_NODEV,
)?;

move_mount(
    mount_fd.as_fd(),
    c"",
    AT_FDCWD,
    dst.as_ref(),
    MoveMountFlags::MOVE_MOUNT_F_EMPTY_PATH | MoveMountFlags::MOVE_MOUNT_T_SYMLINKS,
)?;

Ok(())
})?;

fd_rx.recv_timeout(SYNC_TIMEOUT)
}

/// Creates an empty file at `path` with the given mode (result ignored by
/// callers if it already exists).
fn touch(path: impl AsRef, mode: impl Into) -> rustix::io::Result<()> {
    let _ = openat(
        AT_FDCWD,
        path.as_ref(),
        OFlags::WRONLY | OFlags::CREATE | OFlags::CLOEXEC,
        mode.into(),
    )?;

    Ok(())
}

/// Clones the mount tree at `path` into a detached, fd-referenced mount.
fn detach_mount(path: impl AsRef) -> rustix::io::Result {
    open_tree(
        AT_FDCWD,
        path.as_ref(),
        OpenTreeFlags::OPEN_TREE_CLONE
            | OpenTreeFlags::AT_RECURSIVE
            | OpenTreeFlags::OPEN_TREE_CLOEXEC,
    )
}

/// Attaches a mount previously detached with `detach_mount` at `path`.
fn reattach_mount(fd: OwnedFd, path: impl AsRef) -> rustix::io::Result<()> {
    move_mount(
        fd.as_fd(),
        "",
        AT_FDCWD,
        path.as_ref(),
        MoveMountFlags::MOVE_MOUNT_F_EMPTY_PATH | MoveMountFlags::MOVE_MOUNT_T_SYMLINKS,
    )
}

/// Mounts a filesystem of type `fstype` at `dst` using the fsopen(2) mount
/// API. Runs post-fork inside the container, hence `preexec_debug!`.
fn mount_fs(
    fstype: &CStr,
    dst: &CStr,
    options: MountAttrFlags,
    configs: &[(&CStr, &CStr)],
) -> rustix::io::Result<()> {
    preexec_debug!("mounting {fstype:?} on {dst:?}");

    let fsfd = fsopen(fstype, FsOpenFlags::FSOPEN_CLOEXEC)?;
    for (k, v) in configs {
        // An empty value means a bare flag rather than a key=value option.
        if v.is_empty() {
fsconfig_set_flag(fsfd.as_fd(), *k)?;
} else {
    fsconfig_set_string(fsfd.as_fd(), *k, *v)?;
}
}

fsconfig_create(fsfd.as_fd())?;
let mount_fd = fsmount(fsfd.as_fd(), FsMountFlags::FSMOUNT_CLOEXEC, options)?;

// Best-effort: the mountpoint may already exist.
let _ = mkdirat(AT_FDCWD, dst, Mode::empty());

move_mount(
    mount_fd.as_fd(),
    c"",
    AT_FDCWD,
    dst,
    MoveMountFlags::MOVE_MOUNT_F_EMPTY_PATH | MoveMountFlags::MOVE_MOUNT_T_SYMLINKS,
)?;

Ok(())
}

// Wrapped in a function for compatibility with the must! macro.
fn sync_barrier(barrier: &ipc::EventfdBarrier) -> rustix::io::Result<()> {
    barrier.sync(SYNC_TIMEOUT)
}

/// Generates a CString in the format key=value, for putenv(3).
///
/// NOTE(review): panics if either side is not valid UTF-8 or the result
/// contains an interior NUL — confirm callers only pass well-formed values.
fn make_putenv(k: impl AsRef, v: impl AsRef) -> CString {
    CString::new(format!(
        "{}={}",
        k.as_ref().to_str().unwrap(),
        v.as_ref().to_str().unwrap()
    ))
    .unwrap()
}

/// Validates an executable path, and returns the canonical version.
///
/// A bare command name (a single path component) is resolved via PATH;
/// anything else must be an absolute, existing, executable path.
fn validate_exe(p: impl AsRef) -> anyhow::Result {
    let p = p.as_ref();
    if p.components().count() == 1 {
        return find_executable_in_path(p)
            .ok_or(anyhow!("command {:?} is not in PATH", p.display()));
    }

    if !p.is_absolute() {
        bail!("path {:?} must be absolute", p.display());
    } else if !p.exists() {
        bail!("path {:?} does not exist", p.display());
    } else if !is_executable(p)?
{
    bail!("path {:?} is not executable", p.display());
}

match p.canonicalize() {
    Ok(p) => Ok(p),
    Err(_) => bail!("invalid path: {:?}", p.display()),
}
}

#[cfg(test)]
mod test {
    use std::{fs::File, io::Read as _};

    use rustix::pipe::{pipe_with, PipeFlags};

    use super::validate_exe;
    use crate::{config::HomeIsolationMode, container::Container};

    // End-to-end smoke test: spawn a containerized `echo` and check its
    // stdout arrives through the pipe.
    #[test_log::test]
    fn echo() -> anyhow::Result<()> {
        let mut container =
            Container::new(vec!["echo".into(), "done".into()], HomeIsolationMode::Tmpfs)?;

        let (pipe_rx, pipe_tx) = pipe_with(PipeFlags::CLOEXEC)?;
        container.set_stdout(pipe_tx)?;

        let mut child = container.spawn()?;
        child.wait()?;

        let mut buf = String::new();
        File::from(pipe_rx).read_to_string(&mut buf)?;

        pretty_assertions::assert_eq!(buf, "done\n");
        Ok(())
    }

    // Exercises each rejection branch of validate_exe, plus PATH lookup.
    #[test]
    fn test_validate_exe() {
        let cat = pathsearch::find_executable_in_path("cat").unwrap();
        assert_eq!(cat, validate_exe("cat").unwrap());

        assert_eq!(
            validate_exe("nonexistent").unwrap_err().to_string(),
            "command \"nonexistent\" is not in PATH",
        );

        assert_eq!(
            validate_exe("foo/../bar").unwrap_err().to_string(),
            "path \"foo/../bar\" must be absolute",
        );

        assert_eq!(
            validate_exe("/nonexistent").unwrap_err().to_string(),
            "path \"/nonexistent\" does not exist",
        );

        let f = mktemp::Temp::new_file().unwrap();
        assert_eq!(
            validate_exe(&f).unwrap_err().to_string(),
            format!("path {:?} is not executable", f.as_path().display())
        )
    }
}

================================================ FILE: mm-server/src/container.rs ================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

use std::{
    ffi::CStr,
    os::fd::{AsFd, BorrowedFd, OwnedFd},
    path::{Path, PathBuf},
};

use anyhow::{bail, Context as _};
use rustix::{
    mount::MountAttrFlags,
    process::{Pid, Signal, WaitId, WaitIdOptions},
};
use tracing::{debug, info};

mod ipc;
mod runtime;

pub use runtime::Container;

/// A handle to a running container.
pub struct ContainerHandle {
    pid: Pid,
    pidfd: OwnedFd,
    run_path: PathBuf,
}

impl AsFd for ContainerHandle {
    // Borrowing the handle as an fd yields the child's pidfd.
    fn as_fd(&self) -> BorrowedFd<'_> {
        self.pidfd()
    }
}

impl ContainerHandle {
    /// The PID of the containerized child process.
    pub fn pid(&self) -> Pid {
        self.pid
    }

    /// A pidfd referring to the containerized child process.
    pub(crate) fn pidfd(&self) -> BorrowedFd<'_> {
        self.pidfd.as_fd()
    }

    /// Sends a signal to the child, via its pidfd.
    pub fn signal(&mut self, signal: Signal) -> anyhow::Result<()> {
        debug!(?signal, pid = self.pid.as_raw_nonzero(), "signaling child");
        rustix::process::pidfd_send_signal(self, signal).context("pidfd_send_signal")?;
        Ok(())
    }

    /// Blocks until the child exits. Returns an error if the exit status is
    /// nonzero.
    pub fn wait(&mut self) -> anyhow::Result<()> {
        let exit_status =
            rustix::process::waitid(WaitId::PidFd(self.as_fd()), WaitIdOptions::EXITED)
                .context("waitid")?
                .and_then(|x| x.exit_status())
                .unwrap_or_default();

        info!(exit_status, "child process exited");
        if exit_status != 0 {
            bail!("child process exited with status: {exit_status}");
        }

        Ok(())
    }

    /// Mounts a named filesystem inside the container at the given path.
    pub fn fs_mount(
        &self,
        dst: impl AsRef,
        fstype: impl AsRef,
        attr: MountAttrFlags,
        options: impl AsRef<[(S, S)]>,
    ) -> anyhow::Result<()>
    where
        S: AsRef,
    {
        // Borrow the option pairs down to the &CStr slices the runtime
        // helper expects.
        let options = options
            .as_ref()
            .iter()
            .map(|(k, v)| (k.as_ref(), v.as_ref()))
            .collect::>();

        runtime::fs_mount_into(&self.pidfd, dst, fstype.as_ref().to_owned(), attr, &options)?;
        Ok(())
    }

    /// Opens /dev/fuse inside the container, mounts it to the given path,
    /// and returns the FD for use in a FUSE daemon.
pub fn fuse_mount(
    &self,
    dst: impl AsRef,
    fsname: impl AsRef,
    st_mode: u32,
) -> anyhow::Result {
    let fd = runtime::fuse_mount_into(&self.pidfd, &dst, fsname.as_ref().to_owned(), st_mode)?;
    Ok(fd)
}
}

impl Drop for ContainerHandle {
    // Clean up the container's runtime directory when the handle is dropped.
    fn drop(&mut self) {
        let _ = std::fs::remove_dir_all(&self.run_path);
    }
}

================================================ FILE: mm-server/src/encoder/dpb.rs ================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

use std::{collections::BTreeMap, sync::Arc};

use ash::vk;

use crate::vulkan::*;

/// One slot of the decoded picture buffer (DPB).
#[derive(Debug, Copy, Clone)]
pub struct DpbPicture {
    pub image: vk::Image,
    pub picture_resource_info: vk::VideoPictureResourceInfoKHR<'static>,
    pub index: usize,
    pub currently_active: bool,
    // Free slots may be handed out again by setup_pic.
    free: bool,
}

/// A pool of reference pictures for the hardware video encoder.
pub struct DpbPool {
    // Keeps the backing image(s) alive for the lifetime of the pool.
    _store: Vec,
    slots: Vec,
    // Maps picture IDs (as passed to mark_active) to slot indices.
    ids: BTreeMap,
}

impl DpbPool {
    /// Creates a DPB pool using one layer of a shared image for each picture.
    /// Guaranteed to be supported, where distinct images are not, but otherwise
    /// unoptimal and awkward.
    pub fn new(
        vk: Arc,
        format: vk::Format,
        width: u32,
        height: u32,
        profile: &mut vk::VideoProfileInfoKHR,
        size: usize,
    ) -> anyhow::Result {
        let image = create_dpb_image(vk.clone(), profile, format, width, height, size as u32)?;

        // Each array layer of the image is used as a separate slot, with a
        // one-to-one correspondence between the layer index and the slot index.
        let mut slots = Vec::with_capacity(size);
        for i in 0..size {
            slots.push(DpbPicture {
                image: image.image,
                picture_resource_info: vk::VideoPictureResourceInfoKHR::default()
                    .image_view_binding(image.view)
                    .coded_extent(vk::Extent2D { width, height })
                    .base_array_layer(i as u32),
                index: i,
                currently_active: false,
                free: true,
            });
        }

        Ok(Self {
            _store: vec![image],
            slots,
            ids: BTreeMap::new(),
        })
    }

    /// Creates a dpb pool using separate images for each slot.
pub fn new_separate_images(
    vk: Arc,
    format: vk::Format,
    width: u32,
    height: u32,
    profile: &mut vk::VideoProfileInfoKHR,
    size: usize,
) -> anyhow::Result {
    let mut store = Vec::with_capacity(size);
    let mut slots = Vec::with_capacity(size);
    for i in 0..size {
        // One single-layer image per slot.
        let image = create_dpb_image(vk.clone(), profile, format, width, height, 1)?;
        slots.push(DpbPicture {
            image: image.image,
            picture_resource_info: vk::VideoPictureResourceInfoKHR::default()
                .image_view_binding(image.view)
                .coded_extent(vk::Extent2D { width, height })
                .base_array_layer(0),
            index: i,
            currently_active: false,
            free: true,
        });

        store.push(image);
    }

    Ok(Self {
        _store: store,
        slots,
        ids: BTreeMap::new(),
    })
}

/// Returns the index of a free slot and the backing picture resource for
/// it. Note that this does not mark the slot as active, or retain an
/// association between a picture ID and the slot. After the setup pic is
/// used in an encode operation, it should be marked as active if the
/// picture is a reference with `mark_active`.
///
/// Panics if no slot is free.
pub fn setup_pic(&self) -> DpbPicture {
    for slot in &self.slots {
        if slot.free {
            return *slot;
        }
    }

    panic!("no free slots in the dpb");
}

/// Retrieves the picture, along with its slot index, for a picture ID that
/// was previously passed to `mark_active`.
pub fn get_pic(&self, id: u32) -> Option {
    self.ids.get(&id).map(|&slot| self.slots[slot])
}

/// Marks a slot as active, with the picture referenced by `id` stored in
/// it. Active slots are reserved until marked inactive, and will
/// not be overwritten.
///
/// The pool maintains a mapping of IDs to slots, so that the slot can be
/// retrieved by ID. If an ID is reused, the previous slot is automatically
/// marked as free for re-use.
pub fn mark_active(&mut self, slot: usize, id: u32) {
    self.slots[slot].currently_active = true;
    self.slots[slot].free = false;

    if let Some(old_slot) = self.ids.insert(id, slot) {
        self.slots[old_slot].free = true;
    }
}

/// Mark a slot as inactive.
/// Inactive slots are always considered free.
pub fn mark_inactive(&mut self, slot: usize) {
    self.slots[slot].currently_active = false;
    self.slots[slot].free = true;
}

/// Mark all slots inactive.
pub fn clear(&mut self) {
    self.ids.clear();
    for slot in &mut self.slots {
        slot.currently_active = false;
        slot.free = true;
    }
}
}

/// Creates a `layers`-deep image usable as encoder DPB storage, with bound
/// memory and a TYPE_2D_ARRAY view covering all mip levels and layers.
fn create_dpb_image(
    vk: Arc,
    profile: &mut vk::VideoProfileInfoKHR,
    format: vk::Format,
    width: u32,
    height: u32,
    layers: u32,
) -> anyhow::Result {
    let image = {
        // The image must be created against the video profile it will be
        // used with.
        let mut profile_list_info = super::single_profile_list_info(profile);
        let create_info = vk::ImageCreateInfo::default()
            .image_type(vk::ImageType::TYPE_2D)
            .format(format)
            .extent(vk::Extent3D {
                width,
                height,
                depth: 1,
            })
            .mip_levels(1)
            .array_layers(layers)
            .samples(vk::SampleCountFlags::TYPE_1)
            .tiling(vk::ImageTiling::OPTIMAL)
            .usage(vk::ImageUsageFlags::VIDEO_ENCODE_DPB_KHR)
            .sharing_mode(vk::SharingMode::EXCLUSIVE)
            .initial_layout(vk::ImageLayout::UNDEFINED)
            .push_next(&mut profile_list_info);

        unsafe { vk.device.create_image(&create_info, None)? }
    };

    let memory = unsafe { bind_memory_for_image(&vk.device, &vk.device_info.memory_props, image)? };

    let view = {
        let create_info = vk::ImageViewCreateInfo::default()
            .image(image)
            .view_type(vk::ImageViewType::TYPE_2D_ARRAY)
            .format(format)
            .components(vk::ComponentMapping {
                r: vk::ComponentSwizzle::IDENTITY,
                g: vk::ComponentSwizzle::IDENTITY,
                b: vk::ComponentSwizzle::IDENTITY,
                a: vk::ComponentSwizzle::IDENTITY,
            })
            .subresource_range(vk::ImageSubresourceRange {
                aspect_mask: vk::ImageAspectFlags::COLOR,
                base_mip_level: 0,
                level_count: vk::REMAINING_MIP_LEVELS,
                base_array_layer: 0,
                layer_count: vk::REMAINING_ARRAY_LAYERS,
            });

        unsafe {
            vk.device.create_image_view(&create_info, None)?
}
};

Ok(VkImage::wrap(
    vk.clone(),
    image,
    view,
    memory,
    format,
    width,
    height,
))
}

================================================ FILE: mm-server/src/encoder/gop_structure.rs ================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

/// Metadata describing one frame emitted by a GOP structure.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct GopFrame {
    pub stream_position: u64,
    pub gop_position: u64,
    pub id: u32,
    /// The frame IDs this frame references.
    pub ref_ids: Vec,
    pub is_keyframe: bool,
    /// The number of frames referencing this one.
    pub forward_ref_count: u32,
}

/// This implements hierarchical P-coding, which looks like this:
/// https://eymenkurdoglu.github.io/2016/07/01/hierp-one.html
///
/// This is also called a "dyadic" structure by the Vulkan spec (42.17.11. H.264
/// Encode Rate Control).
///
/// Each frame references at most one other frame. The pattern repeats every
/// (2^(layers-1)) frames, but an intra frame is only used once per GOP. Note
/// that a 1-layer structure is equivalent to a flat P structure, with each
/// frame referencing the one before.
pub struct HierarchicalP {
    pub layers: u32,
    pub gop_size: u32,
    pub mini_gop_size: u32,

    frame_num: u64,
    gop_position: u64,
    needs_refresh: bool,
}

impl HierarchicalP {
    /// Creates a new structure. `layers` must be in 1..=5, and `gop_size`
    /// must be a multiple of the mini-GOP size, 2^(layers-1).
    pub fn new(layers: u32, gop_size: u32) -> Self {
        assert!(layers > 0);
        assert!(layers <= 5);

        let mini_gop_size = 2_u32.pow(layers - 1);
        assert_eq!(gop_size % mini_gop_size, 0);

        Self {
            layers,
            gop_size,
            mini_gop_size,
            frame_num: 0,
            gop_position: 0,
            needs_refresh: true,
        }
    }

    /// Advances the structure by one frame and returns its metadata.
    pub fn next_frame(&mut self) -> GopFrame {
        let mini_gop_pos = (self.frame_num % self.mini_gop_size as u64) as u32;

        // The reference position is found by clearing the lowest set bit of
        // the mini-GOP position.
        let (layer, ref_layer) = if mini_gop_pos == 0 {
            (0, 0)
        } else {
            let ref_pos = mini_gop_pos ^ (1 << mini_gop_pos.trailing_zeros());
            (
                temporal_layer(mini_gop_pos, self.layers),
                temporal_layer(ref_pos, self.layers),
            )
        };

        let is_keyframe;
        if self.needs_refresh && mini_gop_pos == 0 {
            self.needs_refresh = false;

            // Close the GOP, and start a new one.
self.gop_position = 0;
is_keyframe = true;
} else {
    is_keyframe = false;
}

// Keyframes reference nothing; everything else references one frame.
let ref_ids = if is_keyframe { vec![] } else { vec![ref_layer] };

let forward_ref_count = if layer == 0 {
    // One for each layer above, plus the next mini-GOP.
    self.layers
} else {
    // One for each layer above.
    self.layers - layer - 1
};

// We use the layer as the frame ID.
let frame = GopFrame {
    stream_position: self.frame_num,
    gop_position: self.gop_position,
    id: layer,
    ref_ids,
    is_keyframe,
    forward_ref_count,
};

self.frame_num += 1;
self.gop_position = (self.gop_position + 1) % (self.gop_size as u64);

frame
}

/// Causes a keyframe to be generated at the start of the next mini-GOP.
pub fn request_refresh(&mut self) {
    self.needs_refresh = true
}

/// The number of DPB slots this structure needs (always at least two).
pub fn required_dpb_size(&self) -> usize {
    // We should have one slot for each layer.
    std::cmp::max(self.layers as usize, 2)
}

/// Returns the number of frames per second belonging to a particular layer
/// as a fractional number, given the layer and the total framerate.
pub fn layer_framerate(&self, layer: u32, base_framerate: u32) -> (u32, u32) {
    if self.layers == 1 {
        return (base_framerate, 1);
    }

    let frames_per_mini_gop = 2_u32.pow(layer.saturating_sub(1)); // 1, 1, 2, 4, 8, 16...
assert!(frames_per_mini_gop <= self.mini_gop_size / 2);
(base_framerate * frames_per_mini_gop, self.mini_gop_size)
}
}

/// Returns the temporal layer for a position within a mini-GOP. Position 0
/// is layer 0; otherwise the layer is derived from the lowest set bit.
fn temporal_layer(frame: u32, layers: u32) -> u32 {
    if frame == 0 {
        return 0;
    }

    layers - frame.trailing_zeros() - 1
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_temporal_layer_4_layers() {
        assert_eq!(temporal_layer(0, 4), 0);
        assert_eq!(temporal_layer(1, 4), 3);
        assert_eq!(temporal_layer(2, 4), 2);
        assert_eq!(temporal_layer(3, 4), 3);
        assert_eq!(temporal_layer(4, 4), 1);
        assert_eq!(temporal_layer(5, 4), 3);
        assert_eq!(temporal_layer(6, 4), 2);
        assert_eq!(temporal_layer(7, 4), 3);
    }

    // Verifies the dyadic reference pattern over two mini-GOPs.
    #[test]
    fn test_gop() {
        let mut structure = HierarchicalP::new(3, 60);
        assert_eq!(structure.gop_size, 60);
        assert_eq!(structure.mini_gop_size, 4);

        let expected = [
            GopFrame {
                stream_position: 0,
                gop_position: 0,
                id: 0,
                ref_ids: vec![],
                is_keyframe: true,
                forward_ref_count: 3,
            },
            GopFrame {
                stream_position: 1,
                gop_position: 1,
                id: 2,
                ref_ids: vec![0],
                is_keyframe: false,
                forward_ref_count: 0,
            },
            GopFrame {
                stream_position: 2,
                gop_position: 2,
                id: 1,
                ref_ids: vec![0],
                is_keyframe: false,
                forward_ref_count: 1,
            },
            GopFrame {
                stream_position: 3,
                gop_position: 3,
                id: 2,
                ref_ids: vec![1],
                is_keyframe: false,
                forward_ref_count: 0,
            },
            GopFrame {
                stream_position: 4,
                gop_position: 4,
                id: 0,
                ref_ids: vec![0],
                is_keyframe: false,
                forward_ref_count: 3,
            },
            GopFrame {
                stream_position: 5,
                gop_position: 5,
                id: 2,
                ref_ids: vec![0],
                is_keyframe: false,
                forward_ref_count: 0,
            },
            GopFrame {
                stream_position: 6,
                gop_position: 6,
                id: 1,
                ref_ids: vec![0],
                is_keyframe: false,
                forward_ref_count: 1,
            },
            GopFrame {
                stream_position: 7,
                gop_position: 7,
                id: 2,
                ref_ids: vec![1],
                is_keyframe: false,
                forward_ref_count: 0,
            },
        ];

        for (i, frame) in expected.iter().enumerate() {
            assert_eq!(structure.next_frame(), *frame, "Frame {}", i);
        }
    }

    // A 1-layer structure degenerates to a flat P chain.
    #[test]
    fn test_flat() {
        let mut structure = HierarchicalP::new(1, 60);

        let expected = [
            GopFrame {
                stream_position: 0,
                gop_position: 0,
                id: 0,
                ref_ids: vec![],
is_keyframe: true,
forward_ref_count: 1,
},
GopFrame {
    stream_position: 1,
    gop_position: 1,
    id: 0,
    ref_ids: vec![0],
    is_keyframe: false,
    forward_ref_count: 1,
},
GopFrame {
    stream_position: 2,
    gop_position: 2,
    id: 0,
    ref_ids: vec![0],
    is_keyframe: false,
    forward_ref_count: 1,
},
GopFrame {
    stream_position: 3,
    gop_position: 3,
    id: 0,
    ref_ids: vec![0],
    is_keyframe: false,
    forward_ref_count: 1,
},
];

for (i, frame) in expected.iter().enumerate() {
    assert_eq!(structure.next_frame(), *frame, "Frame {}", i);
}
}
}

================================================ FILE: mm-server/src/encoder/h264.rs ================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

use std::sync::Arc;

use anyhow::{bail, Context};
use ash::vk;
use ash::vk::native::{
    StdVideoH264ChromaFormatIdc_STD_VIDEO_H264_CHROMA_FORMAT_IDC_420,
    StdVideoH264PictureParameterSet, StdVideoH264PocType_STD_VIDEO_H264_POC_TYPE_0,
    StdVideoH264SequenceParameterSet, StdVideoH264SequenceParameterSetVui,
};
use bytes::Bytes;
use tracing::{debug, trace};

use super::gop_structure::HierarchicalP;
use super::rate_control::{self, RateControlMode};
use crate::codec::VideoCodec;
use crate::{color::VideoProfile, session::control::VideoStreamParams, vulkan::*};

// NOTE(review): vk_chain! appears to be a project macro that bundles these
// structs into a single Vulkan p_next chain (see profile.as_mut() /
// caps.as_mut() usage below) — confirm against its definition in vulkan/.
vk_chain! {
    pub struct H264EncodeProfile<'a> {
        pub profile_info: vk::VideoProfileInfoKHR<'a>,
        pub encode_usage_info: vk::VideoEncodeUsageInfoKHR<'a>,
        pub h264_profile: vk::VideoEncodeH264ProfileInfoEXT<'a>,
    }
}

vk_chain! {
    pub struct H264EncodeCapabilities<'a> {
        pub video_caps: vk::VideoCapabilitiesKHR<'a>,
        pub encode_caps: vk::VideoEncodeCapabilitiesKHR<'a>,
        pub h264_caps: vk::VideoEncodeH264CapabilitiesEXT<'a>,
    }
}

vk_chain!
{
    pub struct H264QualityLevelProperties<'a> {
        pub props: vk::VideoEncodeQualityLevelPropertiesKHR<'a>,
        pub h264_props: vk::VideoEncodeH264QualityLevelPropertiesEXT<'a>,
    }
}

/// Per-reference bookkeeping needed to fill out H.264 reference info.
#[derive(Debug, Default, Clone, Copy)]
struct H264Metadata {
    frame_num: u32,
    pic_order_cnt: i32,
}

pub struct H264Encoder {
    inner: super::EncoderInner,
    profile: H264EncodeProfile,
    rc_mode: RateControlMode,
    structure: HierarchicalP,
    pic_metadata: Vec, // Indexed by layer.
    idr_num: u32,
    frame_num: u32,
    // Pre-generated SPS/PPS bytes, inserted before each keyframe.
    headers: Bytes,
}

impl H264Encoder {
    /// Creates an H.264 encoder: queries device capabilities for the chosen
    /// profile, selects a GOP structure and rate control mode, builds the
    /// SPS/PPS, creates the underlying encode session, and pre-generates the
    /// encoded stream headers.
    pub fn new(
        vk: Arc,
        params: VideoStreamParams,
        framerate: u32,
        sink: impl super::Sink,
    ) -> anyhow::Result {
        let (video_loader, encode_loader) = vk.video_apis.as_ref().unwrap();

        let op = vk::VideoCodecOperationFlagsKHR::ENCODE_H264_EXT;
        let (profile, profile_idc) = match params.profile {
            VideoProfile::Hd => (super::default_profile(op), 100),
            VideoProfile::Hdr10 => (super::default_hdr10_profile(op), 110),
        };

        let h264_profile_info =
            vk::VideoEncodeH264ProfileInfoEXT::default().std_profile_idc(profile_idc);

        let mut profile = H264EncodeProfile::new(
            profile,
            super::default_encode_usage(vk.device_info.driver_version.clone()),
            h264_profile_info,
        );

        let mut caps = H264EncodeCapabilities::default();
        unsafe {
            video_loader
                .get_physical_device_video_capabilities(
                    vk.device_info.pdevice,
                    &profile.profile_info,
                    caps.as_mut(),
                )
                .context("vkGetPhysicalDeviceVideoCapabilitiesKHR")?;
        };

        trace!("video capabilities: {:#?}", caps.video_caps);
        trace!("encode capabilities: {:#?}", caps.encode_caps);
        trace!("h264 capabilities: {:#?}", caps.h264_caps);

        // unsafe {
        //     let get_info =
        //         vk::PhysicalDeviceVideoEncodeQualityLevelInfoKHR::default()
        //             .video_profile(&profile.profile_info)
        //             .quality_level(quality_level);
        //     encode_loader.get_physical_device_video_encode_quality_level_properties(
        //         vk.device_info.pdevice,
        //         &get_info,
        //         quality_props.as_mut(),
        //     )?;
        // }
        // trace!("quality level properties: {:#?}", quality_props.props);
        // trace!(
        //     "h264 quality level properties: {:#?}",
        //     quality_props.h264_props
        // );

        // Cap the number of temporal layers by what the hardware supports.
        let structure = super::default_structure(
            VideoCodec::H264,
            caps.h264_caps
                .max_temporal_layer_count
                .min(caps.encode_caps.max_rate_control_layers),
            caps.video_caps.max_dpb_slots,
        )?;

        let rc_mode = rate_control::select_rc_mode(
            params,
            &caps.encode_caps,
            caps.h264_caps.min_qp.try_into().unwrap_or(17),
            caps.h264_caps.max_qp.try_into().unwrap_or(50),
            &structure,
        );

        debug!(?rc_mode, "selected rate control mode");

        // TODO check more caps
        // TODO autoselect level
        let level_idc = vk::native::StdVideoH264LevelIdc_STD_VIDEO_H264_LEVEL_IDC_5_2;
        if caps.h264_caps.max_level_idc != 0 && caps.h264_caps.max_level_idc < level_idc {
            bail!("video resolution too large for hardware");
        }

        assert_eq!(
            caps.video_caps.picture_access_granularity.width,
            caps.video_caps.picture_access_granularity.height
        );

        // Align the coded picture size up to the macroblock granularity, and
        // signal the difference as crop offsets in the SPS.
        let mb_width = caps.video_caps.picture_access_granularity.width;
        let mb_height = caps.video_caps.picture_access_granularity.height;
        trace!("mb size: {mb_width}x{mb_height}");

        let aligned_width = params.width.next_multiple_of(mb_width);
        let aligned_height = params.height.next_multiple_of(mb_height);
        trace!(
            "aligned width: {}, height: {}",
            aligned_width,
            aligned_height
        );

        // Divide by two because of chroma subsampling, I guess?
        let crop_right = (aligned_width - params.width) / 2;
        let crop_bottom = (aligned_height - params.height) / 2;
        trace!("crop right: {}, bottom: {}", crop_right, crop_bottom);

        let (colour_primaries, transfer_characteristics, matrix_coefficients) =
            match params.profile {
                VideoProfile::Hd => (1, 1, 1),
                VideoProfile::Hdr10 => (9, 16, 9),
            };

        let mut vui = StdVideoH264SequenceParameterSetVui {
            colour_primaries,
            transfer_characteristics,
            matrix_coefficients,
            // Unspecified.
            video_format: 5,
            ..unsafe { std::mem::zeroed() }
        };

        vui.flags.set_video_signal_type_present_flag(1);
        vui.flags.set_video_full_range_flag(0); // Narrow range.
        vui.flags.set_color_description_present_flag(1);

        let log2_max_frame_num_minus4 = structure
            .gop_size
            .next_power_of_two()
            .ilog2()
            .saturating_sub(4) as u8;

        let bit_depth = match params.profile {
            VideoProfile::Hd => 8,
            VideoProfile::Hdr10 => 10,
        };

        let mut sps = StdVideoH264SequenceParameterSet {
            profile_idc,
            level_idc,
            chroma_format_idc: StdVideoH264ChromaFormatIdc_STD_VIDEO_H264_CHROMA_FORMAT_IDC_420,
            bit_depth_chroma_minus8: bit_depth - 8,
            bit_depth_luma_minus8: bit_depth - 8,
            max_num_ref_frames: 1,
            pic_order_cnt_type: StdVideoH264PocType_STD_VIDEO_H264_POC_TYPE_0,
            log2_max_pic_order_cnt_lsb_minus4: log2_max_frame_num_minus4,
            log2_max_frame_num_minus4,
            pic_width_in_mbs_minus1: (aligned_width / mb_width) - 1,
            pic_height_in_map_units_minus1: (aligned_height / mb_height) - 1,
            frame_crop_right_offset: crop_right,
            frame_crop_bottom_offset: crop_bottom,
            pSequenceParameterSetVui: <*const _>::cast(&vui),
            ..unsafe { std::mem::zeroed() }
        };

        sps.flags.set_vui_parameters_present_flag(1);
        sps.flags.set_frame_mbs_only_flag(1);

        if crop_right > 0 || crop_bottom > 0 {
            sps.flags.set_frame_cropping_flag(1);
        }

        let pps = StdVideoH264PictureParameterSet {
            ..unsafe { std::mem::zeroed() }
        };

        let sps = [sps];
        let pps = [pps];
        let h264_add_info = vk::VideoEncodeH264SessionParametersAddInfoEXT::default()
            .std_sp_ss(&sps)
            .std_pp_ss(&pps);

        let mut session_params = vk::VideoEncodeH264SessionParametersCreateInfoEXT::default()
            .parameters_add_info(&h264_add_info)
            .max_std_pps_count(1)
            .max_std_sps_count(1);

        let inner = super::EncoderInner::new(
            vk.clone(),
            params.width,
            params.height,
            framerate,
            structure.required_dpb_size(),
            profile.as_mut(),
            caps.video_caps,
            &mut session_params,
            sink,
        )?;

        // Generate encoded stream headers.
        let headers = unsafe {
            let mut h264_get_info = vk::VideoEncodeH264SessionParametersGetInfoEXT::default()
                .write_std_sps(true)
                .write_std_pps(true);

            let mut h264_feedback_info =
                vk::VideoEncodeH264SessionParametersFeedbackInfoEXT::default();
            let mut feedback_info = vk::VideoEncodeSessionParametersFeedbackInfoKHR::default()
                .push_next(&mut h264_feedback_info);

            let get_info = vk::VideoEncodeSessionParametersGetInfoKHR::default()
                .video_session_parameters(inner.session_params)
                .push_next(&mut h264_get_info);

            encode_loader
                .get_encoded_video_session_parameters(&get_info, &mut feedback_info)
                .context("vkGetEncodedVideoSessionParametersKHR")?
        };

        if headers.is_empty() {
            bail!("failed to generate sps/pps");
        } else {
            trace!("generated {} bytes of h264 headers", headers.len());
        }

        let pic_metadata = vec![H264Metadata::default(); structure.layers as usize];

        Ok(Self {
            inner,
            profile,
            rc_mode,
            structure,
            pic_metadata,
            idr_num: 0,
            frame_num: 0,
            headers: Bytes::copy_from_slice(&headers),
        })
    }

    /// Submits one input image for encoding, synchronized on the given
    /// timeline points.
    pub unsafe fn submit_encode(
        &mut self,
        input: &VkImage,
        tp_acquire: VkTimelinePoint,
        tp_release: VkTimelinePoint,
    ) -> anyhow::Result<()> {
        let frame_state = self.structure.next_frame();
        if frame_state.is_keyframe {
            self.idr_num += 1;
        }

        // frame_num resets at the start of each GOP.
        if frame_state.gop_position == 0 {
            self.frame_num = 0;
        }

        let pattern = if self.structure.layers > 1 {
            vk::VideoEncodeH264RateControlFlagsEXT::TEMPORAL_LAYER_PATTERN_DYADIC
        } else {
            vk::VideoEncodeH264RateControlFlagsEXT::REFERENCE_PATTERN_FLAT
        };

        // For VBR, build one rate control layer per temporal layer.
        let mut h264_rc_layers = Vec::new();
        let mut rc_layers = Vec::new();
        if let RateControlMode::Vbr(vbr) = self.rc_mode {
            let layer_settings = (0..self.structure.layers)
                .map(|layer| vbr.layer(layer))
                .collect::>();

            for settings in &layer_settings {
                h264_rc_layers.push(
                    vk::VideoEncodeH264RateControlLayerInfoEXT::default()
                        .use_min_qp(true)
                        .use_max_qp(true)
                        .min_qp(vk::VideoEncodeH264QpEXT {
                            qp_i: settings.min_qp as i32,
                            qp_p: settings.min_qp as i32,
                            qp_b: settings.min_qp as i32,
                        })
                        .max_qp(vk::VideoEncodeH264QpEXT {
qp_i: settings.max_qp as i32,
qp_p: settings.max_qp as i32,
qp_b: settings.max_qp as i32,
}),
);
}

// We can't do this in one step because the borrow checker doesn't
// like the way push_next borrows.
// TODO: Ash 0.39 may make this easier.
for (layer, (settings, h264)) in layer_settings
    .iter()
    .zip(h264_rc_layers.iter_mut())
    .enumerate()
{
    let (fps_numerator, fps_denominator) = self
        .structure
        .layer_framerate(layer as u32, self.inner.framerate);

    rc_layers.push(
        vk::VideoEncodeRateControlLayerInfoKHR::default()
            .max_bitrate(settings.peak_bitrate)
            .average_bitrate(settings.average_bitrate)
            .frame_rate_numerator(fps_numerator)
            .frame_rate_denominator(fps_denominator)
            .push_next(h264),
    );
}
}

let mut h264_rc_info = vk::VideoEncodeH264RateControlInfoEXT::default()
    .gop_frame_count(self.structure.gop_size)
    .idr_period(self.structure.gop_size)
    .consecutive_b_frame_count(0)
    .temporal_layer_count(rc_layers.len() as u32)
    .flags(vk::VideoEncodeH264RateControlFlagsEXT::REGULAR_GOP | pattern);

let vbv_size = match self.rc_mode {
    RateControlMode::Vbr(vbr) => vbr.vbv_size_ms,
    _ => 0,
};

let mut rc_info = vk::VideoEncodeRateControlInfoKHR::default()
    .rate_control_mode(self.rc_mode.as_vk_flags())
    .virtual_buffer_size_in_ms(vbv_size)
    .layers(&rc_layers);

// Doesn't have a push_next method, because we're supposed to call it on the
// parent struct.
rc_info.p_next = <*mut _>::cast(&mut h264_rc_info);

let weight_table: vk::native::StdVideoEncodeH264WeightTable = std::mem::zeroed();

let slice_type = if frame_state.is_keyframe {
    vk::native::StdVideoH264SliceType_STD_VIDEO_H264_SLICE_TYPE_I
} else {
    vk::native::StdVideoH264SliceType_STD_VIDEO_H264_SLICE_TYPE_P
};

let primary_pic_type = if frame_state.is_keyframe {
    vk::native::StdVideoH264PictureType_STD_VIDEO_H264_PICTURE_TYPE_IDR
} else {
    vk::native::StdVideoH264PictureType_STD_VIDEO_H264_PICTURE_TYPE_P
};

let mut std_slice_header = vk::native::StdVideoEncodeH264SliceHeader {
    slice_type,
    pWeightTable: &weight_table,
    ..std::mem::zeroed()
};

// Per the spec, this indicates that all slices in the picture are the same.
std_slice_header.slice_type += 5;

let nalu_slice_entries = [vk::VideoEncodeH264NaluSliceInfoEXT::default()
    .std_slice_header(&std_slice_header)
    .constant_qp(if let RateControlMode::ConstantQp(qp) = self.rc_mode {
        qp.layer(frame_state.id) as i32
    } else {
        0
    })];

let list0_mod_ops = std::mem::zeroed();
let list1_mod_ops = std::mem::zeroed();
let marking_ops = std::mem::zeroed();

// u8::MAX marks unused reference-list entries.
let mut ref_lists_info = vk::native::StdVideoEncodeH264ReferenceListsInfo {
    pRefList0ModOperations: &list0_mod_ops,
    pRefList1ModOperations: &list1_mod_ops,
    pRefPicMarkingOperations: &marking_ops,
    RefPicList0: [u8::MAX; 32],
    RefPicList1: [u8::MAX; 32],
    ..std::mem::zeroed()
};

// Point to the references.
for (idx, id) in frame_state.ref_ids.iter().enumerate() {
    let slot = self
        .inner
        .dpb
        .get_pic(*id)
        .ok_or(anyhow::anyhow!("ref pic {id} missing from dpb",))?;
    ref_lists_info.RefPicList0[idx] = slot.index as u8;
}

let mut std_pic_info = vk::native::StdVideoEncodeH264PictureInfo {
    flags: std::mem::zeroed(),
    seq_parameter_set_id: 0,
    pic_parameter_set_id: 0,
    idr_pic_id: self.idr_num as u16,
    primary_pic_type,
    frame_num: self.frame_num,
    PicOrderCnt: frame_state.gop_position as i32,
    temporal_id: frame_state.id as u8,
    pRefLists: &ref_lists_info,
    ..std::mem::zeroed()
};

std_pic_info
    .flags
    .set_IdrPicFlag(frame_state.is_keyframe as u32);
std_pic_info
    .flags
    .set_is_reference((frame_state.forward_ref_count > 0) as u32);

let mut h264_pic_info = vk::VideoEncodeH264PictureInfoEXT::default()
    .nalu_slice_entries(&nalu_slice_entries)
    .std_picture_info(&std_pic_info);

// Reference info for each picture this frame references, from the
// per-layer metadata saved on previous frames.
let mut std_ref_infos = frame_state
    .ref_ids
    .iter()
    .map(|id| vk::native::StdVideoEncodeH264ReferenceInfo {
        FrameNum: self.pic_metadata[*id as usize].frame_num,
        PicOrderCnt: self.pic_metadata[*id as usize].pic_order_cnt,
        temporal_id: *id as u8,
        ..std::mem::zeroed()
    })
    .collect::>();

let mut ref_info = std_ref_infos
    .iter_mut()
    .map(|info| vk::VideoEncodeH264DpbSlotInfoEXT::default().std_reference_info(info))
    .collect::>();

let setup_std_ref_info = vk::native::StdVideoEncodeH264ReferenceInfo {
    FrameNum: self.frame_num,
    PicOrderCnt: frame_state.gop_position as i32,
    temporal_id: frame_state.id as u8,
    ..std::mem::zeroed()
};

trace!(
    frame_num = self.frame_num,
    pic_order_cnt = frame_state.gop_position,
    "setting up h264 pic"
);

let mut setup_info =
    vk::VideoEncodeH264DpbSlotInfoEXT::default().std_reference_info(&setup_std_ref_info);

// SPS/PPS headers get prepended to every keyframe.
let insert = if frame_state.is_keyframe {
    Some(self.headers.clone())
} else {
    None
};

self.inner.submit_encode(
    input,
    tp_acquire,
    tp_release,
    &frame_state,
    &mut rc_info,
    &mut h264_pic_info,
    &mut setup_info,
    &mut ref_info,
    insert,
)?;

// Save the reference info for the DPB slot we just wrote.
self.pic_metadata[frame_state.id as usize] = H264Metadata {
    frame_num: self.frame_num,
    pic_order_cnt: frame_state.gop_position as i32,
};

// This is supposed to increment only for reference frames.
if frame_state.forward_ref_count > 0 {
    self.frame_num += 1;
}

Ok(())
}

/// The image format expected for input pictures.
pub fn input_format(&self) -> vk::Format {
    self.inner.input_format
}

/// Creates an input image compatible with this encoder's profile.
pub fn create_input_image(&mut self) -> anyhow::Result {
    self.inner.create_input_image(self.profile.as_mut())
}

/// Requests a keyframe at the start of the next mini-GOP.
pub fn request_refresh(&mut self) {
    self.structure.request_refresh()
}
}

================================================ FILE: mm-server/src/encoder/h265.rs ================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

use std::sync::Arc;

use anyhow::{bail, Context};
use ash::vk;
use bytes::Bytes;
use tracing::{debug, trace};

use super::gop_structure::HierarchicalP;
use super::rate_control::{self, RateControlMode};
use crate::codec::VideoCodec;
use crate::color::VideoProfile;
use crate::{session::control::VideoStreamParams, vulkan::*};

vk_chain! {
    pub struct H265EncodeProfile<'a> {
        pub profile_info: vk::VideoProfileInfoKHR<'a>,
        pub encode_usage_info: vk::VideoEncodeUsageInfoKHR<'a>,
        pub h265_profile: vk::VideoEncodeH265ProfileInfoEXT<'a>,
    }
}

vk_chain! {
    pub struct H265EncodeCapabilities<'a> {
        pub video_caps: vk::VideoCapabilitiesKHR<'a>,
        pub encode_caps: vk::VideoEncodeCapabilitiesKHR<'a>,
        pub h265_caps: vk::VideoEncodeH265CapabilitiesEXT<'a>,
    }
}

vk_chain! {
    pub struct H265QualityLevelProperties<'a> {
        pub props: vk::VideoEncodeQualityLevelPropertiesKHR<'a>,
        pub h265_props: vk::VideoEncodeH265QualityLevelPropertiesEXT<'a>,
    }
}

/// Per-reference bookkeeping for H.265 reference info.
#[derive(Debug, Default, Clone, Copy)]
struct H265Metadata {
    pic_type: u32,
    pic_order_cnt: i32,
    ref_count: u32,
}

pub struct H265Encoder {
    inner: super::EncoderInner,
    profile: H265EncodeProfile,
    rc_mode: super::rate_control::RateControlMode,
    structure: HierarchicalP,
    pic_metadata: Vec, // Indexed by layer.
idr_num: u32, frame_num: u32, headers: Bytes, } impl H265Encoder { pub fn new( vk: Arc, params: VideoStreamParams, framerate: u32, sink: impl super::Sink, ) -> anyhow::Result { let (video_loader, encode_loader) = vk.video_apis.as_ref().unwrap(); let op = vk::VideoCodecOperationFlagsKHR::ENCODE_H265_EXT; let (profile, profile_idc) = match params.profile { VideoProfile::Hd => (super::default_profile(op), 1), // Main VideoProfile::Hdr10 => (super::default_hdr10_profile(op), 2), // Main10 }; let h265_profile_info = vk::VideoEncodeH265ProfileInfoEXT::default().std_profile_idc(profile_idc); let mut profile = H265EncodeProfile::new( profile, super::default_encode_usage(vk.device_info.driver_version.clone()), h265_profile_info, ); let mut caps = H265EncodeCapabilities::default(); unsafe { video_loader .get_physical_device_video_capabilities( vk.device_info.pdevice, &profile.profile_info, caps.as_mut(), ) .context("vkGetPhysicalDeviceVideoCapabilitiesKHR")?; }; trace!("video capabilities: {:#?}", caps.video_caps); trace!("encode capabilities: {:#?}", caps.encode_caps); trace!("h265 capabilities: {:#?}", caps.h265_caps); let quality_level = caps.encode_caps.max_quality_levels - 1; let mut quality_props = H265QualityLevelProperties::default(); unsafe { let get_info = vk::PhysicalDeviceVideoEncodeQualityLevelInfoKHR::default() .video_profile(&profile.profile_info) .quality_level(quality_level); encode_loader.get_physical_device_video_encode_quality_level_properties( vk.device_info.pdevice, &get_info, quality_props.as_mut(), )?; } trace!("quality level properties: {:#?}", quality_props.props); trace!( "h265 quality level properties: {:#?}", quality_props.h265_props ); let structure = super::default_structure( VideoCodec::H265, caps.h265_caps .max_sub_layer_count .min(caps.encode_caps.max_rate_control_layers), caps.video_caps.max_dpb_slots, )?; let rc_mode = rate_control::select_rc_mode( params, &caps.encode_caps, caps.h265_caps.min_qp.try_into().unwrap_or(17), 
caps.h265_caps.max_qp.try_into().unwrap_or(50), &structure, ); debug!(?rc_mode, "selected rate control mode"); // TODO check more caps // TODO autoselect level let level_idc = vk::native::StdVideoH265LevelIdc_STD_VIDEO_H265_LEVEL_IDC_5_2; if caps.h265_caps.max_level_idc != 0 && caps.h265_caps.max_level_idc < level_idc { bail!("video resolution too large for hardware"); } const CTB_SIZES: [(vk::VideoEncodeH265CtbSizeFlagsEXT, usize); 3] = [ (vk::VideoEncodeH265CtbSizeFlagsEXT::TYPE_16, 16), (vk::VideoEncodeH265CtbSizeFlagsEXT::TYPE_32, 32), (vk::VideoEncodeH265CtbSizeFlagsEXT::TYPE_64, 64), ]; let min_ctb = CTB_SIZES .iter() .filter(|(flag, _)| caps.h265_caps.ctb_sizes.contains(*flag)) .map(|(_, size)| *size) .min() .expect("no ctb size found"); let max_ctb = CTB_SIZES .iter() .filter(|(flag, _)| caps.h265_caps.ctb_sizes.contains(*flag)) .map(|(_, size)| *size) .max() .expect("no ctb size found"); const TBS_SIZES: [(vk::VideoEncodeH265TransformBlockSizeFlagsEXT, usize); 4] = [ (vk::VideoEncodeH265TransformBlockSizeFlagsEXT::TYPE_4, 4), (vk::VideoEncodeH265TransformBlockSizeFlagsEXT::TYPE_8, 8), (vk::VideoEncodeH265TransformBlockSizeFlagsEXT::TYPE_16, 16), (vk::VideoEncodeH265TransformBlockSizeFlagsEXT::TYPE_32, 32), ]; let min_tbs = TBS_SIZES .iter() .filter(|(flag, _)| caps.h265_caps.transform_block_sizes.contains(*flag)) .map(|(_, size)| *size) .min() .expect("no tbs size found"); let max_tbs = TBS_SIZES .iter() .filter(|(flag, _)| caps.h265_caps.transform_block_sizes.contains(*flag)) .map(|(_, size)| *size) .max() .expect("no tbs size found"); let aligned_width = params .width .next_multiple_of(caps.encode_caps.encode_input_picture_granularity.width); let aligned_height = params .height .next_multiple_of(caps.encode_caps.encode_input_picture_granularity.height); trace!( min_ctb, max_ctb, min_tbs, max_tbs, aligned_width, aligned_height, "block sizes", ); let crop_right = (aligned_width - params.width) / 2; let crop_bottom = (aligned_height - params.height) / 2; 
trace!("crop right: {}, bottom: {}", crop_right, crop_bottom); let (colour_primaries, transfer_characteristics, matrix_coeffs) = match params.profile { VideoProfile::Hd => (1, 1, 1), VideoProfile::Hdr10 => (9, 16, 9), }; let mut vui = vk::native::StdVideoH265SequenceParameterSetVui { colour_primaries, transfer_characteristics, matrix_coeffs, // Unspecified. video_format: 5, ..unsafe { std::mem::zeroed() } }; vui.flags.set_video_signal_type_present_flag(1); vui.flags.set_colour_description_present_flag(1); vui.flags.set_video_full_range_flag(0); // Narrow range. let ptl = vk::native::StdVideoH265ProfileTierLevel { general_profile_idc: profile_idc, general_level_idc: level_idc, ..unsafe { std::mem::zeroed() } }; // ptl.flags.set_general_progressive_source_flag(1); // ptl.flags.set_general_interlaced_source_flag(0); let layers_minus_1 = (structure.layers - 1) as u8; let mut pbm: vk::native::StdVideoH265DecPicBufMgr = unsafe { std::mem::zeroed() }; pbm.max_dec_pic_buffering_minus1[layers_minus_1 as usize] = (structure.required_dpb_size() - 1) as u8; // No picture reordering. 
pbm.max_num_reorder_pics[layers_minus_1 as usize] = 0; pbm.max_latency_increase_plus1[layers_minus_1 as usize] = 0; let mut vps = vk::native::StdVideoH265VideoParameterSet { vps_max_sub_layers_minus1: layers_minus_1, pDecPicBufMgr: &pbm, pHrdParameters: std::ptr::null(), pProfileTierLevel: &ptl, ..unsafe { std::mem::zeroed() } }; vps.flags.set_vps_sub_layer_ordering_info_present_flag(1); vps.flags.set_vps_temporal_id_nesting_flag(1); let min_cb = 8_u8; let max_cb = max_ctb; let max_transform_hierarchy_depth = (max_ctb.ilog2() - min_tbs.ilog2()) as u8; let bit_depth = match params.profile { VideoProfile::Hd => 8, VideoProfile::Hdr10 => 10, }; let mut sps = vk::native::StdVideoH265SequenceParameterSet { chroma_format_idc: vk::native::StdVideoH265ChromaFormatIdc_STD_VIDEO_H265_CHROMA_FORMAT_IDC_420, pic_width_in_luma_samples: aligned_width, pic_height_in_luma_samples: aligned_height, sps_max_sub_layers_minus1: layers_minus_1, bit_depth_luma_minus8: bit_depth - 8, bit_depth_chroma_minus8: bit_depth - 8, log2_max_pic_order_cnt_lsb_minus4: 4, log2_min_luma_coding_block_size_minus3: (min_cb.ilog2() - 3) as u8, log2_diff_max_min_luma_coding_block_size: (max_cb.ilog2() - min_cb.ilog2()) as u8, log2_min_luma_transform_block_size_minus2: (min_tbs.ilog2() - 2) as u8, log2_diff_max_min_luma_transform_block_size: (max_tbs.ilog2() - min_tbs.ilog2()) as u8, max_transform_hierarchy_depth_inter: max_transform_hierarchy_depth, max_transform_hierarchy_depth_intra: max_transform_hierarchy_depth, conf_win_right_offset: crop_right, conf_win_bottom_offset: crop_bottom, pProfileTierLevel: &ptl, pDecPicBufMgr: &pbm, pSequenceParameterSetVui: &vui, ..unsafe { std::mem::zeroed() } }; sps.flags.set_conformance_window_flag(1); sps.flags.set_vui_parameters_present_flag(1); sps.flags.set_sps_temporal_id_nesting_flag(1); sps.flags.set_sps_sub_layer_ordering_info_present_flag(1); if caps .h265_caps .std_syntax_flags .contains(vk::VideoEncodeH265StdFlagsEXT::SAMPLE_ADAPTIVE_OFFSET_ENABLED_FLAG_SET) 
{ sps.flags.set_sample_adaptive_offset_enabled_flag(1); } if caps .h265_caps .std_syntax_flags .contains(vk::VideoEncodeH265StdFlagsEXT::TRANSFORM_SKIP_ENABLED_FLAG_SET) { sps.flags.set_transform_skip_context_enabled_flag(1); } let pps = vk::native::StdVideoH265PictureParameterSet { ..unsafe { std::mem::zeroed() } }; let sps = [sps]; let pps = [pps]; let vps = [vps]; let h265_add_info = vk::VideoEncodeH265SessionParametersAddInfoEXT::default() .std_vp_ss(&vps) .std_sp_ss(&sps) .std_pp_ss(&pps); let mut session_params = vk::VideoEncodeH265SessionParametersCreateInfoEXT::default() .parameters_add_info(&h265_add_info) .max_std_vps_count(1) .max_std_pps_count(1) .max_std_sps_count(1); let inner = super::EncoderInner::new( vk.clone(), params.width, params.height, framerate, structure.required_dpb_size(), profile.as_mut(), caps.video_caps, &mut session_params, sink, )?; // Generate encoded stream headers. let headers = unsafe { let mut h265_get_info = vk::VideoEncodeH265SessionParametersGetInfoEXT::default() .write_std_vps(true) .write_std_sps(true) .write_std_pps(true); let mut h265_feedback_info = vk::VideoEncodeH265SessionParametersFeedbackInfoEXT::default(); let mut feedback_info = vk::VideoEncodeSessionParametersFeedbackInfoKHR::default() .push_next(&mut h265_feedback_info); let get_info = vk::VideoEncodeSessionParametersGetInfoKHR::default() .video_session_parameters(inner.session_params) .push_next(&mut h265_get_info); encode_loader .get_encoded_video_session_parameters(&get_info, &mut feedback_info) .context("vkGetEncodedVideoSessionParametersKHR")? 
}; if headers.is_empty() { bail!("failed to generate sps/pps/vps"); } else { trace!("generated {} bytes of h265 headers", headers.len()); } let pic_metadata = vec![H265Metadata::default(); structure.layers as usize]; Ok(Self { inner, profile, rc_mode, structure, pic_metadata, idr_num: 0, frame_num: 0, headers: Bytes::copy_from_slice(&headers), }) } pub unsafe fn submit_encode( &mut self, input: &VkImage, tp_acquire: VkTimelinePoint, tp_release: VkTimelinePoint, ) -> anyhow::Result<()> { let frame_state = self.structure.next_frame(); if frame_state.is_keyframe { self.idr_num += 1; self.frame_num = 0; } let pattern = if self.structure.layers > 1 { vk::VideoEncodeH265RateControlFlagsEXT::TEMPORAL_SUB_LAYER_PATTERN_DYADIC } else { vk::VideoEncodeH265RateControlFlagsEXT::REFERENCE_PATTERN_FLAT }; let mut h265_rc_layers = Vec::new(); let mut rc_layers = Vec::new(); if let RateControlMode::Vbr(vbr) = self.rc_mode { let layer_settings = (0..self.structure.layers) .map(|layer| vbr.layer(layer)) .collect::>(); for settings in &layer_settings { h265_rc_layers.push( vk::VideoEncodeH265RateControlLayerInfoEXT::default() .use_min_qp(true) .use_max_qp(true) .min_qp(vk::VideoEncodeH265QpEXT { qp_i: settings.min_qp as i32, qp_p: settings.min_qp as i32, qp_b: settings.min_qp as i32, }) .max_qp(vk::VideoEncodeH265QpEXT { qp_i: settings.max_qp as i32, qp_p: settings.max_qp as i32, qp_b: settings.max_qp as i32, }), ); } for (layer, (settings, h265_rc_layer)) in layer_settings .iter() .zip(h265_rc_layers.iter_mut()) .enumerate() { let (fps_numerator, fps_denominator) = self .structure .layer_framerate(layer as u32, self.inner.framerate); rc_layers.push( vk::VideoEncodeRateControlLayerInfoKHR::default() .max_bitrate(settings.peak_bitrate) .average_bitrate(settings.average_bitrate) .frame_rate_numerator(fps_numerator) .frame_rate_denominator(fps_denominator) .push_next(h265_rc_layer), ); } } let mut h265_rc_info = vk::VideoEncodeH265RateControlInfoEXT::default() 
.gop_frame_count(self.structure.gop_size) .idr_period(self.structure.gop_size) .consecutive_b_frame_count(0) .sub_layer_count(rc_layers.len() as u32) .flags(vk::VideoEncodeH265RateControlFlagsEXT::REGULAR_GOP | pattern); let vbv_size = match self.rc_mode { RateControlMode::Vbr(settings) => settings.vbv_size_ms, _ => 0, }; let mut rc_info = vk::VideoEncodeRateControlInfoKHR::default() .rate_control_mode(self.rc_mode.as_vk_flags()) .virtual_buffer_size_in_ms(vbv_size); if !rc_layers.is_empty() { rc_info = rc_info.layers(&rc_layers); } // Doesn't have a push_next method, because we're supposed to call it on // the parent struct. rc_info.p_next = <*mut _>::cast(&mut h265_rc_info); let weight_table: vk::native::StdVideoEncodeH265WeightTable = std::mem::zeroed(); let slice_type = if frame_state.is_keyframe { vk::native::StdVideoH265SliceType_STD_VIDEO_H265_SLICE_TYPE_I } else { vk::native::StdVideoH265SliceType_STD_VIDEO_H265_SLICE_TYPE_P }; let pic_type = if frame_state.is_keyframe { vk::native::StdVideoH265PictureType_STD_VIDEO_H265_PICTURE_TYPE_IDR } else { vk::native::StdVideoH265PictureType_STD_VIDEO_H265_PICTURE_TYPE_P }; let std_slice_header = vk::native::StdVideoEncodeH265SliceSegmentHeader { slice_type, pWeightTable: &weight_table, MaxNumMergeCand: 5, // Decoders complain if this is zero. The max value is 5. 
..std::mem::zeroed() }; let slice_segment_info = [vk::VideoEncodeH265NaluSliceSegmentInfoEXT::default() .std_slice_segment_header(&std_slice_header) .constant_qp(if let RateControlMode::ConstantQp(qp) = self.rc_mode { qp.layer(frame_state.id) as i32 } else { 0 })]; let mut ref_lists_info = vk::native::StdVideoEncodeH265ReferenceListsInfo { RefPicList0: [u8::MAX; 15], RefPicList1: [u8::MAX; 15], ..std::mem::zeroed() }; for (idx, id) in frame_state.ref_ids.iter().enumerate() { let pic = self .inner .dpb .get_pic(*id) .ok_or(anyhow::anyhow!("ref pic {id} missing from dpb"))?; ref_lists_info.RefPicList0[idx] = pic.index as u8; } // For each frame, we have to tell the decoder which pictures will be // used as references in the future, in addition to those that are // references for this frame. let mut ref_ids = if frame_state.is_keyframe { // If we're outputting a keyframe, clear the forward reference counts. for md in &mut self.pic_metadata { md.ref_count = 0; } Vec::new() } else { self.pic_metadata .iter_mut() .enumerate() .filter_map(|(id, md)| { let id = id as u32; if md.ref_count == 0 { None } else if frame_state.ref_ids.contains(&id) { md.ref_count -= 1; Some((id, true)) } else { Some((id, false)) } }) .collect::>() }; // Sort in descending order of POC. ref_ids.sort_by_key(|(id, _)| { std::cmp::Reverse(self.pic_metadata[*id as usize].pic_order_cnt) }); let mut short_term_refs = vk::native::StdVideoH265ShortTermRefPicSet { used_by_curr_pic_s0_flag: 0, num_negative_pics: ref_ids.len() as u8, // No forward refs. used_by_curr_pic_s1_flag: 0, num_positive_pics: 0, ..std::mem::zeroed() }; let pic_order_cnt = frame_state.gop_position as i32; let mut delta_poc = 0; for (idx, (id, is_direct_ref)) in ref_ids.into_iter().enumerate() { // delta_poc accumulates for each step backwards in time we take. // So if a frame references the immediately preceding one and then a // frame four frames ago, the delta_poc values are 1 and 3. 
// // Taking the modulo allows us to reference frames across a GOP // boundary. delta_poc = (pic_order_cnt - self.pic_metadata[id as usize].pic_order_cnt) .rem_euclid(self.structure.gop_size as i32) - delta_poc; short_term_refs.delta_poc_s0_minus1[idx] = (delta_poc - 1) as u16; if is_direct_ref { short_term_refs.used_by_curr_pic_s0_flag |= 1 << idx; } } let mut std_pic_info = vk::native::StdVideoEncodeH265PictureInfo { pic_type, sps_video_parameter_set_id: 0, pps_seq_parameter_set_id: 0, pps_pic_parameter_set_id: 0, PicOrderCntVal: frame_state.gop_position as i32, TemporalId: frame_state.id as u8, pRefLists: &ref_lists_info, pShortTermRefPicSet: &short_term_refs, ..std::mem::zeroed() }; std_pic_info .flags .set_IrapPicFlag(frame_state.is_keyframe as u32); std_pic_info .flags .set_is_reference((frame_state.forward_ref_count > 0) as u32); if frame_state.is_keyframe { std_pic_info.flags.set_pic_output_flag(1); std_pic_info.flags.set_no_output_of_prior_pics_flag(1); } let mut h265_pic_info = vk::VideoEncodeH265PictureInfoEXT::default() .std_picture_info(&std_pic_info) .nalu_slice_segment_entries(&slice_segment_info); let mut std_ref_infos = frame_state .ref_ids .iter() .map(|id| vk::native::StdVideoEncodeH265ReferenceInfo { pic_type: self.pic_metadata[*id as usize].pic_type, PicOrderCntVal: self.pic_metadata[*id as usize].pic_order_cnt, TemporalId: *id as u8, ..std::mem::zeroed() }) .collect::>(); let mut ref_info = std_ref_infos .iter_mut() .map(|info| vk::VideoEncodeH265DpbSlotInfoEXT::default().std_reference_info(info)) .collect::>(); let setup_std_ref_info = vk::native::StdVideoEncodeH265ReferenceInfo { pic_type, PicOrderCntVal: pic_order_cnt, TemporalId: frame_state.id as u8, ..std::mem::zeroed() }; let mut setup_info = vk::VideoEncodeH265DpbSlotInfoEXT::default().std_reference_info(&setup_std_ref_info); let insert = if frame_state.is_keyframe { Some(self.headers.clone()) } else { None }; self.inner.submit_encode( input, tp_acquire, tp_release, &frame_state, &mut 
rc_info, &mut h265_pic_info, &mut setup_info, &mut ref_info, insert, )?; // Save the reference info for the DPB slot we just wrote. self.pic_metadata[frame_state.id as usize] = H265Metadata { pic_type, pic_order_cnt, ref_count: frame_state.forward_ref_count, }; // This is supposed to increment only for reference frames. if frame_state.forward_ref_count > 0 { self.frame_num += 1; } Ok(()) } pub fn input_format(&self) -> vk::Format { self.inner.input_format } pub fn create_input_image(&mut self) -> anyhow::Result { self.inner.create_input_image(self.profile.as_mut()) } pub fn request_refresh(&mut self) { self.structure.request_refresh() } } ================================================ FILE: mm-server/src/encoder/rate_control.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use ash::vk; use tracing::warn; use crate::session::control::VideoStreamParams; // Bitrate is defined here in terms of 1080p, and scaled nonlinearly to the // target resolution. Values are indexed by quality preset. Values 7/8/9 are // only used if CRF is unsupported by the driver. 
// Baseline average bitrate per quality preset (0-9), defined for 1080p and
// scaled to the actual resolution in `select_rc_mode`.
const BASELINE_AVG_BITRATE_MBPS: [f32; 10] = [2.5, 3.0, 4.0, 5.0, 6.0, 8.0, 10.0, 12.0, 25.0, 50.0];
// Baseline peak (burst) bitrate per quality preset; scaled the same way.
const BASELINE_PEAK_BITRATE_MBPS: [f32; 10] = [5.0, 8.0, 10.0, 15.0, 20.0, 30.0, 40.0, 60.0, 80.0, 100.0];
// The pixel count the baselines are defined against (1920x1080).
const BASELINE_DIMS: f32 = 1920.0 * 1080.0;
// VBV (decoder buffer) size handed to the driver, in milliseconds.
const VBV_SIZE: u32 = 2500;

/// The rate-control strategy selected for an encode session.
#[derive(Debug, Clone)]
pub enum RateControlMode {
    /// Constant-QP encoding; driver rate control is disabled and a fixed QP
    /// is supplied per layer.
    ConstantQp(CascadingQp),
    /// Variable bitrate with per-temporal-layer settings.
    Vbr(LayeredVbr),
    /// Let the driver decide; used only when neither mode above is supported.
    Defaults,
}

impl RateControlMode {
    /// Maps the mode to the Vulkan rate-control mode flag. Note that
    /// constant-QP corresponds to DISABLED, because the QP is set explicitly
    /// per slice rather than managed by the driver.
    pub fn as_vk_flags(&self) -> vk::VideoEncodeRateControlModeFlagsKHR {
        match self {
            Self::ConstantQp(_) => vk::VideoEncodeRateControlModeFlagsKHR::DISABLED,
            Self::Vbr(_) => vk::VideoEncodeRateControlModeFlagsKHR::VBR,
            Self::Defaults => vk::VideoEncodeRateControlModeFlagsKHR::DEFAULT,
        }
    }
}

/// A constant-QP configuration where higher temporal layers get a higher
/// (coarser) QP, capped at `max`.
#[derive(Debug, Clone, Copy)]
pub struct CascadingQp {
    target: u32,
    max: u32,
}

impl CascadingQp {
    /// Returns the QP to use for the given temporal layer.
    pub fn layer(&self, layer: u32) -> u32 {
        layer_qp(self.target, layer).min(self.max)
    }
}

/// VBR parameters for a single temporal layer.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct VbrSettings {
    pub average_bitrate: u64,
    pub peak_bitrate: u64,
    pub max_qp: u32,
    pub min_qp: u32,
}

/// VBR configuration for a stream with one or more temporal layers.
#[derive(Debug, Clone, Copy)]
pub struct LayeredVbr {
    pub vbv_size_ms: u32,
    base: VbrSettings,
    num_layers: u32,
}

impl LayeredVbr {
    /// Derives the settings for one temporal layer from the base settings.
    /// With multiple layers, each layer's bitrate share halves as the layer
    /// index rises, and the max QP is relaxed per `layer_qp`.
    pub fn layer(&self, layer: u32) -> VbrSettings {
        if self.num_layers <= 1 {
            return self.base;
        }

        // Layer 0 gets half the base bitrate, layer 1 a quarter, and so on.
        let bitrate_denominator = 2_u64.pow(layer + 1);
        let max_qp = layer_qp(self.base.max_qp, layer).clamp(self.base.min_qp, self.base.max_qp);

        VbrSettings {
            average_bitrate: self.base.average_bitrate / bitrate_denominator,
            peak_bitrate: self.base.peak_bitrate / bitrate_denominator,
            max_qp,
            min_qp: self.base.min_qp,
        }
    }
}

/// Chooses a rate-control mode from the stream parameters and the
/// capabilities reported by the driver.
///
/// Presets 7-9 prefer constant-QP (when the driver supports disabling rate
/// control); presets 0-6 prefer VBR; otherwise we fall back to constant-QP,
/// and finally to driver defaults. `min_qp`/`max_qp` are the codec caps'
/// QP bounds.
pub fn select_rc_mode(
    params: VideoStreamParams,
    caps: &vk::VideoEncodeCapabilitiesKHR,
    min_qp: u32,
    max_qp: u32,
    structure: &super::gop_structure::HierarchicalP,
) -> RateControlMode {
    assert!(params.preset <= 9);

    // Never go below QP 17, regardless of what the hardware allows.
    let min_qp = 17.max(min_qp);
    let target_qp = 40 - (2 * params.preset); // 22 - 40;

    let supports_crf = caps
        .rate_control_modes
        .contains(vk::VideoEncodeRateControlModeFlagsKHR::DISABLED);
    let supports_vbr = caps
        .rate_control_modes
        .contains(vk::VideoEncodeRateControlModeFlagsKHR::VBR);

    if params.preset >= 7 && supports_crf {
        // Presets 7/8/9 use a very low constant QP.
        RateControlMode::ConstantQp(CascadingQp {
            target: target_qp.clamp(min_qp, max_qp),
            max: max_qp,
        })
    } else if supports_vbr {
        // 6 and lower use VBR, starting with a high peak and reducing as the
        // presets get lower.
        let scale = ((params.width * params.height) as f32 / BASELINE_DIMS).sqrt();

        const MBPS: f32 = 1_000_000.0;
        let average_bitrate =
            (BASELINE_AVG_BITRATE_MBPS[params.preset as usize] * MBPS * scale).round() as u64;
        let peak_bitrate =
            (BASELINE_PEAK_BITRATE_MBPS[params.preset as usize] * MBPS * scale).round() as u64;

        RateControlMode::Vbr(LayeredVbr {
            vbv_size_ms: VBV_SIZE,
            base: VbrSettings {
                average_bitrate,
                peak_bitrate,
                min_qp,
                max_qp: target_qp.clamp(min_qp, max_qp),
            },
            num_layers: structure.layers,
        })
    } else if supports_crf {
        // Fall back to CRF with a high QP.
        RateControlMode::ConstantQp(CascadingQp {
            target: target_qp.clamp(min_qp, max_qp),
            max: max_qp,
        })
    } else {
        warn!("no rate control modes available, using driver defaults!");
        RateControlMode::Defaults
    }
}

/// Determines the constant QP for a layer given the target QP.
fn layer_qp(target_qp: u32, layer: u32) -> u32 {
    // Example: for a target QP of 22, the QP for each layer is:
    // 22, 27, 29, 31...
target_qp + (3 * layer.min(1)) + (layer * 2)
}


================================================
FILE: mm-server/src/encoder/stats.rs
================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

use std::{sync::Arc, time};

use parking_lot::Mutex;

/// Aggregated statistics about encoded frame sizes. Cheap to clone and safe
/// to share across threads: all state lives behind a mutex.
#[derive(Default, Clone)]
pub struct EncodeStats {
    // NOTE(review): generic arguments were lost in extraction; restored as
    // Arc<Mutex<Inner>> based on the `self.inner.lock()` usage below.
    inner: Arc<Mutex<Inner>>,
}

struct Inner {
    start: time::Instant,
    // Totals across the whole stream.
    stream_stats: LayerStats,
    // IDR frames only.
    keyframe_stats: LayerStats,
    // Per-temporal-layer stats for non-keyframes, indexed by layer and grown
    // lazily as layers are observed.
    // NOTE(review): restored as Vec<LayerStats> (generic argument was lost
    // in extraction).
    layer_stats: Vec<LayerStats>,
}

impl Default for Inner {
    fn default() -> Self {
        let start = time::Instant::now();
        Self {
            start,
            stream_stats: LayerStats::new(start),
            keyframe_stats: LayerStats::new(start),
            layer_stats: Vec::new(),
        }
    }
}

/// Min/max/total frame sizes for one bucket (whole stream, keyframes, or a
/// single temporal layer).
struct LayerStats {
    start: time::Instant,
    min: usize, // Smallest frame seen, in bytes; 0 until the first frame.
    max: usize, // Largest frame seen, in bytes.
    total: u64, // Sum of all frame sizes, in bytes.
}

impl LayerStats {
    fn new(start: time::Instant) -> Self {
        Self {
            start,
            min: 0,
            max: 0,
            total: 0,
        }
    }

    /// Folds one encoded frame of `len` bytes into the running stats.
    fn record_frame_size(&mut self, len: usize) {
        self.total += len as u64;
        // min == 0 doubles as "no frames recorded yet".
        if self.min == 0 || len < self.min {
            self.min = len;
        }

        if len > self.max {
            self.max = len;
        }
    }
}

impl std::fmt::Debug for LayerStats {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let period = self.start.elapsed();

        // Was "EncodeStats", which mislabeled per-layer entries in debug
        // output; use the actual type name.
        let mut f = f.debug_struct("LayerStats");
        f.field("frame_min", &self.min);
        f.field("frame_max", &self.max);
        f.field("rate", &calculate_rate(period, self.total));
        f.finish()
    }
}

impl EncodeStats {
    /// Records the size of one encoded frame. Keyframes are tracked
    /// separately from the per-layer buckets.
    pub fn record_frame_size(&self, is_keyframe: bool, layer: u32, len: usize) {
        let mut inner = self.inner.lock();
        inner.stream_stats.record_frame_size(len);

        if is_keyframe {
            inner.keyframe_stats.record_frame_size(len);
        } else {
            let layer = layer as usize;
            // Grow the per-layer vec on demand; it is never shrunk.
            let layers = (layer + 1).max(inner.layer_stats.len());
            let start = inner.start;
            inner
                .layer_stats
                .resize_with(layers, || LayerStats::new(start));

            inner.layer_stats[layer].record_frame_size(len);
        }
    }
}

impl std::fmt::Debug for EncodeStats {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let inner = self.inner.lock();

        let mut f =
f.debug_struct("EncodeStats"); f.field("duration", &inner.start.elapsed()); f.field("totals", &inner.stream_stats); f.field("IDR", &inner.keyframe_stats); for (layer, stats) in inner.layer_stats.iter().enumerate() { f.field(&format!("P{layer}"), &stats); } f.finish() } } fn calculate_rate(dur: time::Duration, total: u64) -> f32 { // Total is in bytes, we want mbit/s. let total_mbits = total as f32 / (1024.0 * 1024.0) * 8.0; total_mbits / dur.as_secs_f32() } ================================================ FILE: mm-server/src/encoder.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 // It's not me, it's vulkan. #![allow(clippy::too_many_arguments)] use std::sync::Arc; use std::time; use anyhow::{anyhow, bail, Context}; use ash::vk; use bytes::Bytes; use crossbeam_channel as crossbeam; use tracing::{debug, error, instrument, trace, trace_span}; use self::gop_structure::HierarchicalP; use crate::codec::VideoCodec; use crate::session::control::VideoStreamParams; use crate::vulkan::video::VideoQueueExt; use crate::vulkan::*; mod dpb; mod gop_structure; mod rate_control; mod stats; mod h264; use h264::H264Encoder; mod h265; use h265::H265Encoder; pub enum Encoder { H264(H264Encoder), H265(H265Encoder), } impl Encoder { pub fn new( vk: Arc, params: VideoStreamParams, framerate: u32, sink: impl Sink, ) -> anyhow::Result { match params.codec { VideoCodec::H264 => Ok(Self::H264(H264Encoder::new(vk, params, framerate, sink)?)), VideoCodec::H265 => Ok(Self::H265(H265Encoder::new(vk, params, framerate, sink)?)), _ => bail!("unsupported codec"), } } pub unsafe fn submit_encode( &mut self, image: &VkImage, acquire: VkTimelinePoint, release: VkTimelinePoint, ) -> anyhow::Result<()> { match self { Self::H264(encoder) => encoder.submit_encode(image, acquire, release), Self::H265(encoder) => encoder.submit_encode(image, acquire, release), } } pub fn input_format(&self) -> vk::Format { match self { 
Self::H264(encoder) => encoder.input_format(), Self::H265(encoder) => encoder.input_format(), } } pub fn create_input_image(&mut self) -> anyhow::Result { match self { Self::H264(encoder) => encoder.create_input_image(), Self::H265(encoder) => encoder.create_input_image(), } } pub fn request_refresh(&mut self) { match self { Encoder::H264(encoder) => encoder.request_refresh(), Encoder::H265(encoder) => encoder.request_refresh(), } } } struct EncoderInner { session: vk::VideoSessionKHR, session_memory: Vec, session_params: vk::VideoSessionParametersKHR, writer_thread_handle: Option>>, submitted_frames: Option>, done_frames: crossbeam::Receiver, dpb: dpb::DpbPool, width: u32, height: u32, framerate: u32, input_format: vk::Format, stats: stats::EncodeStats, vk: Arc, } impl EncoderInner { pub fn new( vk: Arc, width: u32, height: u32, framerate: u32, required_dpb_size: usize, profile: &mut vk::VideoProfileInfoKHR, capabilities: vk::VideoCapabilitiesKHR, session_params: &mut impl vk::ExtendsVideoSessionParametersCreateInfoKHR, sink: impl Sink, ) -> anyhow::Result { if vk.encode_queue.is_none() { bail!("no vulkan video support") } let (video_loader, _encode_loader) = vk.video_apis.as_ref().unwrap(); let encode_family = vk.device_info.encode_family.unwrap(); if capabilities.max_coded_extent.width < width || capabilities.max_coded_extent.height < height { bail!( "video resolution too large: (max {}x{})", capabilities.max_coded_extent.width, capabilities.max_coded_extent.height ); } let format_info = list_format_props( video_loader, vk.device_info.pdevice, profile, vk::ImageUsageFlags::VIDEO_ENCODE_SRC_KHR, )?; for format in &format_info { trace!(?format, "available input format"); } let input_format = match format_info.first() { Some(format) => format.format, None => bail!("unable to determine supported ENCODE_SRC format"), }; trace!(?input_format, width, height, "using input format"); let buffer_size_alignment = capabilities.min_bitstream_buffer_size_alignment as usize; 
let session = { let create_info = vk::VideoSessionCreateInfoKHR::default() .queue_family_index(encode_family) .flags(vk::VideoSessionCreateFlagsKHR::ALLOW_ENCODE_PARAMETER_OPTIMIZATIONS) .video_profile(profile) .picture_format(input_format) .reference_picture_format(input_format) .max_coded_extent(capabilities.max_coded_extent) .max_dpb_slots(capabilities.max_dpb_slots) .max_active_reference_pictures(capabilities.max_active_reference_pictures) .std_header_version(&capabilities.std_header_version); unsafe { video_loader .create_video_session(&create_info, None) .context("vkCreateVideoSessionKHR")? } }; let session_memory = bind_session_memory(video_loader, &vk.device, &vk.device_info, session)?; let session_params = { let create_info = vk::VideoSessionParametersCreateInfoKHR::default() .video_session(session) .push_next(session_params); unsafe { video_loader .create_video_session_parameters(&create_info, None) .context("vkCreateVideoSessionParametersKHR")? } }; let dpb = if capabilities .flags .contains(vk::VideoCapabilityFlagsKHR::SEPARATE_REFERENCE_IMAGES) { trace!("using separate images for DPB pool"); dpb::DpbPool::new_separate_images( vk.clone(), input_format, width.next_multiple_of(capabilities.picture_access_granularity.width), height.next_multiple_of(capabilities.picture_access_granularity.height), profile, required_dpb_size, )? } else { trace!("using shared image for DPB pool"); dpb::DpbPool::new( vk.clone(), input_format, width.next_multiple_of(capabilities.picture_access_granularity.width), height.next_multiple_of(capabilities.picture_access_granularity.height), profile, required_dpb_size, )? }; let stats = stats::EncodeStats::default(); let (submitted_frames_tx, submitted_frames_rx) = crossbeam::bounded(1); let (done_frames_tx, done_frames_rx) = crossbeam::unbounded(); for _frame in 0..2 { // We need a frame name for each swapframe. 
#[cfg(feature = "tracy")] let frame_name = [ tracy_client::frame_name!("composite + encode 1"), tracy_client::frame_name!("composite + encode 2"), ][_frame]; done_frames_tx .send(EncoderOutputFrame::new( vk.clone(), width, height, buffer_size_alignment, profile, #[cfg(feature = "tracy")] frame_name, )?) .unwrap(); } let vk_clone = vk.clone(); let stats_clone = stats.clone(); let handle = std::thread::Builder::new() .name("encoder writer".to_owned()) .spawn(move || { writer_thread( vk_clone, submitted_frames_rx, done_frames_tx, sink, stats_clone, ) })?; Ok(Self { session, session_params, session_memory, writer_thread_handle: Some(handle), submitted_frames: Some(submitted_frames_tx), done_frames: done_frames_rx, dpb, width, height, framerate, input_format, stats, vk, }) } fn create_input_image(&self, profile: &mut vk::VideoProfileInfoKHR) -> anyhow::Result { let image = { let mut profile_list_info = single_profile_list_info(profile); let create_info = vk::ImageCreateInfo::default() .image_type(vk::ImageType::TYPE_2D) .format(self.input_format) .extent(vk::Extent3D { width: self.width, height: self.height, depth: 1, }) .mip_levels(1) .array_layers(1) .samples(vk::SampleCountFlags::TYPE_1) .tiling(vk::ImageTiling::OPTIMAL) .usage(vk::ImageUsageFlags::VIDEO_ENCODE_SRC_KHR | vk::ImageUsageFlags::STORAGE) .sharing_mode(vk::SharingMode::EXCLUSIVE) .initial_layout(vk::ImageLayout::UNDEFINED) .flags(vk::ImageCreateFlags::MUTABLE_FORMAT | vk::ImageCreateFlags::EXTENDED_USAGE) .push_next(&mut profile_list_info); unsafe { self.vk .device .create_image(&create_info, None) .context("VkCreateImage")? } }; let memory = unsafe { bind_memory_for_image(&self.vk.device, &self.vk.device_info.memory_props, image)? 
}; let view = unsafe { let mut usage_info = vk::ImageViewUsageCreateInfo::default() .usage(vk::ImageUsageFlags::VIDEO_ENCODE_SRC_KHR); let create_info = vk::ImageViewCreateInfo::default() .image(image) .view_type(vk::ImageViewType::TYPE_2D) .format(self.input_format) .subresource_range(vk::ImageSubresourceRange { aspect_mask: vk::ImageAspectFlags::COLOR, base_mip_level: 0, level_count: 1, base_array_layer: 0, layer_count: 1, }) .push_next(&mut usage_info); self.vk .device .create_image_view(&create_info, None) .context("VkCreateImageView")? }; Ok(VkImage::wrap( self.vk.clone(), image, view, memory, self.input_format, self.width, self.height, )) } #[instrument(skip_all)] pub unsafe fn submit_encode( &mut self, input: &VkImage, tp_acquire: VkTimelinePoint, tp_release: VkTimelinePoint, frame_state: &gop_structure::GopFrame, rc_info: &mut (impl vk::ExtendsVideoBeginCodingInfoKHR + vk::ExtendsVideoCodingControlInfoKHR), codec_pic_info: &mut impl vk::ExtendsVideoEncodeInfoKHR, codec_setup_info: &mut impl vk::ExtendsVideoReferenceSlotInfoKHR, codec_ref_info: &mut [impl vk::ExtendsVideoReferenceSlotInfoKHR], insert: Option, ) -> anyhow::Result<()> { use ash::vk::Handle; if self.session_params.is_null() { bail!("session parameters not yet created"); } let (video_loader, encode_loader) = self.vk.video_apis.as_ref().unwrap(); let encode_queue = self.vk.encode_queue.as_ref().unwrap(); // "Acquire" a buffer to copy to. This provides backpressure if the // encoder can't keep up. 
let res = trace_span!("wait_prev_frame").in_scope(|| self.done_frames.recv()); let mut frame = match res { Ok(frame) => frame, Err(_) => { bail!("copy thread died"); } }; #[cfg(feature = "tracy")] { frame.tracy_context.frame = Some( tracy_client::Client::running() .expect("no tracy client") .non_continuous_frame(frame.tracy_context.frame_name), ); if let Some(ref ctx) = encode_queue.tracy_context { frame.tracy_context.encode_span = Some(ctx.span(tracy_client::span_location!("encode"))?); } } begin_command_buffer(&self.vk.device, frame.encode_cb)?; // Record the start timestamp. #[cfg(feature = "tracy")] if let Some(encode_ts_pool) = &mut frame.tracy_context.encode_ts_pool { encode_ts_pool.cmd_reset(&self.vk.device, frame.encode_cb); self.vk.device.cmd_write_timestamp( frame.encode_cb, vk::PipelineStageFlags::TOP_OF_PIPE, encode_ts_pool.pool, 0, ); } // Acquire the image from the graphics queue. insert_image_barrier( &self.vk.device, frame.encode_cb, input.image, Some((self.vk.graphics_queue.family, encode_queue.family)), vk::ImageLayout::GENERAL, vk::ImageLayout::VIDEO_ENCODE_SRC_KHR, vk::PipelineStageFlags2::NONE, vk::AccessFlags2::NONE, vk::PipelineStageFlags2::VIDEO_ENCODE_KHR, vk::AccessFlags2::VIDEO_ENCODE_READ_KHR, ); // Bind the setup picture and any reference pictures. 
let setup_pic = self.dpb.setup_pic(); let ref_pics = frame_state .ref_ids .iter() .map(|id| { self.dpb .get_pic(*id) .ok_or(anyhow!("ref pic {id} missing from dpb")) }) .collect::>>()?; let mut bound_pics = vec![vk::VideoReferenceSlotInfoKHR::default() .slot_index(if setup_pic.currently_active { setup_pic.index as i32 } else { -1 }) .picture_resource(&setup_pic.picture_resource_info)]; for ref_pic in &ref_pics { assert!(ref_pic.currently_active); bound_pics.push( vk::VideoReferenceSlotInfoKHR::default() .slot_index(ref_pic.index as i32) .picture_resource(&ref_pic.picture_resource_info), ); } trace!( ref_ids = ?frame_state.ref_ids, ref_slots = ?ref_pics.iter().map(|p| p.index).collect::>(), setup_id = frame_state.id, setup_slot = setup_pic.index, gop_position = frame_state.gop_position, is_keyframe = frame_state.is_keyframe, forward_ref_count = frame_state.forward_ref_count, input_image = ?input.image, "encoding frame" ); // Bind the session. { let mut begin_info = vk::VideoBeginCodingInfoKHR::default() .flags(vk::VideoBeginCodingFlagsKHR::empty()) .video_session(self.session) .video_session_parameters(self.session_params) .reference_slots(&bound_pics); // Vulkan wants us to inform it of the current rate control, which // is unset on the first frame. if frame_state.stream_position != 0 { begin_info = begin_info.push_next(rc_info) } unsafe { video_loader.cmd_begin_video_coding(frame.encode_cb, &begin_info); }; } // Reset on keyframes. if frame_state.is_keyframe { let ctrl_info = vk::VideoCodingControlInfoKHR::default() .flags( vk::VideoCodingControlFlagsKHR::RESET | vk::VideoCodingControlFlagsKHR::ENCODE_RATE_CONTROL, ) .push_next(rc_info); unsafe { video_loader.cmd_control_video_coding(frame.encode_cb, &ctrl_info); }; } // Encode. self.vk.device.cmd_begin_query( frame.encode_cb, frame.query_pool, 0, vk::QueryControlFlags::empty(), ); { // The input picture. 
let src_pic_resource = vk::VideoPictureResourceInfoKHR::default() .coded_extent(vk::Extent2D { width: self.width, height: self.height, }) .image_view_binding(input.view); // The slot we're writing to. let setup_reference_slot = vk::VideoReferenceSlotInfoKHR::default() .slot_index(setup_pic.index as i32) .picture_resource(&setup_pic.picture_resource_info) .push_next(codec_setup_info); // The slots we're referencing. let reference_slots = ref_pics .iter() .zip(codec_ref_info.iter_mut()) .map(|(ref_pic, codec_ref_info)| { vk::VideoReferenceSlotInfoKHR::default() .slot_index(ref_pic.index as i32) .picture_resource(&ref_pic.picture_resource_info) .push_next(codec_ref_info) }) .collect::>(); let encode_info = vk::VideoEncodeInfoKHR::default() .flags(vk::VideoEncodeFlagsKHR::empty()) .dst_buffer(frame.copy_buffer.buffer) .dst_buffer_range(frame.copy_buffer.len as u64) .src_picture_resource(src_pic_resource) .setup_reference_slot(&setup_reference_slot) .reference_slots(&reference_slots) .push_next(codec_pic_info); // Transition the DPB images/layers we need. let mut dpb_barriers = Vec::new(); for pic in &ref_pics { dpb_barriers.push( vk::ImageMemoryBarrier2::default() .src_stage_mask(vk::PipelineStageFlags2::NONE) .src_access_mask(vk::AccessFlags2::NONE) .dst_stage_mask(vk::PipelineStageFlags2::VIDEO_ENCODE_KHR) .dst_access_mask(vk::AccessFlags2::VIDEO_ENCODE_READ_KHR) .old_layout(vk::ImageLayout::VIDEO_ENCODE_DPB_KHR) .new_layout(vk::ImageLayout::VIDEO_ENCODE_DPB_KHR) .image(pic.image) .subresource_range(vk::ImageSubresourceRange { aspect_mask: vk::ImageAspectFlags::COLOR, base_mip_level: 0, level_count: vk::REMAINING_MIP_LEVELS, // For multiple-layers-in-one-image DPB, just the layer referenced. 
base_array_layer: pic.picture_resource_info.base_array_layer, layer_count: 1, }), ); } dpb_barriers.push( vk::ImageMemoryBarrier2::default() .src_stage_mask(vk::PipelineStageFlags2::NONE) .src_access_mask(vk::AccessFlags2::NONE) .dst_stage_mask(vk::PipelineStageFlags2::VIDEO_ENCODE_KHR) .dst_access_mask( vk::AccessFlags2::VIDEO_ENCODE_WRITE_KHR | vk::AccessFlags2::VIDEO_ENCODE_READ_KHR, ) .old_layout(vk::ImageLayout::VIDEO_ENCODE_DPB_KHR) .new_layout(vk::ImageLayout::VIDEO_ENCODE_DPB_KHR) .image(setup_pic.image) .subresource_range(vk::ImageSubresourceRange { aspect_mask: vk::ImageAspectFlags::COLOR, base_mip_level: 0, level_count: vk::REMAINING_MIP_LEVELS, base_array_layer: setup_pic.picture_resource_info.base_array_layer, layer_count: 1, }), ); self.vk.device.cmd_pipeline_barrier2( frame.encode_cb, &vk::DependencyInfo::default().image_memory_barriers(&dpb_barriers), ); // Update state as if the operation succeeded. if frame_state.forward_ref_count > 0 { // Keyframes clear all dpb slots. if frame_state.is_keyframe { self.dpb.clear(); } self.dpb.mark_active(setup_pic.index, frame_state.id); } else { self.dpb.mark_inactive(setup_pic.index); } unsafe { encode_loader.cmd_encode_video(frame.encode_cb, &encode_info); }; } self.vk .device .cmd_end_query(frame.encode_cb, frame.query_pool, 0); // Unbind the session. { let end_info = vk::VideoEndCodingInfoKHR::default().flags(vk::VideoEndCodingFlagsKHR::empty()); unsafe { video_loader.cmd_end_video_coding(frame.encode_cb, &end_info); }; } // Release the input picture back to the graphics queue. insert_image_barrier( &self.vk.device, frame.encode_cb, input.image, Some((encode_queue.family, self.vk.graphics_queue.family)), vk::ImageLayout::VIDEO_ENCODE_SRC_KHR, vk::ImageLayout::GENERAL, vk::PipelineStageFlags2::VIDEO_ENCODE_KHR, vk::AccessFlags2::VIDEO_ENCODE_WRITE_KHR, vk::PipelineStageFlags2::empty(), vk::AccessFlags2::empty(), ); // Record the end timestamp. 
#[cfg(feature = "tracy")] if let Some(encode_ts_pool) = &mut frame.tracy_context.encode_ts_pool { self.vk.device.cmd_write_timestamp( frame.encode_cb, vk::PipelineStageFlags::ALL_COMMANDS, encode_ts_pool.pool, 1, ); } #[cfg(feature = "tracy")] if let Some(span) = &mut frame.tracy_context.encode_span { span.end_zone(); } // Wait for the output buffer to be clear of the previous copy // operation, then establish new timeline points. frame.tp_copied.wait()?; frame.tp_encoded += 10; frame.tp_copied = &frame.tp_encoded + 1; // Submit! { self.vk.device.end_command_buffer(frame.encode_cb)?; let cb_infos = [vk::CommandBufferSubmitInfo::default().command_buffer(frame.encode_cb)]; let wait_infos = [vk::SemaphoreSubmitInfo::default() .semaphore(tp_acquire.timeline().as_semaphore()) .value(tp_acquire.into()) .stage_mask(vk::PipelineStageFlags2::ALL_COMMANDS)]; let signal_infos = [ vk::SemaphoreSubmitInfo::default() .semaphore(frame.timeline.as_semaphore()) .value(frame.tp_encoded.value()) .stage_mask(vk::PipelineStageFlags2::ALL_COMMANDS), vk::SemaphoreSubmitInfo::default() .semaphore(tp_release.timeline().as_semaphore()) .value(tp_release.value()) .stage_mask(vk::PipelineStageFlags2::ALL_COMMANDS), ]; let submit_info = vk::SubmitInfo2::default() .wait_semaphore_infos(&wait_infos) .signal_semaphore_infos(&signal_infos) .command_buffer_infos(&cb_infos); let encode_queue = self.vk.encode_queue.as_ref().unwrap(); self.vk .device .queue_submit2(encode_queue.queue, &[submit_info], vk::Fence::null()) .context("vkQueueSubmit")?; } frame.hierarchical_layer = frame_state.id; frame.is_keyframe = frame_state.is_keyframe; if let Some(submitted_frames) = &self.submitted_frames { // Tell the other thread to copy out the finished packet when it's // finished. Optionally insert headers. 
// Stash any codec headers (SPS/PPS etc.) to be prepended by the writer
// thread, then hand the frame off for readback.
frame.headers = insert;
submitted_frames
    .send(frame)
    .map_err(|_| anyhow::anyhow!("writer thread died"))?;
}

Ok(())
}
}

impl Drop for EncoderInner {
    fn drop(&mut self) {
        // Dropping the sender disconnects the writer thread's input channel,
        // which makes its `for frame in input` loop terminate.
        drop(self.submitted_frames.take());
        // Drain any frames the writer thread already returned, so their own
        // Drop impls run before we tear down the session below.
        for done in self.done_frames.iter() {
            drop(done)
        }
        // Join the writer thread; it may still be copying out its last frame.
        if let Some(handle) = self.writer_thread_handle.take() {
            match handle.join() {
                Ok(Ok(())) => (),
                Ok(Err(e)) => error!("copy thread exited with error: {:#}", e),
                Err(_) => error!("copy thread panicked"),
            }
        }

        debug!("stream stats: \n{:#?}", self.stats);

        let (video_loader, _) = self.vk.video_apis.as_ref().unwrap();
        unsafe {
            // The encode queue must be idle before the session and its
            // backing memory can be destroyed.
            self.vk
                .device
                .queue_wait_idle(self.vk.encode_queue.as_ref().unwrap().queue)
                .unwrap();

            video_loader.destroy_video_session(self.session, None);
            video_loader.destroy_video_session_parameters(self.session_params, None);
            for memory in self.session_memory.drain(..) {
                self.vk.device.free_memory(memory, None);
            }
        }
    }
}

/// A synchronized buffer for writing encoded frames to. Passed back and forth
/// between the submission thread and the copy thread.
// NOTE(review): several generic parameters in this region appear stripped
// (e.g. `Option,`, `Arc,`, `anyhow::Result {`) — likely an extraction
// artifact; verify against the original file before editing.
struct EncoderOutputFrame {
    // Command buffer recorded fresh for each encode submission.
    encode_cb: vk::CommandBuffer,
    // Host-visible destination buffer the encoded bitstream is written into.
    copy_buffer: VkHostBuffer,
    // VIDEO_ENCODE_FEEDBACK query pool (1 query) holding offset/size/status.
    query_pool: vk::QueryPool,
    hierarchical_layer: u32,
    is_keyframe: bool,
    // Codec headers to prepend to the bitstream, if any.
    headers: Option,
    timeline: VkTimelineSemaphore,
    // Signaled by the GPU when encoding completes.
    tp_encoded: VkTimelinePoint,
    // Signaled by the writer thread once the bitstream has been copied out.
    tp_copied: VkTimelinePoint,
    #[cfg(feature = "tracy")]
    tracy_context: TracingContext,
    vk: Arc,
}

#[cfg(feature = "tracy")]
struct TracingContext {
    frame_name: tracy_client::FrameName,
    frame: Option,
    encode_span: Option,
    // GPU timestamp query pool; None on RADV (see below).
    encode_ts_pool: Option,
}

impl EncoderOutputFrame {
    /// Allocates all per-frame resources: the host-visible output buffer, a
    /// command buffer from the encode queue's pool, a feedback query pool,
    /// and a timeline semaphore for cross-thread synchronization.
    pub fn new(
        vk: Arc,
        width: u32,
        height: u32,
        buffer_size_alignment: usize,
        profile: &mut vk::VideoProfileInfoKHR,
        #[cfg(feature = "tracy")] frame_name: tracy_client::FrameName,
    ) -> anyhow::Result {
        // Worst-case bitstream size: 3 bytes/pixel, rounded up to the
        // driver-reported alignment.
        let buffer_size = (width * height * 3).next_multiple_of(buffer_size_alignment as u32);

        let mut profile_list_info = single_profile_list_info(profile);
        let copy_buffer = {
            let buf = {
                let create_info = vk::BufferCreateInfo::default()
                    .size(buffer_size as u64)
                    .sharing_mode(vk::SharingMode::EXCLUSIVE)
                    .usage(vk::BufferUsageFlags::VIDEO_ENCODE_DST_KHR)
                    .push_next(&mut profile_list_info);

                unsafe { vk.device.create_buffer(&create_info, None)? }
            };

            let requirements = unsafe { vk.device.get_buffer_memory_requirements(buf) };
            let alloc_info = vk::MemoryAllocateInfo::default()
                .allocation_size(requirements.size)
                .memory_type_index(vk.device_info.host_visible_mem_type_index);

            let memory = unsafe { vk.device.allocate_memory(&alloc_info, None)? };
            unsafe {
                vk.device
                    .bind_buffer_memory(buf, memory, 0)
                    .context("vkBindBufferMemory")?
            };

            VkHostBuffer::wrap(vk.clone(), buf, memory, buffer_size as usize)
        };

        let encode_queue = vk.encode_queue.as_ref().unwrap();
        let encode_cb = allocate_command_buffer(&vk.device, encode_queue.command_pool)?;

        let query_pool = {
            // Ask the driver to report where in the buffer the bitstream
            // landed, and how many bytes were written.
            let mut video_pool_info = vk::QueryPoolVideoEncodeFeedbackCreateInfoKHR::default()
                .encode_feedback_flags(
                    vk::VideoEncodeFeedbackFlagsKHR::BITSTREAM_BUFFER_OFFSET
                        | vk::VideoEncodeFeedbackFlagsKHR::BITSTREAM_BYTES_WRITTEN,
                );

            let create_info = vk::QueryPoolCreateInfo::default()
                .query_type(vk::QueryType::VIDEO_ENCODE_FEEDBACK_KHR)
                .query_count(1)
                .push_next(profile)
                .push_next(&mut video_pool_info);

            unsafe {
                let query_pool = vk
                    .device
                    .create_query_pool(&create_info, None)
                    .context("vkCreateQueryPool")?;

                // Queries must be reset before first use.
                vk.device.reset_query_pool(query_pool, 0, 1);
                query_pool
            }
        };

        let timeline = VkTimelineSemaphore::new(vk.clone(), 0)?;

        #[cfg(feature = "tracy")]
        let encode_ts_pool = if matches!(
            vk.device_info.driver_version,
            DriverVersion::MesaRadv { .. }
        ) {
            // RADV offers support for timestamp queries, but then has an
            // assertion at timestamp write time.
            None
        } else {
            create_timestamp_query_pool(&vk.device, 2).ok()
        };

        Ok(EncoderOutputFrame {
            encode_cb,
            copy_buffer,
            query_pool,
            hierarchical_layer: 0,
            is_keyframe: false,
            headers: None,
            // Both points start at zero; submit_encode advances them each frame.
            tp_encoded: timeline.new_point(0),
            tp_copied: timeline.new_point(0),
            timeline,
            #[cfg(feature = "tracy")]
            tracy_context: TracingContext {
                frame_name,
                frame: None,
                encode_span: None,
                encode_ts_pool,
            },
            vk,
        })
    }
}

impl Drop for EncoderOutputFrame {
    fn drop(&mut self) {
        unsafe {
            let device = &self.vk.device;
            let encode_queue = self.vk.encode_queue.as_ref().unwrap();

            // Ensure no in-flight work still references this frame's
            // command buffer or query pools.
            device.queue_wait_idle(encode_queue.queue).unwrap();
            device.free_command_buffers(encode_queue.command_pool, &[self.encode_cb]);
            device.destroy_query_pool(self.query_pool, None);

            #[cfg(feature = "tracy")]
            if let Some(pool) = self.tracy_context.encode_ts_pool.take() {
                device.destroy_query_pool(pool.pool, None);
            }
        }
    }
}

// SAFETY: the contained pointers are nothing fancy.
unsafe impl Send for EncoderOutputFrame {}

/// Allows the caller to decide where to sink the frames.
pub trait Sink: Send + 'static {
    fn write_frame(
        &mut self,
        ts: time::Instant,
        frame: Bytes,
        hierarchical_layer: u32,
        is_keyframe: bool,
    );
}

// Mirrors the memory layout of the VIDEO_ENCODE_FEEDBACK query result:
// bitstream offset, bytes written, and the query status code.
#[repr(C)]
#[derive(Debug, Clone, Copy, Default)]
struct QueryResults {
    offset: i32,
    size: i32,
    result: i32,
}

/// Responsible for copying encoded frames from the output buffer and
/// dispatching them to the client. Passes instances of `EncodedOutputFrame`
/// back and forth with the main thread.
fn writer_thread(
    vk: Arc,
    input: crossbeam::Receiver,
    done: crossbeam::Sender,
    mut sink: impl Sink,
    stats: stats::EncodeStats,
) -> anyhow::Result<()> {
    let device = &vk.device;

    let mut capture_ts = time::Instant::now();
    for mut frame in input {
        // Inter-frame duration, used for the bitrate estimate below.
        let dur = capture_ts.elapsed();
        capture_ts = time::Instant::now();

        // Wait for the frame to finish encoding.
unsafe {
    // Block until the GPU signals that the encode submission completed.
    frame.tp_encoded.wait()?;
}

#[cfg(feature = "tracy")]
{
    frame.tracy_context.frame.take();
    if let Some(span) = frame.tracy_context.encode_span.take() {
        if let Some(pool) = &mut frame.tracy_context.encode_ts_pool {
            let timestamps = pool.fetch_results(device)?;
            span.upload_timestamp(timestamps[0], timestamps[1])
        }
    }
}

// Get the buffer offsets for the encoded data.
let mut results = [QueryResults::default()];
unsafe {
    device
        .get_query_pool_results(
            frame.query_pool,
            0,
            &mut results,
            vk::QueryResultFlags::WITH_STATUS_KHR,
        )
        .context("vkGetQueryPoolResults")?;

    // Reset immediately so the query is ready for this frame's next reuse.
    device.reset_query_pool(frame.query_pool, 0, 1)
}

let res = vk::QueryResultStatusKHR::from_raw(results[0].result);
if res != vk::QueryResultStatusKHR::COMPLETE {
    bail!("encode failed: {:?}", res);
}

trace!(len = results[0].size, ?dur, "encoded packet");
stats.record_frame_size(
    frame.is_keyframe,
    frame.hierarchical_layer,
    results[0].size as usize,
);

// View the bitstream directly in the mapped output buffer, at the
// offset/length the feedback query reported.
let data = unsafe {
    let ptr = frame.copy_buffer.access as *const u8;
    std::slice::from_raw_parts(
        ptr.add(results[0].offset as usize),
        results[0].size as usize,
    )
};

// Prepend any headers.
let data = if let Some(headers) = frame.headers.take() {
    let mut buf = bytes::BytesMut::from(headers);
    buf.extend_from_slice(data);
    buf.freeze()
} else {
    Bytes::copy_from_slice(data)
};

unsafe {
    // The bitstream has been copied into `data` above, so the output
    // buffer may now be reused by the next encode submission.
    frame.tp_copied.signal()?;
}

sink.write_frame(
    capture_ts,
    data,
    frame.hierarchical_layer,
    frame.is_keyframe,
);

// Return the frame to the submission thread; a send error just means
// the encoder is shutting down.
done.send(frame).ok();
}

Ok(())
}

/// Queries the physical device for the image formats usable with the given
/// video profile and usage flags.
fn list_format_props<'a>(
    video_loader: &'a VideoQueueExt,
    pdevice: vk::PhysicalDevice,
    profile: &mut vk::VideoProfileInfoKHR,
    usage: vk::ImageUsageFlags,
) -> anyhow::Result>> {
    let mut profile_list_info = single_profile_list_info(profile);
    let format_info = vk::PhysicalDeviceVideoFormatInfoKHR::default()
        .image_usage(usage)
        .push_next(&mut profile_list_info);

    let props = unsafe {
        video_loader
            .get_physical_device_video_format_properties(pdevice, &format_info)
            .context("vkGetVideoFormatPropertiesKHR")?
};

Ok(props)
}

/// Allocates and binds device memory for each of the video session's memory
/// requirements, returning the allocations so the caller can free them later.
fn bind_session_memory(
    video_loader: &VideoQueueExt,
    device: &ash::Device,
    device_info: &VkDeviceInfo,
    session: vk::VideoSessionKHR,
) -> anyhow::Result> {
    let mut session_memory = Vec::new();

    let reqs = unsafe { video_loader.get_video_session_memory_requirements(session)? };

    let mut binds = Vec::new();
    for req in reqs.into_iter() {
        let memory = {
            let mut alloc_info =
                vk::MemoryAllocateInfo::default().allocation_size(req.memory_requirements.size);

            // Prefer device-local memory; fall back to any compatible type.
            let mem_type_idx = select_memory_type(
                &device_info.memory_props,
                vk::MemoryPropertyFlags::DEVICE_LOCAL,
                Some(req.memory_requirements.memory_type_bits),
            )
            .or_else(|| {
                select_memory_type(
                    &device_info.memory_props,
                    vk::MemoryPropertyFlags::empty(),
                    Some(req.memory_requirements.memory_type_bits),
                )
            });

            if mem_type_idx.is_none() {
                bail!("no suitable memory type for video session");
            }

            alloc_info = alloc_info.memory_type_index(mem_type_idx.unwrap());

            unsafe {
                device
                    .allocate_memory(&alloc_info, None)
                    .context("vkAllocateMemory")?
            }
        };

        session_memory.push(memory);
        binds.push(
            vk::BindVideoSessionMemoryInfoKHR::default()
                .memory_bind_index(req.memory_bind_index)
                .memory(memory)
                .memory_size(req.memory_requirements.size),
        );
    }

    unsafe {
        video_loader
            .bind_video_session_memory(device, session, &binds)
            .context("vkBindVideoSessionMemory")?
}

Ok(session_memory)
}

/// Default 4:2:0, 8-bit video profile for SDR streams.
fn default_profile(op: vk::VideoCodecOperationFlagsKHR) -> vk::VideoProfileInfoKHR<'static> {
    vk::VideoProfileInfoKHR::default()
        .video_codec_operation(op)
        .chroma_subsampling(vk::VideoChromaSubsamplingFlagsKHR::TYPE_420)
        .chroma_bit_depth(vk::VideoComponentBitDepthFlagsKHR::TYPE_8)
        .luma_bit_depth(vk::VideoComponentBitDepthFlagsKHR::TYPE_8)
}

/// 4:2:0, 10-bit profile used for HDR10 streams.
fn default_hdr10_profile(op: vk::VideoCodecOperationFlagsKHR) -> vk::VideoProfileInfoKHR<'static> {
    vk::VideoProfileInfoKHR::default()
        .video_codec_operation(op)
        .chroma_subsampling(vk::VideoChromaSubsamplingFlagsKHR::TYPE_420)
        .chroma_bit_depth(vk::VideoComponentBitDepthFlagsKHR::TYPE_10)
        .luma_bit_depth(vk::VideoComponentBitDepthFlagsKHR::TYPE_10)
}

/// Usage/tuning hints for low-latency streaming, with a driver-specific
/// workaround for the tuning mode.
fn default_encode_usage(driver_version: DriverVersion) -> vk::VideoEncodeUsageInfoKHR<'static> {
    // Nvidia chokes on "ULTRA LOW" for some reason.
    let tuning_mode = if matches!(driver_version, DriverVersion::NvidiaProprietary { .. }) {
        vk::VideoEncodeTuningModeKHR::LOW_LATENCY
    } else {
        vk::VideoEncodeTuningModeKHR::ULTRA_LOW_LATENCY
    };

    vk::VideoEncodeUsageInfoKHR::default()
        .video_usage_hints(vk::VideoEncodeUsageFlagsKHR::STREAMING)
        .video_content_hints(vk::VideoEncodeContentFlagsKHR::RENDERED)
        .tuning_mode(tuning_mode)
}

/// Wraps a single profile in a VkVideoProfileListInfoKHR. The returned struct
/// borrows `profile` via a raw pointer; the lifetime ties it to the borrow.
fn single_profile_list_info<'a>(
    profile: &'a mut vk::VideoProfileInfoKHR,
) -> vk::VideoProfileListInfoKHR<'a> {
    vk::VideoProfileListInfoKHR {
        p_profiles: <*const _>::cast(profile),
        profile_count: 1,
        ..Default::default()
    }
}

/// Picks a hierarchical-P GOP structure that fits within the device's codec
/// layer and DPB slot limits, reducing the layer count until it fits.
fn default_structure(
    codec: VideoCodec,
    max_codec_layers: u32,
    max_dpb_slots: u32,
) -> anyhow::Result {
    const MAX_LAYERS: u32 = 4;
    const DEFAULT_GOP_SIZE: u32 = 256;

    // Disable hierarchical coding on H264, because it's broken.
    let mut layers = if codec == VideoCodec::H264 {
        1
    } else {
        std::cmp::min(MAX_LAYERS, max_codec_layers)
    };

    let mut structure = HierarchicalP::new(layers, DEFAULT_GOP_SIZE);
    // Fewer layers require fewer DPB slots; shrink until we fit, or fail if
    // even a single layer doesn't.
    while structure.required_dpb_size() as u32 > max_dpb_slots {
        layers -= 1;
        if layers == 0 {
            bail!("max_dpb_slots too low");
        }

        structure = HierarchicalP::new(layers, DEFAULT_GOP_SIZE);
    }

    Ok(structure)
}

================================================
FILE: mm-server/src/main.rs
================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

mod codec;
mod color;
mod config;
mod container;
mod encoder;
mod pixel_scale;
mod server;
mod session;
mod state;
mod vulkan;
mod waking_sender;

use std::{
    os::unix::fs::DirBuilderExt,
    path::{Path, PathBuf},
    process::Command,
    sync::Arc,
};

use anyhow::{bail, Context, Result};
use clap::Parser;
use parking_lot::Mutex;
use tracing::{debug, info, warn};
use tracing_subscriber::{util::SubscriberInitExt, EnvFilter, Layer};

#[derive(Debug, Parser)]
#[command(name = "mmserver")]
#[command(about = "The Magic Mirror server", long_about = None)]
struct Cli {
    /// Print the version.
    #[arg(short, long)]
    version: bool,
    /// The address to bind. Defaults to [::0]:9599.
    #[arg(long, value_name = "HOST[:PORT]")]
    bind: Option,
    /// Bind using systemd's socket passing protocol (LISTEN_FDS).
    #[arg(long)]
    bind_systemd: bool,
    /// The path to a config file. By default,
    /// /etc/magic-mirror/mmserver.{toml,json} is used (if present).
    #[arg(short = 'C', long, value_name = "FILE")]
    config: Option,
    /// Include extra app definitions. May be specified multiple times, with
    /// either individual files or directories to be searched.
    #[arg(short = 'i', long, value_name = "PATH")]
    include_apps: Vec,
    /// Generate a bug report in a temporary directory. WARNING: this will save
    /// video recordings, which may be large!
#[arg(long)]
bug_report: bool,
}

/// Entrypoint: parses flags, loads config, binds the UDP socket (directly or
/// via systemd socket activation), and runs the server until SIGINT.
fn main() -> Result<()> {
    let args = Cli::parse();

    let version = format!(
        "mmserver {}",
        git_version::git_version!(
            args = ["--always", "--tags", "--match", "mmserver-v"],
            prefix = "git:",
            cargo_prefix = "",
        )
    );

    if args.version {
        println!("{}", version);
        return Ok(());
    }

    // Bug-report mode collects logs (and potentially large recordings) in a
    // fresh world-readable temp directory.
    let bug_report_dir = if args.bug_report {
        let dirname = std::env::temp_dir().join(format!("magic-mirror-{}", uuid::Uuid::new_v4()));
        std::fs::DirBuilder::new().mode(0o0755).create(&dirname)?;
        Some(dirname)
    } else {
        None
    };

    init_logging(bug_report_dir.as_ref())?;
    debug!(version, "starting up");

    if let Some(ref dirname) = bug_report_dir {
        warn!("generating bug report files in: {:?}", &dirname);
    }

    #[cfg(feature = "tracy")]
    warn!("tracing enabled!");

    // Load config.
    let mut cfg = config::Config::new(args.config.as_ref(), &args.include_apps)
        .context("failed to read config")?;

    // Vulkan validation layers are enabled in debug builds only.
    let vk = Arc::new(vulkan::VkContext::new(cfg!(debug_assertions))?);
    preflight_checks(&cfg, &vk)?;

    // Override with command line flags.
    cfg.bug_report_dir = bug_report_dir.clone();
    if let Some(bind) = args.bind {
        cfg.server.bind = bind;
    } else if args.bind_systemd {
        cfg.server.bind_systemd = true;
    }

    let sock = if cfg.server.bind_systemd {
        // Take fd 0 from the LISTEN_FDS protocol rather than binding ourselves.
        let mut listenfd = listenfd::ListenFd::from_env();
        if let Some(sock) = listenfd.take_udp_socket(0)? {
            debug!("using systemd socket: {:?}", sock.local_addr()?);
            sock
        } else {
            bail!("systemd UDP socket not found")
        }
    } else {
        std::net::UdpSocket::bind(&cfg.server.bind).context("binding server socket")?
};

let state = Arc::new(Mutex::new(state::ServerState::new(vk, cfg.clone())));
let mut srv = server::Server::new(sock, cfg.server.clone(), state)?;

// SIGINT triggers a graceful shutdown via the server's closer channel.
let closer = srv.closer();
ctrlc::set_handler(move || {
    debug!("received SIGINT");
    closer.send(()).ok();
})?;

info!("listening on {:?}", srv.local_addr()?);
srv.run().context("server exited")?;

if let Some(dir) = &bug_report_dir {
    save_vulkaninfo(dir);
    info!("bug report files saved to: {:?}", dir);
}

Ok(())
}

/// Installs the tracing subscriber stack: optional tracy layer, optional
/// trace-level file log (bug-report mode), and the normal console log.
fn init_logging(bug_report_dir: Option>) -> Result<()> {
    use tracing_subscriber::layer::SubscriberExt;

    let trace_log = if let Some(dir) = bug_report_dir {
        // Additionally write a trace log with everything to the bug report dir.
        let file = std::fs::File::create(dir.as_ref().join("mmserver.log"))?;
        let trace_filter =
            tracing_subscriber::EnvFilter::new("mmserver=trace,fuser=trace,southpaw=trace");

        let trace_log = tracing_subscriber::fmt::layer()
            .with_ansi(false)
            .with_writer(std::sync::Mutex::new(file))
            .with_filter(trace_filter);

        Some(trace_log)
    } else {
        None
    };

    let tracy = if cfg!(feature = "tracy") {
        Some(tracing_tracy::TracyLayer::default().with_filter(EnvFilter::new("mmserver=trace")))
    } else {
        None
    };

    // Console output defaults to info level, overridable via RUST_LOG.
    let printed_log = tracing_subscriber::fmt::layer().with_filter(
        EnvFilter::builder()
            .with_default_directive("mmserver=info".parse()?)
            .from_env_lossy(),
    );

    tracing_subscriber::registry()
        .with(tracy)
        .with(trace_log)
        .with(printed_log)
        .init();

    Ok(())
}

/// Verifies kernel version, GPU driver version, and data directory before the
/// server starts, warning about known-problematic system configurations.
fn preflight_checks(cfg: &config::Config, vk: &vulkan::VkContext) -> anyhow::Result<()> {
    match linux_version() {
        Some((major, minor)) if major < 6 => {
            bail!("kernel version {major}.{minor} is too low; 6.x required");
        }
        None => warn!("unable to determine linux kernel version!"),
        _ => (),
    }

    match vk.device_info.driver_version {
        vulkan::DriverVersion::MesaRadv {
            major,
            minor,
            patch,
        } => {
            if major < 24 || (major == 24 && minor < 3) {
                bail!("mesa >= 24.3 required, have {major}.{minor}.{patch}");
            }
        }
        vulkan::DriverVersion::NvidiaProprietary { major, minor } => {
            if major < 565 {
                bail!("driver version >= 565.x required, have {major}.{minor}");
            }
        }
        vulkan::DriverVersion::Other(ref driver) => {
            warn!(driver, "using potentially unsupported vulkan driver")
        }
    }

    std::fs::create_dir_all(&cfg.data_home).context(format!(
        "failed to initialize data_home ({})",
        cfg.data_home.display(),
    ))?;

    // Check for Ubuntu's restrictions on rootless containers.
    if sysctl("apparmor_restrict_unprivileged_unconfined")
        || sysctl("apparmor_restrict_unprivileged_userns")
    {
        warn!(
            "Unprivileged user namespaces restricted by AppArmor! Launching applications will \
             fail unless an exception is installed.
             Read more here: \
             https://ubuntu.com/blog/ubuntu-23-10-restricted-unprivileged-user-namespaces"
        )
    }

    Ok(())
}

/// Parses the running kernel's (major, minor) version from `uname -r`.
/// Returns None if the release string doesn't parse.
fn linux_version() -> Option<(u32, u32)> {
    let uname = rustix::system::uname();
    let version = uname.release().to_str().ok()?;
    let version = version.split_whitespace().next()?;

    // splitn(3, ...) leaves the patch/suffix (e.g. "5-arch1") in one piece,
    // so only major and minor need to parse as integers.
    let mut parts = version.splitn(3, ".");
    let major = parts.next()?;
    let minor = parts.next()?;

    Some((major.parse().ok()?, minor.parse().ok()?))
}

/// Reads a kernel sysctl under /proc/sys/kernel; true only if the value is
/// exactly "1". Missing or unreadable sysctls read as false.
fn sysctl(name: &str) -> bool {
    const CTL_PATH: &str = "/proc/sys/kernel";
    std::fs::read_to_string(Path::new(CTL_PATH).join(name))
        .map(|s| s.trim() == "1")
        .ok()
        .unwrap_or_default()
}

/// Best-effort capture of `vulkaninfo` output into the bug report directory.
/// Failures are logged at debug level and otherwise ignored.
fn save_vulkaninfo(bug_report_dir: impl AsRef) {
    match Command::new("vulkaninfo").env_clear().output() {
        Ok(output) => {
            let _ = std::fs::write(
                bug_report_dir.as_ref().join("vulkaninfo.log"),
                output.stdout,
            );
        }
        Err(e) => debug!("failed to run vulkaninfo: {:#}", e),
    }
}

#[test]
fn test_linux_version() {
    let Some((major, _minor)) = linux_version() else {
        panic!("failed to determine linux version");
    };

    assert!(major >= 6);
}

================================================
FILE: mm-server/src/pixel_scale.rs
================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

use std::fmt;

use anyhow::anyhow;
use mm_protocol as protocol;

/// A UI scale factor stored as a rational (numerator, denominator).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct PixelScale(pub u32, pub u32);

impl PixelScale {
    pub const ONE: Self = Self(1, 1);

    /// True if the scale is not a whole number (numerator not divisible by
    /// denominator).
    pub fn is_fractional(&self) -> bool {
        (self.0 % self.1) != 0
    }

    /// Rounds the scale up to the next whole number, keeping the denominator.
    pub fn ceil(self) -> Self {
        Self(self.0.next_multiple_of(self.1), self.1)
    }
}

impl std::fmt::Display for PixelScale {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Rendered as a decimal with one fractional digit, e.g. "1.5".
        write!(f, "{:.1}", self.0 as f64 / self.1 as f64)
    }
}

impl Default for PixelScale {
    fn default() -> Self {
        PixelScale::ONE
    }
}

/// Error returned when converting a fractional scale to a whole number.
#[derive(Debug, Clone)]
pub struct FractionalScaleError;

impl fmt::Display for FractionalScaleError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f,
"scale is fractional")
    }
}

// NOTE(review): the generic parameters on these TryFrom/From impls appear
// stripped (e.g. `impl TryFrom for PixelScale`) — likely an extraction
// artifact; verify against the original file.
impl TryFrom for PixelScale {
    type Error = anyhow::Error;

    fn try_from(scale: protocol::PixelScale) -> anyhow::Result {
        if scale.denominator == 0 && scale.numerator != 0 {
            // NOTE(review): this branch discards the numerator and returns
            // 1/1 whenever the denominator is unset — confirm this is the
            // intended handling of an unset/partial protocol scale, rather
            // than `Self(scale.numerator, 1)`.
            Ok(Self(1, 1))
        } else if scale.denominator == 0 || scale.numerator == 0 {
            Err(anyhow!(
                "invalid pixel scale: {}/{}",
                scale.numerator,
                scale.denominator
            ))
        } else {
            Ok(Self(scale.numerator, scale.denominator))
        }
    }
}

impl From for f64 {
    fn from(value: PixelScale) -> Self {
        value.0 as f64 / value.1 as f64
    }
}

impl TryFrom for u32 {
    type Error = FractionalScaleError;

    // Only whole-number scales convert; fractional scales are an error.
    fn try_from(value: PixelScale) -> Result {
        if value.is_fractional() {
            return Err(FractionalScaleError);
        }

        Ok(value.0 / value.1)
    }
}

impl From for protocol::PixelScale {
    fn from(scale: PixelScale) -> Self {
        Self {
            numerator: scale.0,
            denominator: scale.1,
        }
    }
}

================================================
FILE: mm-server/src/server/handlers/attachment/stats.rs
================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

use std::time;

use simple_moving_average::SMA as _;
use tracing::info;

/// Tracks per-attachment transfer totals and a moving-average bitrate,
/// logging a summary line at most every five seconds.
pub struct AttachmentStats {
    app_id: String,
    // When the attachment started.
    start: time::Instant,
    // Total bytes sent over the attachment's lifetime.
    total_transfer: u64,
    sma: simple_moving_average::SingleSumSMA,
    last_log: time::Instant,
}

impl AttachmentStats {
    pub fn new(app_id: String) -> Self {
        let now = time::Instant::now();
        Self {
            app_id,
            start: now,
            total_transfer: 0,
            sma: simple_moving_average::SingleSumSMA::new(),
            last_log: now,
        }
    }

    /// Records one video frame of `len` bytes sent over `duration`, folding
    /// its instantaneous bitrate (mbps) into the moving average.
    pub fn record_frame(&mut self, _seq: u64, len: usize, duration: time::Duration) {
        self.total_transfer += len as u64;
        // bits / mebibit, over the frame interval.
        self.sma
            .add_sample((len as f64 * 8.0 / (1024.0 * 1024.0)) / duration.as_secs_f64());

        let avg = self.sma.get_average();
        // Rate-limit the summary log to once every five seconds.
        if self.last_log.elapsed().as_secs() > 5 {
            self.last_log = time::Instant::now();
            let total_transfer_gb = self.total_transfer as f32 / (1024.0 * 1024.0 * 1024.0);
            info!(
                duration = ?self.start.elapsed(),
                current_bitrate_mbps = avg,
                total_transfer_gb,
                "{}",
                self.app_id
            );
        }
// Plot the bitrate to tracy on every tenth frame.
#[cfg(feature = "tracy")]
if _seq % 10 == 0 {
    tracy_client::plot!("video bitrate (mbps)", avg);
}
}
}

================================================
FILE: mm-server/src/server/handlers/attachment.rs
================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

use std::{collections::BTreeMap, fs, path::PathBuf, time};

use mm_protocol::{self as protocol, error::ErrorCode};
use tracing::{debug, debug_span, error, trace};

mod stats;

use super::{validate_attachment, validate_gamepad, ServerError, ValidationError};
use crate::{
    server::stream::StreamWriter,
    session::{
        compositor,
        control::{ControlMessage, DisplayParams, SessionEvent},
        Attachment,
    },
};

// Converts the session's internal display parameters into the protocol
// representation sent to clients.
impl From for protocol::VirtualDisplayParameters {
    fn from(params: DisplayParams) -> Self {
        protocol::VirtualDisplayParameters {
            resolution: Some(protocol::Size {
                width: params.width,
                height: params.height,
            }),
            framerate_hz: params.framerate,
            ui_scale: Some(params.ui_scale.into()),
        }
    }
}

/// Per-attachment state for one client attached to a session. Created by
/// `attach` and driven by its event loop.
struct AttachmentHandler<'a> {
    ctx: &'a super::Context,
    handle: Attachment,
    session_display_params: DisplayParams,
    attached: protocol::Attached,
    // Ratio of the session's display height to the streamed video height.
    superscale: f64,
    // Keep track of the pointer lock, and debounce session events for it.
    pointer_lock: Option<(f64, f64)>,
    last_video_frame_recvd: time::Instant,
    last_audio_frame_recvd: time::Instant,
    current_video_stream_seq: u64,
    // For saving the bitstream to disk in bug reports.
    bug_report: Option<(PathBuf, BTreeMap)>,
    stats: stats::AttachmentStats,
}

// Internal result type for the handler loop: either the attachment finished
// normally, or an error should be reported to the client.
#[derive(Debug, Clone)]
enum AttachmentError {
    Finished,
    ServerError(ErrorCode, Option),
}

/// Handles a client `Attach` request: validates parameters, attaches to the
/// session, and runs the attachment until it finishes or errors.
pub fn attach(ctx: &super::Context, msg: protocol::Attach) -> Result<(), ServerError> {
    let session_id = msg.session_id;
    let handler = AttachmentHandler::new(ctx, msg)?;

    // Make sure we detach, even if we panic.
let mut handler = scopeguard::guard(handler, |h| { debug!("detaching from session"); if let Some(s) = ctx.state.lock().sessions.get_mut(&session_id) { s.detach(h.handle).ok(); }; }); handler.run() } impl<'a> AttachmentHandler<'a> { fn new(ctx: &'a super::Context, msg: protocol::Attach) -> Result { if msg.attachment_type() != protocol::AttachmentType::Operator { return Err(ServerError( ErrorCode::ErrorAttachmentParamsNotSupported, Some("unsupported attachment type".to_string()), )); } let session_id = msg.session_id; let (video_params, audio_params) = validate_attachment(msg).map_err(|err| match err { ValidationError::Unsupported(text) => { ServerError(ErrorCode::ErrorAttachmentParamsNotSupported, Some(text)) } ValidationError::Invalid(text) => ServerError(ErrorCode::ErrorProtocol, Some(text)), })?; let mut guard = ctx.state.lock(); let server_config = guard.cfg.server.clone(); let attachment_id = guard.id_generator.next_int(); let Some(session) = guard.sessions.get_mut(&session_id) else { return Err(ServerError(ErrorCode::ErrorSessionNotFound, None)); }; if !session.supports_stream(video_params) { return Err(ServerError( ErrorCode::ErrorAttachmentParamsNotSupported, Some("unsupported streaming resolution or codec".to_string()), )); } let stream_writer = StreamWriter::new( session_id, attachment_id, &server_config, ctx.outgoing_dgrams.clone(), ctx.max_dgram_len, ); let handle = match session.attach( attachment_id, true, video_params, audio_params, stream_writer, ) { Ok(v) => v, Err(err) => { error!(?err, "failed to attach to session"); return Err(ServerError( ErrorCode::ErrorServer, Some("failed to attach to session".to_string()), )); } }; let app_id = session.application_id.clone(); let display_params = session.display_params; let bug_report_dir = session.bug_report_dir.clone(); drop(guard); let superscale = display_params.height as f64 / video_params.height as f64; assert_eq!(display_params.height % video_params.height, 0); assert_eq!( display_params.width as f64 
/ video_params.width as f64, superscale ); debug!( ?video_params, ?audio_params, ?superscale, "attaching with params" ); let video_codec: protocol::VideoCodec = video_params.codec.into(); let video_profile: protocol::VideoProfile = video_params.profile.into(); let audio_codec: protocol::AudioCodec = audio_params.codec.into(); let attached = protocol::Attached { session_id, attachment_id: handle.attachment_id, video_codec: video_codec.into(), streaming_resolution: Some(protocol::Size { width: video_params.width, height: video_params.height, }), video_profile: video_profile.into(), quality_preset: video_params.preset, audio_codec: audio_codec.into(), sample_rate_hz: audio_params.sample_rate, channels: Some(protocol::AudioChannels { channels: vec![ protocol::audio_channels::Channel::Mono.into(); audio_params.channels as usize ], }), }; let pointer_lock = None; let now = time::Instant::now(); Ok(Self { ctx, handle, session_display_params: display_params, attached, superscale, pointer_lock, last_video_frame_recvd: now, last_audio_frame_recvd: now, current_video_stream_seq: 0, bug_report: bug_report_dir.map(|dir| (dir, BTreeMap::default())), stats: stats::AttachmentStats::new(app_id), }) } fn run(&mut self) -> Result<(), ServerError> { let span = debug_span!( "attachment", self.handle.session_id, self.handle.attachment_id, ); let _guard = span.enter(); if self .ctx .outgoing .send(self.attached.clone().into()) .is_err() { // Client already hung up. return Ok(()); } loop { crossbeam_channel::select! { recv(self.ctx.incoming) -> msg => { match msg { Ok(m) => { match self.handle_attachment_message(m) { Ok(_) => (), Err(AttachmentError::Finished) => return Ok(()), Err(AttachmentError::ServerError(code, text)) => { return Err(ServerError(code, text)); } } } Err(_) => return Ok(()), // Client fin. 
} }, recv(&self.handle.events) -> event => { match event { Ok(ev) => match self.handle_session_event(ev) { Ok(_) => (), Err(AttachmentError::Finished) => return Ok(()), Err(AttachmentError::ServerError(code, text)) => { return Err(ServerError(code, text)); } } Err(e) => { // Mark the session defunct. It'll get GC'd. error!("error in attach handler: {:#}", e); if let Some(s) = self.ctx.state.lock().sessions.get_mut(&self.handle.session_id) { s.defunct = true; }; return Err(ServerError( ErrorCode::ErrorServer, Some("internal server error".to_string()), )); } } }, } } } fn handle_attachment_message( &mut self, msg: protocol::MessageType, ) -> Result<(), AttachmentError> { match msg { protocol::MessageType::KeepAlive(_) => {} protocol::MessageType::Detach(_) => return Err(AttachmentError::Finished), protocol::MessageType::RequestVideoRefresh(ev) => { if ev.stream_seq == self.current_video_stream_seq { let _ = self.handle.control.send(ControlMessage::RefreshVideo); } else { debug!( current = self.current_video_stream_seq, requested = ev.stream_seq, "ignoring RequestVideoRefresh" ); } } protocol::MessageType::KeyboardInput(ev) => { use protocol::keyboard_input::KeyState; trace!(ev.key, ev.state, "received keyboard event: {:?}", ev); let state = match ev.state.try_into() { Ok(KeyState::Unknown) | Err(_) => { return Err(AttachmentError::ServerError( ErrorCode::ErrorProtocol, Some("invalid key state".to_string()), )); } Ok(KeyState::Pressed) => compositor::KeyState::Pressed, Ok(KeyState::Released) => compositor::KeyState::Released, Ok(KeyState::Repeat) => compositor::KeyState::Repeat, }; let key_code = match protocol::keyboard_input::Key::try_from(ev.key).map(key_to_evdev) { Ok(Some(scancode)) => scancode, _ => { return Err(AttachmentError::ServerError( ErrorCode::ErrorProtocol, Some("invalid key".to_string()), )); } }; let ch = match ev.char { 0 => None, c => match char::from_u32(c) { Some(c) => Some(c), None => { return Err(AttachmentError::ServerError( 
ErrorCode::ErrorProtocol, Some("invalid keychar".to_string()), )); } }, }; trace!(key_code, ?state, ?ch, "translated keyboard event"); self.handle .control .send(ControlMessage::KeyboardInput { key_code, state, char: ch, }) .ok(); } protocol::MessageType::PointerMotion(ev) => { let x = ev.x * self.superscale; let y = ev.y * self.superscale; self.handle .control .send(ControlMessage::PointerMotion(x, y)) .ok(); } protocol::MessageType::RelativePointerMotion(ev) => { let x = ev.x * self.superscale; let y = ev.y * self.superscale; self.handle .control .send(ControlMessage::RelativePointerMotion(x, y)) .ok(); } protocol::MessageType::PointerEntered(_) => { self.handle .control .send(ControlMessage::PointerEntered) .ok(); } protocol::MessageType::PointerLeft(_) => { self.handle.control.send(ControlMessage::PointerLeft).ok(); } protocol::MessageType::PointerInput(ev) => { use protocol::pointer_input::*; let state = match ev.state.try_into() { Ok(ButtonState::Unknown) | Err(_) => { return Err(AttachmentError::ServerError( ErrorCode::ErrorProtocol, Some("invalid button state".to_string()), )); } Ok(ButtonState::Pressed) => compositor::ButtonState::Pressed, Ok(ButtonState::Released) => compositor::ButtonState::Released, }; // https://gitlab.freedesktop.org/libinput/libinput/-/blob/main/include/linux/linux/input-event-codes.h#L354 let button_code = match ev.button.try_into() { Ok(Button::Left) => 0x110, Ok(Button::Right) => 0x111, Ok(Button::Middle) => 0x112, Ok(Button::Forward) => 0x115, Ok(Button::Back) => 0x116, _ => { return Err(AttachmentError::ServerError( ErrorCode::ErrorProtocol, Some("invalid button".to_string()), )); } }; trace!( button = ev.button, pressed = (state == compositor::ButtonState::Pressed), "sending cursor input event", ); self.handle .control .send(ControlMessage::PointerInput { x: ev.x, y: ev.y, button_code, state, }) .ok(); } protocol::MessageType::PointerScroll(ev) => match ev.scroll_type.try_into() { 
Ok(protocol::pointer_scroll::ScrollType::Continuous) => { let x = ev.x * self.superscale; let y = ev.y * self.superscale; self.handle .control .send(ControlMessage::PointerAxis(x, y)) .ok(); } Ok(protocol::pointer_scroll::ScrollType::Discrete) => { self.handle .control .send(ControlMessage::PointerAxisDiscrete(ev.x, ev.y)) .ok(); } _ => { return Err(AttachmentError::ServerError( ErrorCode::ErrorProtocol, Some("invalid scroll type".to_string()), )); } }, protocol::MessageType::GamepadAvailable(ev) => { let (id, _layout) = match validate_gamepad(ev.gamepad) { Ok(v) => v, Err(ValidationError::Invalid(text)) => { return Err(AttachmentError::ServerError( ErrorCode::ErrorProtocol, Some(text), )) } Err(_) => unreachable!(), }; self.handle .control .send(ControlMessage::GamepadAvailable(id)) .ok(); } protocol::MessageType::GamepadUnavailable(ev) => { self.handle .control .send(ControlMessage::GamepadUnavailable(ev.id)) .ok(); } protocol::MessageType::GamepadMotion(ev) => { let (scancode, is_trigger) = match protocol::gamepad_motion::GamepadAxis::try_from(ev.axis) .ok() .and_then(axis_to_evdev) { Some(v) => v, _ => { return Err(AttachmentError::ServerError( ErrorCode::ErrorProtocol, Some("invalid gamepad axis".to_string()), )); } }; let cm = if is_trigger { ControlMessage::GamepadTrigger { id: ev.gamepad_id, trigger_code: scancode, value: ev.value, } } else { ControlMessage::GamepadAxis { id: ev.gamepad_id, axis_code: scancode, value: ev.value, } }; self.handle.control.send(cm).ok(); } protocol::MessageType::GamepadInput(ev) => { use protocol::gamepad_input::{GamepadButton, GamepadButtonState}; let state = match ev.state.try_into() { Ok(GamepadButtonState::Unknown) | Err(_) => { return Err(AttachmentError::ServerError( ErrorCode::ErrorProtocol, Some("invalid gamepad button state".to_string()), )); } Ok(GamepadButtonState::Pressed) => compositor::ButtonState::Pressed, Ok(GamepadButtonState::Released) => compositor::ButtonState::Released, }; let scancode = match 
GamepadButton::try_from(ev.button) .ok() .and_then(gamepad_button_to_evdev) { Some(v) => v, _ => { return Err(AttachmentError::ServerError( ErrorCode::ErrorProtocol, Some("invalid gamepad button".to_string()), )); } }; self.handle .control .send(ControlMessage::GamepadInput { id: ev.gamepad_id, button_code: scancode, state, }) .ok(); } protocol::MessageType::Error(ev) => { error!( "received error from client: {}: {}", ev.err_code().as_str_name(), ev.error_text ); } msg => { debug!("received {} from client on attachment stream", msg); return Err(AttachmentError::ServerError( ErrorCode::ErrorProtocolUnexpectedMessage, None, )); } } Ok(()) } fn handle_session_event(&mut self, event: SessionEvent) -> Result<(), AttachmentError> { match event { SessionEvent::Shutdown => { // The session ended, probably because the app exited. self.ctx .state .lock() .sessions .remove(&self.handle.session_id); self.send(protocol::SessionEnded {}); return Err(AttachmentError::Finished); } SessionEvent::DisplayParamsChanged { params, reattach } => { self.session_display_params = params; let msg = protocol::SessionParametersChanged { display_params: Some(params.into()), supported_streaming_resolutions: super::generate_streaming_res(¶ms), reattach_required: reattach, }; self.send(msg); if reattach { return Err(AttachmentError::Finished); } } SessionEvent::VideoFrame { stream_seq, seq, frame, .. 
} => {
    // `.max()` means the tracked stream seq only ever moves forward, even
    // if a frame from an older stream arrives late.
    self.current_video_stream_seq = self.current_video_stream_seq.max(stream_seq);

    // Log frames that arrive more than 1.5 frame-intervals apart.
    let duration = self.last_video_frame_recvd.elapsed();
    if duration
        > time::Duration::from_secs_f32(
            1.5 / self.session_display_params.framerate as f32,
        )
    {
        debug!(dur = ?duration, "slow video frame");
    }

    self.last_video_frame_recvd = time::Instant::now();
    self.stats.record_frame(seq, frame.len(), duration);

    // For bug reports, dump the raw bitstream to disk, one file per
    // video stream seq.
    if let Some((root, files)) = &mut self.bug_report {
        let file = files.entry(stream_seq).or_insert_with(|| {
            let ext = format!("{:?}", self.attached.video_codec()).to_lowercase();
            let path = root.join(format!(
                "attachment-{:02}-{}.{}",
                stream_seq, self.handle.attachment_id, ext
            ));
            // NOTE(review): these unwraps panic the handler thread on I/O
            // errors while writing the bug report bitstream.
            std::fs::File::create(path).unwrap()
        });
        std::io::Write::write_all(file, &frame).unwrap();
        std::io::Write::flush(file).unwrap();
    }
}
SessionEvent::AudioFrame { seq, frame, .. } => {
    // NOTE(review): the slowness threshold reuses the *video* framerate
    // here; audio packets don't necessarily arrive at that cadence —
    // confirm this is intentional.
    let duration = self.last_audio_frame_recvd.elapsed();
    if duration
        > time::Duration::from_secs_f32(
            1.5 / self.session_display_params.framerate as f32,
        )
    {
        debug!(dur = ?duration, "slow audio frame");
    }

    self.last_audio_frame_recvd = time::Instant::now();
    // Audio frames are folded into the same transfer stats as video.
    self.stats.record_frame(seq, frame.len(), duration);
}
SessionEvent::CursorUpdate { image, icon, hotspot_x, hotspot_y, } => {
    use protocol::update_cursor::CursorIcon;

    // Either a named icon or a raw image; an absent icon maps to None.
    let icon: CursorIcon = icon.map(cursor_icon_to_proto).unwrap_or(CursorIcon::None);
    let msg = protocol::UpdateCursor {
        image: image.unwrap_or_default(),
        icon: icon.into(),
        hotspot_x,
        hotspot_y,
    };

    self.send(msg);
}
SessionEvent::PointerLocked(x, y) => {
    // Compositor coordinates -> client coordinates.
    let x = x / self.superscale;
    let y = y / self.superscale;

    // Debounce: only notify the client when the lock is newly acquired.
    if self.pointer_lock.replace((x, y)).is_none() {
        self.send(protocol::LockPointer { x, y });
    }
}
SessionEvent::PointerReleased => {
    // Debounce: only notify the client if a lock was actually held.
    if self.pointer_lock.take().is_some() {
        self.send(protocol::ReleasePointer {});
    }
}
        }

        Ok(())
    }

    // Best-effort send to the client; errors (client hung up) are ignored.
    fn send(&self, msg: impl Into) {
        let _ = self.ctx.outgoing.send(msg.into());
    }
}

// Maps a protocol key to a linux evdev scancode (KEY_* in
// input-event-codes.h). Returns None for keys with no linux equivalent.
fn key_to_evdev(key: protocol::keyboard_input::Key) -> Option {
    use protocol::keyboard_input::Key;
    match
key {
    // Main block, in scancode order (KEY_ESC = 1 ... KEY_KPDOT = 83).
    Key::Escape => Some(1),
    Key::Digit1 => Some(2),
    Key::Digit2 => Some(3),
    Key::Digit3 => Some(4),
    Key::Digit4 => Some(5),
    Key::Digit5 => Some(6),
    Key::Digit6 => Some(7),
    Key::Digit7 => Some(8),
    Key::Digit8 => Some(9),
    Key::Digit9 => Some(10),
    Key::Digit0 => Some(11),
    Key::Minus => Some(12),
    Key::Equal => Some(13),
    Key::Backspace => Some(14),
    Key::Tab => Some(15),
    Key::Q => Some(16),
    Key::W => Some(17),
    Key::E => Some(18),
    Key::R => Some(19),
    Key::T => Some(20),
    Key::Y => Some(21),
    Key::U => Some(22),
    Key::I => Some(23),
    Key::O => Some(24),
    Key::P => Some(25),
    Key::BracketLeft => Some(26),
    Key::BracketRight => Some(27),
    Key::Enter => Some(28),
    Key::ControlLeft => Some(29),
    Key::A => Some(30),
    Key::S => Some(31),
    Key::D => Some(32),
    Key::F => Some(33),
    Key::G => Some(34),
    Key::H => Some(35),
    Key::J => Some(36),
    Key::K => Some(37),
    Key::L => Some(38),
    Key::Semicolon => Some(39),
    Key::Quote => Some(40),
    Key::Backquote => Some(41),
    Key::ShiftLeft => Some(42),
    Key::Backslash => Some(43),
    Key::Z => Some(44),
    Key::X => Some(45),
    Key::C => Some(46),
    Key::V => Some(47),
    Key::B => Some(48),
    Key::N => Some(49),
    Key::M => Some(50),
    Key::Comma => Some(51),
    Key::Period => Some(52),
    Key::Slash => Some(53),
    Key::ShiftRight => Some(54),
    Key::NumpadMultiply => Some(55),
    Key::AltLeft => Some(56),
    Key::Space => Some(57),
    Key::CapsLock => Some(58),
    // Function keys.
    Key::F1 => Some(59),
    Key::F2 => Some(60),
    Key::F3 => Some(61),
    Key::F4 => Some(62),
    Key::F5 => Some(63),
    Key::F6 => Some(64),
    Key::F7 => Some(65),
    Key::F8 => Some(66),
    Key::F9 => Some(67),
    Key::F10 => Some(68),
    Key::NumLock => Some(69),
    Key::ScrollLock => Some(70),
    // Numpad.
    Key::Numpad7 => Some(71),
    Key::Numpad8 => Some(72),
    Key::Numpad9 => Some(73),
    Key::NumpadSubtract => Some(74),
    Key::Numpad4 => Some(75),
    Key::Numpad5 => Some(76),
    Key::Numpad6 => Some(77),
    Key::NumpadAdd => Some(78),
    Key::Numpad1 => Some(79),
    Key::Numpad2 => Some(80),
    Key::Numpad3 => Some(81),
    Key::Numpad0 => Some(82),
    Key::NumpadDecimal => Some(83),
    // International / IME keys.
    Key::Lang5 => Some(85),
    Key::IntlBackslash => Some(86),
    Key::F11 => Some(87),
    Key::F12 => Some(88),
    Key::IntlRo => Some(89),
    Key::Katakana => Some(90),
    Key::Hiragana => Some(91),
    Key::Convert => Some(92),
    Key::KanaMode => Some(93),
    Key::NonConvert => Some(94),
    Key::NumpadEnter => Some(96),
    Key::ControlRight => Some(97),
    Key::NumpadDivide => Some(98),
    Key::PrintScreen => Some(99),
    Key::AltRight => Some(100),
    // Navigation cluster.
    Key::Home => Some(102),
    Key::ArrowUp => Some(103),
    Key::PageUp => Some(104),
    Key::ArrowLeft => Some(105),
    Key::ArrowRight => Some(106),
    Key::End => Some(107),
    Key::ArrowDown => Some(108),
    Key::PageDown => Some(109),
    Key::Insert => Some(110),
    Key::Delete => Some(111),
    Key::NumpadEqual => Some(117),
    Key::Pause => Some(119),
    Key::NumpadComma => Some(121),
    Key::IntlYen => Some(124),
    Key::MetaLeft => Some(125),
    Key::MetaRight => Some(126),
    Key::ContextMenu => Some(127),
    Key::Help => Some(138),
    Key::NumpadParenLeft => Some(179),
    Key::NumpadParenRight => Some(180),
    // Linux doesn't have this, so we'll map it to the regular backspace.
    Key::NumpadBackspace => Some(14),
    // TODO: Can't find these at all.
Key::Fn | Key::FnLock => None,
    Key::Lang1 | Key::Lang2 | Key::Lang3 | Key::Lang4 => None,
    Key::NumpadClear
    | Key::NumpadClearEntry
    | Key::NumpadHash
    | Key::NumpadMemoryAdd
    | Key::NumpadMemoryClear
    | Key::NumpadMemoryRecall
    | Key::NumpadMemoryStore
    | Key::NumpadMemorySubtract => None,
    Key::Unknown => None,
    }
}

// Maps a protocol gamepad axis to an evdev ABS_* code, plus a flag
// indicating whether the axis is a trigger (triggers are one-sided and get
// sent as GamepadTrigger rather than GamepadAxis control messages).
fn axis_to_evdev(axis: protocol::gamepad_motion::GamepadAxis) -> Option<(u32, bool)> {
    use protocol::gamepad_motion::GamepadAxis;
    match axis {
        GamepadAxis::LeftX => Some((0x00, false)), // ABS_X
        GamepadAxis::LeftY => Some((0x01, false)), // ABS_Y
        GamepadAxis::RightX => Some((0x03, false)), // ABS_RX
        GamepadAxis::RightY => Some((0x04, false)), // ABS_RY,
        GamepadAxis::LeftTrigger => Some((0x02, true)), // ABS_Z
        GamepadAxis::RightTrigger => Some((0x05, true)), // ABS_RZ
        GamepadAxis::Unknown => None,
    }
}

// Maps a protocol gamepad button to an evdev BTN_* code. Returns None for
// buttons with no linux equivalent.
fn gamepad_button_to_evdev(button: protocol::gamepad_input::GamepadButton) -> Option {
    use protocol::gamepad_input::GamepadButton;
    match button {
        GamepadButton::DpadLeft => Some(0x222), // BTN_DPAD_LEFT
        GamepadButton::DpadRight => Some(0x223), // BTN_DPAD_RIGHT
        GamepadButton::DpadUp => Some(0x220), // BTN_DPAD_UP
        GamepadButton::DpadDown => Some(0x221), // BTN_DPAD_DOWN
        GamepadButton::South => Some(0x130), // BTN_SOUTH
        GamepadButton::East => Some(0x131), // BTN_EAST
        GamepadButton::North => Some(0x133), // BTN_NORTH
        GamepadButton::West => Some(0x134), // BTN_WEST
        GamepadButton::C => Some(0x132), // BTN_C
        GamepadButton::Z => Some(0x135), // BTN_Z
        GamepadButton::ShoulderLeft => Some(0x136), // BTN_TL
        GamepadButton::ShoulderRight => Some(0x137), // BTN_TR
        GamepadButton::JoystickLeft => Some(0x13d), // BTN_THUMBL
        GamepadButton::JoystickRight => Some(0x13e), // BTN_THUMBR
        GamepadButton::Start => Some(0x13b), // BTN_START
        GamepadButton::Select => Some(0x13a), // BTN_SELECT
        GamepadButton::Logo => Some(0x13c), // BTN_MODE
        GamepadButton::Share => None, // TODO I'm not sure what code to use.
GamepadButton::TriggerLeft => Some(0x138), // BTN_TL2
        // 0x139 is BTN_TR2 in input-event-codes.h (the previous comment said
        // "BTN_TL3", which does not exist).
        GamepadButton::TriggerRight => Some(0x139), // BTN_TR2
        GamepadButton::Unknown => None,
    }
}

// Converts a cursor-icon crate icon into the equivalent protocol enum.
// Icons with no protocol equivalent fall back to CursorIcon::Default.
fn cursor_icon_to_proto(icon: cursor_icon::CursorIcon) -> protocol::update_cursor::CursorIcon {
    use protocol::update_cursor::CursorIcon;
    match icon {
        cursor_icon::CursorIcon::ContextMenu => CursorIcon::ContextMenu,
        cursor_icon::CursorIcon::Help => CursorIcon::Help,
        cursor_icon::CursorIcon::Pointer => CursorIcon::Pointer,
        cursor_icon::CursorIcon::Progress => CursorIcon::Progress,
        cursor_icon::CursorIcon::Wait => CursorIcon::Wait,
        cursor_icon::CursorIcon::Cell => CursorIcon::Cell,
        cursor_icon::CursorIcon::Crosshair => CursorIcon::Crosshair,
        cursor_icon::CursorIcon::Text => CursorIcon::Text,
        cursor_icon::CursorIcon::VerticalText => CursorIcon::VerticalText,
        cursor_icon::CursorIcon::Alias => CursorIcon::Alias,
        cursor_icon::CursorIcon::Copy => CursorIcon::Copy,
        cursor_icon::CursorIcon::Move => CursorIcon::Move,
        cursor_icon::CursorIcon::NoDrop => CursorIcon::NoDrop,
        cursor_icon::CursorIcon::NotAllowed => CursorIcon::NotAllowed,
        cursor_icon::CursorIcon::Grab => CursorIcon::Grab,
        cursor_icon::CursorIcon::Grabbing => CursorIcon::Grabbing,
        cursor_icon::CursorIcon::EResize => CursorIcon::EResize,
        cursor_icon::CursorIcon::NResize => CursorIcon::NResize,
        cursor_icon::CursorIcon::NeResize => CursorIcon::NeResize,
        cursor_icon::CursorIcon::NwResize => CursorIcon::NwResize,
        cursor_icon::CursorIcon::SResize => CursorIcon::SResize,
        cursor_icon::CursorIcon::SeResize => CursorIcon::SeResize,
        cursor_icon::CursorIcon::SwResize => CursorIcon::SwResize,
        cursor_icon::CursorIcon::WResize => CursorIcon::WResize,
        cursor_icon::CursorIcon::EwResize => CursorIcon::EwResize,
        cursor_icon::CursorIcon::NsResize => CursorIcon::NsResize,
        cursor_icon::CursorIcon::NeswResize => CursorIcon::NeswResize,
        cursor_icon::CursorIcon::NwseResize => CursorIcon::NwseResize,
        cursor_icon::CursorIcon::ColResize => CursorIcon::ColResize,
        cursor_icon::CursorIcon::RowResize =>
CursorIcon::RowResize,
        cursor_icon::CursorIcon::AllScroll => CursorIcon::AllScroll,
        cursor_icon::CursorIcon::ZoomIn => CursorIcon::ZoomIn,
        cursor_icon::CursorIcon::ZoomOut => CursorIcon::ZoomOut,
        // Anything else has no protocol equivalent.
        _ => CursorIcon::Default,
    }
}
================================================ FILE: mm-server/src/server/handlers/validation.rs ================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

use mm_protocol as protocol;
use tracing::debug;

use crate::{
    codec::{AudioCodec, VideoCodec},
    color::VideoProfile,
    pixel_scale::PixelScale,
    session::{
        control::{AudioStreamParams, DisplayParams, VideoStreamParams},
        GamepadLayout,
    },
};

// A validation failure: either the message is malformed (Invalid) or it's
// well-formed but asks for something this server can't do (Unsupported).
// Callers map these onto different protocol error codes.
pub enum ValidationError {
    Invalid(String),
    Unsupported(String),
}

// NOTE(review): generic parameters appear stripped in this view; presumably
// this is `Result<T> = std::result::Result<T, ValidationError>` — confirm
// against the upstream source.
type Result = std::result::Result;

// Validates the display parameters attached to a launch/update request.
pub fn validate_display_params(
    params: Option,
) -> Result {
    if let Some(params) = params {
        let (width, height) = validate_resolution(params.resolution)?;
        let framerate = validate_framerate(params.framerate_hz)?;
        let ui_scale = validate_ui_scale(params.ui_scale)?;
        Ok(DisplayParams {
            width,
            height,
            framerate,
            ui_scale,
        })
    } else {
        Err(ValidationError::Invalid(
            "display parameters missing".into(),
        ))
    }
}

// Validates an Attach request, producing the video and audio stream
// parameters the attachment will use.
pub fn validate_attachment(
    params: protocol::Attach,
) -> Result<(VideoStreamParams, AudioStreamParams)> {
    let (width, height) = validate_resolution(params.streaming_resolution)?;
    let video_codec = validate_video_codec(params.video_codec)?;
    let preset = validate_preset(params.quality_preset)?;
    let video_profile = validate_profile(params.video_profile)?;
    let sample_rate = validate_sample_rate(params.sample_rate_hz)?;
    let channels = validate_channels(params.channels)?;
    let audio_codec = validate_audio_codec(params.audio_codec)?;
    Ok((
        VideoStreamParams {
            width,
            height,
            codec: video_codec,
            preset,
            profile: video_profile,
        },
        AudioStreamParams {
            sample_rate,
            channels,
            codec: audio_codec,
        },
    ))
}

// Requires a present, non-zero, even resolution (codecs need even
// dimensions for chroma subsampling).
pub fn validate_resolution(resolution: Option) -> Result<(u32, u32)> {
    match resolution {
        Some(ref size) => {
let (width, height) = (size.width, size.height);
            if width != 0 && height != 0 && width % 2 == 0 && height % 2 == 0 {
                Ok((width, height))
            } else {
                debug!("rejecting invalid resolution: {}x{}", width, height);
                Err(ValidationError::Invalid(
                    "resolution must be non-zero and even".into(),
                ))
            }
        }
        None => Err(ValidationError::Invalid("resolution missing".into())),
    }
}

/// Validates the optional UI scale. An absent scale defaults to 1:1;
/// fractional scales are rejected as unsupported.
pub fn validate_ui_scale(ui_scale: Option<protocol::PixelScale>) -> Result<PixelScale> {
    let Some(scale) = ui_scale else {
        return Ok(PixelScale::ONE);
    };

    let scale = PixelScale::try_from(scale)
        .map_err(|_| ValidationError::Invalid("invalid UI scale".into()))?;
    if scale.is_fractional() {
        return Err(ValidationError::Unsupported(
            "fractional UI scales are not supported".into(),
        ));
    }

    Ok(scale)
}

/// Validates the requested video profile, defaulting to HD if unset.
fn validate_profile(profile: i32) -> Result<VideoProfile> {
    let proto_profile: protocol::VideoProfile = profile
        .try_into()
        .map_err(|_| ValidationError::Invalid("invalid video profile".into()))?;
    if proto_profile == protocol::VideoProfile::Unknown {
        // Unset: default to HD.
        return Ok(VideoProfile::Hd);
    }

    // A known protocol value that doesn't convert to an internal profile is
    // one we don't support.
    proto_profile
        .try_into()
        .map_err(|_| ValidationError::Unsupported("unsupported video profile".into()))
}

/// Validates the requested video codec, defaulting to H.265 if unset.
pub fn validate_video_codec(codec: i32) -> Result<VideoCodec> {
    let proto_codec: protocol::VideoCodec = codec
        .try_into()
        .map_err(|_| ValidationError::Invalid("invalid video codec".into()))?;
    if proto_codec == protocol::VideoCodec::Unknown {
        // Unset: default to H.265.
        return Ok(VideoCodec::H265);
    }

    proto_codec
        .try_into()
        .map_err(|_| ValidationError::Invalid("invalid video codec".into()))
}

/// Validates the quality preset. Wire presets are 1-10 (0 = unset); the
/// internal scale is 0-9.
pub fn validate_preset(preset: u32) -> Result<u32> {
    if preset == 0 {
        // Unset: default to internal preset 6 (i.e. wire preset 7).
        Ok(6)
    } else if preset <= 10 {
        Ok(preset - 1)
    } else {
        Err(ValidationError::Invalid("invalid preset".into()))
    }
}

/// Validates the requested framerate; only 30hz and 60hz are supported.
pub fn validate_framerate(framerate: u32) -> Result<u32> {
    if matches!(framerate, 30 | 60) {
        Ok(framerate)
    } else {
        Err(ValidationError::Unsupported("unsupported framerate".into()))
    }
}

/// Validates the requested audio codec, defaulting to Opus if unset.
pub fn validate_audio_codec(codec: i32) -> Result<AudioCodec> {
    let codec: protocol::AudioCodec = match codec.try_into() {
        Err(_) => return
Err(ValidationError::Invalid("invalid audio codec".into())),
        Ok(protocol::AudioCodec::Unknown) => return Ok(AudioCodec::Opus),
        Ok(v) => v,
    };

    match codec.try_into() {
        Ok(c) => Ok(c),
        Err(_) => Err(ValidationError::Invalid("invalid audio codec".into())),
    }
}

/// Validates the requested sample rate, defaulting to 48khz if unset.
/// Rates outside 16khz-48khz are rejected.
pub fn validate_sample_rate(sample_rate: u32) -> Result<u32> {
    match sample_rate {
        0 => Ok(48000),
        16000..=48000 => Ok(sample_rate),
        _ => Err(ValidationError::Invalid("invalid sample rate".into())),
    }
}

/// Validates the requested channel layout, returning the channel count.
/// Defaults to stereo if unset; only exactly two channels are supported.
pub fn validate_channels(channels: Option<protocol::AudioChannels>) -> Result<u32> {
    let Some(map) = channels else {
        // Default to stereo.
        return Ok(2);
    };

    let count = map.channels.len() as u32;
    for ch in map.channels {
        if let Err(e) = protocol::audio_channels::Channel::try_from(ch) {
            return Err(ValidationError::Invalid(format!("invalid channel: {}", e)));
        }
    }

    if count == 2 {
        Ok(count)
    } else {
        Err(ValidationError::Unsupported(
            "unsupported number of channels".into(),
        ))
    }
}

/// Validates a gamepad description, returning its (id, layout).
pub fn validate_gamepad(gamepad: Option<protocol::Gamepad>) -> Result<(u64, GamepadLayout)> {
    let Some(gamepad) = gamepad else {
        return Err(ValidationError::Invalid("gamepad is required".into()));
    };

    let id = validate_gamepad_id(gamepad.id)?;
    let layout = validate_gamepad_layout(gamepad.layout)?;
    Ok((id, layout))
}

/// Gamepad ids are client-chosen, but must be non-zero.
pub fn validate_gamepad_id(id: u64) -> Result<u64> {
    if id == 0 {
        Err(ValidationError::Invalid("id must be non-zero".into()))
    } else {
        Ok(id)
    }
}

/// Validates the gamepad layout enum.
pub fn validate_gamepad_layout(layout: i32) -> Result<GamepadLayout> {
    match layout.try_into() {
        Err(_) | Ok(protocol::gamepad::GamepadLayout::Unknown) => {
            Err(ValidationError::Invalid("invalid gamepad layout".into()))
        }
        // TODO: for now, every known layout maps to the generic dual-stick one.
        Ok(_) => Ok(GamepadLayout::GenericDualStick),
    }
}
================================================ FILE: mm-server/src/server/handlers.rs ================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

use std::{fs::File, path::Path};

use anyhow::bail;
use bytes::Bytes;
use crossbeam_channel::Receiver;
use mm_protocol as protocol;
use
protocol::error::ErrorCode; use tracing::{debug, debug_span, error, trace}; use crate::{ session::{control::DisplayParams, Session}, state::SharedState, waking_sender::{WakingOneshot, WakingSender}, }; mod attachment; mod validation; use validation::*; #[derive(Debug, Clone)] struct ServerError(protocol::error::ErrorCode, Option); struct Context { state: SharedState, incoming: Receiver, outgoing: WakingSender, outgoing_dgrams: WakingSender>, max_dgram_len: usize, } impl Context { fn send_err(&self, err: ServerError) { let ServerError(code, text) = err; if let Some(text) = text.as_ref() { debug!("handler ended with error: {:?}: {}", code, text); } else { debug!("handler ended with error: {:?}", code); } let err = protocol::Error { err_code: code.into(), error_text: text.unwrap_or_default(), }; self.outgoing.send(err.into()).ok(); } } type Result = std::result::Result; pub fn dispatch( state: SharedState, incoming: Receiver, outgoing: WakingSender, outgoing_dgrams: WakingSender>, max_dgram_len: usize, done: WakingOneshot<()>, ) { let instant = std::time::Instant::now(); let initial = match incoming.recv() { Ok(msg) => msg, Err(_) => { error!("empty worker pipe"); return; } }; let span = debug_span!("dispatch", initial = %initial); let _guard = span.enter(); let ctx = Context { state, incoming, outgoing, outgoing_dgrams, max_dgram_len, }; match initial { protocol::MessageType::ListApplications(msg) => roundtrip(list_applications, &ctx, msg), protocol::MessageType::FetchApplicationImage(msg) => roundtrip(fetch_img, &ctx, msg), protocol::MessageType::LaunchSession(msg) => roundtrip(launch_session, &ctx, msg), protocol::MessageType::ListSessions(msg) => roundtrip(list_sessions, &ctx, msg), protocol::MessageType::UpdateSession(msg) => roundtrip(update_session, &ctx, msg), protocol::MessageType::EndSession(msg) => roundtrip(end_session, &ctx, msg), protocol::MessageType::Attach(msg) => { if let Err(err) = attachment::attach(&ctx, msg) { ctx.send_err(err); } else { // Clean 
// exit, no final message.
            }
        }
        _ => {
            error!("unexpected message type: {}", initial);
            ctx.send_err(ServerError(ErrorCode::ErrorProtocolUnexpectedMessage, None));
        }
    };

    // Explicitly hang up.
    drop(ctx);
    let _ = done.send(());

    debug!(dur = ?instant.elapsed(), "worker finished");
}

/// Runs a single request/response exchange: calls the handler `f` and sends
/// either its successful response or its error back to the client. A failed
/// send is only logged, since it just means the client already hung up.
///
/// NOTE(review): generic parameters appear to have been stripped from this
/// extract (e.g. `roundtrip<Req, Resp, F>` and `Result<Resp, ServerError>`);
/// the remaining tokens are reproduced unchanged.
fn roundtrip(f: F, ctx: &Context, req: Req)
where
    Resp: Into,
    F: Fn(&Context, Req) -> Result,
{
    match f(ctx, req) {
        Ok(resp) => {
            if ctx.outgoing.send(resp.into()).is_err() {
                debug!("client hung up before response could be sent");
            }
        }
        Err(err) => {
            error!(?err, "handler returned error");
            ctx.send_err(err);
        }
    }
}

/// Handler for ListApplications: returns one entry per configured app,
/// advertising a header image only when one is configured.
fn list_applications(
    ctx: &Context,
    _msg: protocol::ListApplications,
) -> Result {
    let apps = ctx
        .state
        .lock()
        .cfg
        .apps
        .iter()
        .map(|(id, app)| protocol::application_list::Application {
            id: id.clone(),
            description: app.description.clone().unwrap_or_default(),
            folder: app.path.clone(),
            images_available: if app.header_image.is_some() {
                vec![protocol::ApplicationImageFormat::Header.into()]
            } else {
                vec![]
            },
        })
        .collect();

    Ok(protocol::ApplicationList { list: apps })
}

/// Handler for FetchApplicationImage: loads the configured header image for
/// an application from disk, capped at `config::MAX_IMAGE_SIZE` bytes.
fn fetch_img(
    ctx: &Context,
    msg: protocol::FetchApplicationImage,
) -> Result {
    // Only the Header image format is currently supported.
    match msg.format.try_into() {
        Ok(protocol::ApplicationImageFormat::Header) => (),
        _ => {
            return Err(ServerError(
                ErrorCode::ErrorProtocol,
                Some("unknown application image type".to_string()),
            ));
        }
    }

    let Some(config) = ctx.state.lock().cfg.apps.get(&msg.application_id).cloned() else {
        return Err(ServerError(
            ErrorCode::ErrorApplicationNotFound,
            Some("application not found".to_string()),
        ));
    };

    let Some(path) = &config.header_image else {
        return Err(ServerError(
            ErrorCode::ErrorApplicationNotFound,
            Some("image not found".to_string()),
        ));
    };

    match read_file(path, crate::config::MAX_IMAGE_SIZE) {
        Ok(image_data) => Ok(protocol::ApplicationImage { image_data }),
        Err(err) => {
            error!(path = ?path, ?err, "failed to load image data");
            Err(ServerError(
                ErrorCode::ErrorServer,
                Some("failed to load image".into()),
            ))
        }
    }
}

/// Handler for LaunchSession: validates the requested display params and
/// gamepads, then spawns a new session running the given application.
fn launch_session(
    ctx: &Context,
    msg: protocol::LaunchSession,
) -> Result {
    let display_params = validate_display_params(msg.display_params).map_err(|err| match err {
        ValidationError::Unsupported(text) => {
            ServerError(ErrorCode::ErrorSessionParamsNotSupported, Some(text))
        }
        ValidationError::Invalid(text) => ServerError(ErrorCode::ErrorProtocol, Some(text)),
    })?;

    // Tracy gets confused if we have multiple sessions going.
    let mut guard = ctx.state.lock();
    if cfg!(feature = "tracy") && !guard.sessions.is_empty() {
        return Err(ServerError(
            ErrorCode::ErrorServer,
            Some("only one session allowed if actively debugging".into()),
        ));
    }

    // Don't keep the state cloned while we launch the session.
    let vk_clone = guard.vk.clone();
    let Some(application_config) = guard.cfg.apps.get(&msg.application_id).cloned() else {
        return Err(ServerError(
            ErrorCode::ErrorSessionLaunchFailed,
            Some("application not found".to_string()),
        ));
    };

    for gamepad in msg.permanent_gamepads.clone() {
        validate_gamepad(Some(gamepad)).map_err(|err| match err {
            ValidationError::Unsupported(text) => {
                ServerError(ErrorCode::ErrorSessionParamsNotSupported, Some(text))
            }
            ValidationError::Invalid(text) => ServerError(ErrorCode::ErrorProtocol, Some(text)),
        })?;
    }

    let bug_report_dir = guard.cfg.bug_report_dir.clone();
    let (session_seq, session_id) = guard.generate_session_id();
    drop(guard);

    // Create a folder in the bug report directory just for this session.
    let mut bug_report_dir = bug_report_dir;
    if let Some(ref mut dir) = bug_report_dir {
        dir.push(format!("session-{:02}-{}", session_seq, session_id));
        // NOTE(review): this unwrap panics the worker thread if the directory
        // can't be created (e.g. permissions); consider surfacing it as a
        // ServerError instead.
        std::fs::create_dir_all(dir).unwrap();
    }

    let session = match Session::launch(
        vk_clone,
        session_id,
        &msg.application_id,
        &application_config,
        display_params,
        msg.permanent_gamepads,
        bug_report_dir,
    ) {
        Ok(session) => session,
        Err(err) => {
            error!(?err, "failed to launch session");
            return Err(ServerError(ErrorCode::ErrorSessionLaunchFailed, None));
        }
    };

    let id = session.id;
    ctx.state.lock().sessions.insert(id, session);

    // XXX: The protocol allows us to support superresolution here, but we don't
    // know how to downscale before encoding (yet).
    Ok(protocol::SessionLaunched {
        id,
        supported_streaming_resolutions: generate_streaming_res(&display_params),
    })
}

/// Handler for ListSessions: returns a summary of every active session.
fn list_sessions(ctx: &Context, _msg: protocol::ListSessions) -> Result {
    let sessions = ctx
        .state
        .lock()
        .sessions
        .values()
        .map(|s| protocol::session_list::Session {
            application_id: s.application_id.clone(),
            session_id: s.id,
            session_start: Some(s.started.into()),
            display_params: Some(s.display_params.into()),
            supported_streaming_resolutions: generate_streaming_res(&s.display_params),
            permanent_gamepads: s.permanent_gamepads.clone(),
        })
        .collect();

    Ok(protocol::SessionList { list: sessions })
}

/// Handler for UpdateSession: applies new display params to a running
/// session, or does nothing if they are unchanged.
fn update_session(ctx: &Context, msg: protocol::UpdateSession) -> Result {
    let display_params = validate_display_params(msg.display_params).map_err(|err| match err {
        ValidationError::Unsupported(text) => {
            ServerError(ErrorCode::ErrorSessionParamsNotSupported, Some(text))
        }
        ValidationError::Invalid(text) => ServerError(ErrorCode::ErrorProtocol, Some(text)),
    })?;

    let mut state = ctx.state.lock();
    let Some(session) = state.sessions.get_mut(&msg.session_id) else {
        return Err(ServerError(ErrorCode::ErrorSessionNotFound, None));
    };

    trace!(?session.display_params, ?display_params, "update_session");

    if session.display_params != display_params {
        if let Err(err) = session.update_display_params(display_params) {
            error!(?err, "failed to update display params");
            return Err(ServerError(
                ErrorCode::ErrorServer,
                Some("failed to update display params".to_string()),
            ));
        }
    } else {
        debug!("display params unchanged; ignoring update");
    }

    Ok(protocol::SessionUpdated {})
}

/// Handler for EndSession: removes the session from the shared state and
/// attempts a graceful stop; a failed stop is logged but not fatal.
fn end_session(ctx: &Context, msg: protocol::EndSession) -> Result {
    let Some(session) = ctx.state.lock().sessions.remove(&msg.session_id) else {
        return Err(ServerError(ErrorCode::ErrorSessionNotFound, None));
    };

    if let Err(e) = session.stop() {
        error!("failed to gracefully stop session: {}", e)
    };

    Ok(protocol::SessionEnded {})
}

/// Returns the streaming resolutions we can offer for a session; currently
/// only the native display resolution.
fn generate_streaming_res(display_params: &DisplayParams) -> Vec {
    // XXX: The protocol allows us to support superresolution here, but we don't
    // know how to downscale before encoding (yet).
    vec![protocol::Size {
        width: display_params.width,
        height: display_params.height,
    }]
}

/// Reads a whole file into memory, failing if it is empty or larger than
/// `max_size` bytes. The reader is capped at `max_size + 1` so an oversized
/// file is detected without reading all of it.
fn read_file(p: impl AsRef, max_size: u64) -> anyhow::Result {
    use std::io::Read as _;

    use bytes::buf::BufMut;

    let mut r = File::open(p.as_ref())?.take(max_size + 1);
    let mut w = bytes::BytesMut::new().writer();
    match std::io::copy(&mut r, &mut w) {
        Ok(len) if len > max_size => bail!("file is bigger than maximum size"),
        Ok(0) => bail!("file is empty"),
        Ok(len) => {
            let mut buf = w.into_inner();
            Ok(buf.split_to(len as usize).freeze())
        }
        Err(e) => Err(e.into()),
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_read_file() -> anyhow::Result<()> {
        // Empty files are rejected.
        let zero_file = mktemp::Temp::new_file()?;
        assert_eq!("".to_string(), std::fs::read_to_string(&zero_file)?);
        assert!(read_file(&zero_file, 1024).is_err());
        drop(zero_file);

        // Files at or below the limit are read fully; files above it error.
        let s = "foobar".repeat(64);
        let len = s.len() as u64;

        let big_file = mktemp::Temp::new_file()?;
        std::fs::write(&big_file, &s)?;
        assert_eq!(s, std::fs::read_to_string(&big_file)?);

        assert_eq!(s.as_bytes().to_vec(), read_file(&big_file, len)?);
        assert_eq!(s.as_bytes().to_vec(), read_file(&big_file, len + 1)?);
        assert!(read_file(&big_file, len - 1).is_err());
        drop(big_file);

        Ok(())
    }
}

================================================ FILE: mm-server/src/server/mdns.rs ================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

use std::net::SocketAddr;

use anyhow::bail;
use tracing::debug;

/// Advertises the server over mDNS (DNS-SD) for the lifetime of this value;
/// the service is unregistered when dropped.
pub struct MdnsService {
    daemon: mdns_sd::ServiceDaemon,
    service_name: String,
}

impl MdnsService {
    /// Registers a `_magic-mirror._udp` service for `addr`. The hostname and
    /// instance name default to values derived from the system hostname. If
    /// `addr` is unspecified (0.0.0.0/::), address auto-detection is enabled
    /// instead of advertising a fixed IP.
    pub fn new(
        addr: SocketAddr,
        hostname: Option<&str>,
        instance_name: Option<&str>,
    ) -> anyhow::Result {
        let daemon = mdns_sd::ServiceDaemon::new()?;

        // Advertise the protocol version in a TXT record.
        let txt = [(
            "mmp",
            std::str::from_utf8(mm_protocol::ALPN_PROTOCOL_VERSION).unwrap(),
        )];

        let hostname = match hostname {
            Some(h) => h.to_owned(),
            None => mdns_hostname()?,
        };

        let instance_name = match instance_name {
            Some(s) => s.to_owned(),
            None => mdns_instance_name(&hostname)?,
        };

        let ip = addr.ip();
        let (ip, ip_auto) = if ip.is_unspecified() {
            (vec![], true)
        } else {
            (vec![ip], false)
        };

        let mut service_info = mdns_sd::ServiceInfo::new(
            "_magic-mirror._udp.local.",
            &instance_name,
            &hostname,
            &ip[..],
            addr.port(),
            &txt[..],
        )?;

        if ip_auto {
            service_info = service_info.enable_addr_auto();
        }

        let service_name = service_info.get_fullname().to_owned();
        daemon.register(service_info)?;

        debug!(hostname, instance_name, ip = ?ip.first(), ip_auto, "advertizing service");

        Ok(Self {
            daemon,
            service_name,
        })
    }
}

impl Drop for MdnsService {
    fn drop(&mut self) {
        // First unregister the service, retrying while the daemon reports
        // EAGAIN.
        loop {
            match self.daemon.unregister(&self.service_name) {
                Ok(_) => break,
                Err(mdns_sd::Error::Again) => continue,
                Err(err) => {
                    // NOTE(review): this message says "shutting down", but
                    // this branch is the unregister step; looks copy-pasted
                    // from the loop below.
                    debug!(?err, "error shutting down mdns daemon");
                    return;
                }
            }
        }

        // Then shut the daemon itself down, with the same retry behavior.
        loop {
            match self.daemon.shutdown() {
                Ok(_) => return,
                Err(mdns_sd::Error::Again) => continue,
                Err(err) => {
                    debug!(?err, "error shutting down mdns daemon");
                    return;
                }
            }
        }
    }
}

/// Derives an mDNS hostname (ending in ".local.") from the system hostname.
/// Fails if the hostname is empty or already a qualified domain.
fn mdns_hostname() -> anyhow::Result {
    let uname = rustix::system::uname();
    let hostname = uname.nodename().to_str()?;

    if hostname.is_empty() {
        bail!("empty hostname");
    }

    if hostname.ends_with(".local") {
        return
Ok(format!("{hostname}."));
    } else if hostname.contains('.') {
        bail!("hostname appears to be a qualified domain");
    }

    Ok(format!("{hostname}.local."))
}

/// Derives a human-friendly mDNS instance name from a hostname: the first
/// dot-separated label, uppercased.
fn mdns_instance_name(hostname: &str) -> anyhow::Result {
    if hostname.is_empty() {
        bail!("empty hostname");
    }

    let hostname = match hostname.split_once('.') {
        Some((host, _)) => host,
        None => hostname,
    };

    Ok(hostname.to_uppercase())
}

================================================ FILE: mm-server/src/server/sendmmsg.rs ================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

use std::os::fd::{AsFd, AsRawFd};
use std::time;
use std::{io::IoSlice, net::SocketAddr};

use nix::sys::socket::{
    cmsg_space, setsockopt, sockopt::TxTime, ControlMessage, MsgFlags, MultiHeaders,
    SockaddrStorage,
};
use tracing::instrument;

/// Batches outgoing UDP packets, each with a destination and a scheduled
/// transmit time (SO_TXTIME), for a single sendmmsg(2) call in `finish`.
///
/// NOTE(review): several generic parameters on the fields below appear to
/// have been stripped by the extraction tool; tokens reproduced as-is.
#[derive(Default)]
pub struct SendMmsg<'a> {
    iovs: Vec<[IoSlice<'a>; 1]>,
    addrs: Vec>,
    txtimes: Vec,
}

impl<'a> SendMmsg<'a> {
    /// Queues one packet for transmission at `txtime`. Builder-style; the
    /// actual syscall happens in `finish`.
    #[instrument(skip_all)]
    pub fn sendmsg(mut self, buf: &'a [u8], addr: SocketAddr, txtime: time::Instant) -> Self {
        self.iovs.push([IoSlice::new(buf)]);
        self.addrs.push(Some(addr.into()));

        let txtime = std_time_to_u64(&txtime);
        self.txtimes.push(txtime);

        self
    }

    /// Sends all queued packets on `fd` via sendmmsg(2), attaching each
    /// packet's TxTime as a control message.
    ///
    /// NOTE(review): EAGAIN triggers an immediate retry with no backoff; on
    /// a persistently-full socket buffer this busy-spins.
    #[instrument(skip_all)]
    pub fn finish(&mut self, fd: &impl AsRawFd) -> Result<(), nix::Error> {
        let mut data: MultiHeaders = MultiHeaders::preallocate(
            self.iovs.len(),
            Some(Vec::with_capacity(cmsg_space::() * self.iovs.len())),
        );

        let cmsgs = self
            .txtimes
            .iter()
            .map(ControlMessage::TxTime)
            .collect::>();

        loop {
            match nix::sys::socket::sendmmsg(
                fd.as_raw_fd(),
                &mut data,
                &self.iovs,
                &self.addrs,
                &cmsgs,
                MsgFlags::empty(),
            ) {
                Ok(_) => break,
                Err(nix::errno::Errno::EAGAIN) => continue,
                Err(e) => return Err(e),
            };
        }

        Ok(())
    }
}

/// Creates an empty batch.
pub fn new<'a>() -> SendMmsg<'a> {
    SendMmsg::default()
}

/// Enables SO_TXTIME on the socket, using the monotonic clock, so queued
/// packets are released by the kernel at their scheduled transmit times.
#[cfg(target_os = "linux")]
pub fn set_so_txtime(sock: &impl AsFd) -> anyhow::Result<()> {
    let config = nix::libc::sock_txtime {
        clockid: nix::libc::CLOCK_MONOTONIC,
        flags: 0,
    };

    setsockopt(&sock, TxTime, &config)?;
    Ok(())
}

/// Converts an Instant to nanoseconds on the CLOCK_MONOTONIC timescale, as
/// expected by SO_TXTIME.
#[cfg(target_os = "linux")]
fn std_time_to_u64(time: &std::time::Instant) -> u64 {
    const NANOS_PER_SEC: u64 = 1_000_000_000;

    // NOTE(review): transmuting UNIX_EPOCH (a SystemTime) into an Instant
    // relies on both wrapping the same libc timespec layout; this is the
    // trick used by the quiche examples, but it depends on std internals.
    const INSTANT_ZERO: std::time::Instant = unsafe { std::mem::transmute(std::time::UNIX_EPOCH) };

    let raw_time = time.duration_since(INSTANT_ZERO);

    let sec = raw_time.as_secs();
    let nsec = raw_time.subsec_nanos();

    sec * NANOS_PER_SEC + nsec as u64
}

================================================ FILE: mm-server/src/server/stream.rs ================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

use bytes::Bytes;
use either::Either;
use mm_protocol as protocol;
use tracing::{debug, error, instrument, trace_span};

use crate::{config, waking_sender::WakingSender};

/// A helper to write audio/video frames out as chunks to the client. Runs on
/// the encoder thread, not on the server thread.
pub struct StreamWriter {
    session_id: u64,
    attachment_id: u64,
    outgoing: WakingSender>,
    chunk_size: usize,
    max_dgram_len: usize,
    fec_ratios: Vec,
    audio_stream_seq: u64,
    audio_seq: u64,
    video_stream_seq: u64,
    video_seq: u64,
}

impl StreamWriter {
    /// Creates a writer for one attachment. `max_dgram_len` is the largest
    /// datagram the connection can carry; chunks are sized below it to leave
    /// room for protocol framing.
    pub fn new(
        session_id: u64,
        attachment_id: u64,
        config: &config::ServerConfig,
        outgoing: WakingSender>,
        max_dgram_len: usize,
    ) -> Self {
        // max_dgram_len is our overall MTU. The MM protocol header is 2-10 bytes,
        // and then we include seven varints (maximum 5 bytes each) and a bool of
        // metadata, plus an optional 12-ish bytes of FEC information. 64 bytes of
        // headroom should cover the worst case. However, a little extra will
        // increase the chance that the packet is coalesced into an existing QUIC
        // packet.
        let chunk_size = max_dgram_len - 128;

        Self {
            session_id,
            attachment_id,
            outgoing,
            chunk_size,
            max_dgram_len,
            fec_ratios: config.video_fec_ratios.clone(),
            // The first stream_seq is 1, but we increment immediately below.
audio_stream_seq: 0,
            video_stream_seq: 0,
            audio_seq: 0,
            video_seq: 0,
        }
    }

    /// Chunks one encoded video frame into datagrams and queues them for
    /// sending. `stream_restart` bumps the stream sequence and resets the
    /// frame sequence. Returns the (stream_seq, seq) used for this frame.
    #[instrument(skip_all)]
    pub fn write_video_frame(
        &mut self,
        pts: u64,
        frame: Bytes,
        hierarchical_layer: u32,
        stream_restart: bool,
    ) -> (u64, u64) {
        if stream_restart {
            self.video_stream_seq += 1;
            self.video_seq = 0;

            debug!(
                stream_seq = self.video_stream_seq,
                "starting or restarting video stream"
            );
        }

        let seq = self.video_seq;

        // FEC ratios are configured per hierarchical layer; layers without a
        // configured ratio get no FEC (ratio 0.0).
        let fec_ratio = self
            .fec_ratios
            .get(hierarchical_layer as usize)
            .copied()
            .unwrap_or_default();

        for chunk in iter_chunks(frame, self.chunk_size, fec_ratio) {
            let msg = protocol::VideoChunk {
                session_id: self.session_id,
                attachment_id: self.attachment_id,
                stream_seq: self.video_stream_seq,
                seq,
                data: chunk.data,
                chunk: chunk.index,
                num_chunks: chunk.num_chunks,
                hierarchical_layer,
                timestamp: pts,
                fec_metadata: chunk.fec_metadata,
            };

            let res: Result<_, protocol::ProtocolError> =
                trace_span!("encode_message").in_scope(|| {
                    let mut buf = vec![0; self.max_dgram_len];
                    let len = protocol::encode_message(&msg.into(), &mut buf)?;
                    buf.truncate(len);
                    Ok(buf)
                });

            match res {
                Ok(buf) => {
                    let _ = self.outgoing.send(buf);
                }
                Err(err) => {
                    error!(?err, "failed to encode video chunk");
                }
            };
        }

        self.video_seq += 1;
        (self.video_stream_seq, seq)
    }

    /// Chunks one encoded audio frame into datagrams and queues them for
    /// sending. Audio never uses FEC. Returns the (stream_seq, seq) used.
    #[instrument(skip_all)]
    pub fn write_audio_frame(
        &mut self,
        pts: u64,
        frame: Bytes,
        stream_restart: bool,
    ) -> (u64, u64) {
        if stream_restart {
            self.audio_stream_seq += 1;
            self.audio_seq = 0;

            debug!(
                stream_seq = self.audio_stream_seq,
                "starting or restarting audio stream"
            );
        }

        let seq = self.audio_seq;

        for chunk in iter_chunks(frame, self.chunk_size, 0.0) {
            let msg = protocol::AudioChunk {
                session_id: self.session_id,
                attachment_id: self.attachment_id,
                stream_seq: self.audio_stream_seq,
                seq,
                data: chunk.data,
                chunk: chunk.index,
                num_chunks: chunk.num_chunks,
                timestamp: pts,
                fec_metadata: chunk.fec_metadata,
            };

            let res: Result<_, protocol::ProtocolError> =
                trace_span!("encode_message").in_scope(|| {
                    let mut buf = vec![0; self.max_dgram_len];
                    let len = protocol::encode_message(&msg.into(), &mut buf)?;
                    buf.truncate(len);
                    Ok(buf)
                });

            match res {
                Ok(buf) => {
                    let _ = self.outgoing.send(buf);
                }
                Err(err) => {
                    error!(?err, "failed to encode audio chunk");
                }
            };
        }

        self.audio_seq += 1;
        (self.audio_stream_seq, seq)
    }
}

/// One wire chunk of a frame: its index, the total chunk count for the
/// frame, the payload, and FEC metadata when FEC is in use.
pub struct Chunk {
    pub index: u32,
    pub num_chunks: u32,
    pub data: Bytes,
    pub fec_metadata: Option,
}

/// Splits `buf` into chunks of at most `mtu` bytes. With a positive
/// `fec_ratio`, delegates to the RaptorQ path, which adds repair chunks.
pub fn iter_chunks(
    mut buf: bytes::Bytes,
    mtu: usize,
    fec_ratio: f32,
) -> impl Iterator {
    if fec_ratio > 0.0 {
        return Either::Left(iter_chunks_fec(buf, mtu, fec_ratio));
    }

    let num_chunks = buf.len().div_ceil(mtu) as u32;
    let mut next_chunk: u32 = 0;

    let span = trace_span!("iter_chunks");
    let _guard = span.enter();

    Either::Right(std::iter::from_fn(move || {
        if buf.is_empty() {
            return None;
        }

        // split_to is zero-copy; each chunk shares the frame's storage.
        let data = if buf.len() < mtu {
            buf.split_to(buf.len())
        } else {
            buf.split_to(mtu)
        };

        let chunk = next_chunk;
        next_chunk += 1;

        Some(Chunk {
            index: chunk,
            num_chunks,
            data,
            fec_metadata: None,
        })
    }))
}

/// RaptorQ chunking: emits the source chunks plus `ratio` * base repair
/// chunks, each carrying its payload id and the (shared) OTI config the
/// decoder needs.
#[instrument(skip_all)]
fn iter_chunks_fec(buf: Bytes, mtu: usize, ratio: f32) -> impl Iterator {
    let encoder = raptorq::Encoder::with_defaults(&buf, mtu as u16);
    let oti = Bytes::copy_from_slice(&encoder.get_config().serialize());

    let base_chunks = buf.len().div_ceil(mtu) as u32;
    let repair_chunks = (base_chunks as f32 * ratio).ceil() as u32;

    let chunks = encoder.get_encoded_packets(repair_chunks);
    let num_chunks = chunks.len() as u32;

    chunks.into_iter().enumerate().map(move |(chunk, p)| Chunk {
        index: chunk as u32,
        num_chunks,
        data: Bytes::copy_from_slice(p.data()),
        fec_metadata: Some(mm_protocol::FecMetadata {
            fec_scheme: protocol::fec_metadata::FecScheme::Raptorq.into(),
            fec_payload_id: Bytes::copy_from_slice(&p.payload_id().serialize()),
            fec_oti: oti.clone(),
        }),
    })
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_iter_chunks() {
        // 3536 bytes at mtu 1200 -> two full chunks plus a 1136-byte tail.
        let frame = bytes::Bytes::from(vec![9; 3536]);
        let mut chunks = iter_chunks(frame, 1200, 0.0);

        let chunk = chunks.next().unwrap();
        assert_eq!(chunk.index, 0);
        assert_eq!(chunk.num_chunks, 3);
        assert_eq!(chunk.data.len(), 1200);
        assert_eq!(chunk.fec_metadata, None);

        let chunk = chunks.next().unwrap();
        assert_eq!(chunk.index, 1);
        assert_eq!(chunk.num_chunks, 3);
        assert_eq!(chunk.data.len(), 1200);
        assert_eq!(chunk.fec_metadata, None);

        let chunk = chunks.next().unwrap();
        assert_eq!(chunk.index, 2);
        assert_eq!(chunk.num_chunks, 3);
        assert_eq!(chunk.data.len(), 1136);
        assert_eq!(chunk.fec_metadata, None);

        assert!(chunks.next().is_none());
    }

    #[test]
    fn test_iter_chunks_fec() {
        // With a 0.15 ratio, 3 source chunks gain ceil(3 * 0.15) = 1 repair
        // chunk, for 4 total, all padded to the mtu.
        let frame = bytes::Bytes::from(vec![9; 3536]);
        let mut chunks = iter_chunks(frame, 1200, 0.15);

        let chunk = chunks.next().unwrap();
        assert_eq!(chunk.index, 0);
        assert_eq!(chunk.num_chunks, 4);
        assert_eq!(chunk.data.len(), 1200);
        assert_eq!(
            chunk.fec_metadata.as_ref().unwrap().fec_scheme(),
            protocol::fec_metadata::FecScheme::Raptorq
        );
        assert_eq!(chunk.fec_metadata.as_ref().unwrap().fec_oti.len(), 12);

        let chunk = chunks.next().unwrap();
        assert_eq!(chunk.index, 1);
        assert_eq!(chunk.num_chunks, 4);
        assert_eq!(chunk.data.len(), 1200);
        assert_eq!(
            chunk.fec_metadata.as_ref().unwrap().fec_scheme(),
            protocol::fec_metadata::FecScheme::Raptorq
        );
        assert_eq!(chunk.fec_metadata.as_ref().unwrap().fec_oti.len(), 12);

        let chunk = chunks.next().unwrap();
        assert_eq!(chunk.index, 2);
        assert_eq!(chunk.num_chunks, 4);
        assert_eq!(chunk.data.len(), 1200);
        assert_eq!(
            chunk.fec_metadata.as_ref().unwrap().fec_scheme(),
            protocol::fec_metadata::FecScheme::Raptorq
        );
        assert_eq!(chunk.fec_metadata.as_ref().unwrap().fec_oti.len(), 12);

        let chunk = chunks.next().unwrap();
        assert_eq!(chunk.index, 3);
        assert_eq!(chunk.num_chunks, 4);
        assert_eq!(chunk.data.len(), 1200);
        assert_eq!(
            chunk.fec_metadata.as_ref().unwrap().fec_scheme(),
            protocol::fec_metadata::FecScheme::Raptorq
        );
        assert_eq!(chunk.fec_metadata.as_ref().unwrap().fec_oti.len(), 12);

        assert!(chunks.next().is_none());
    }
}

================================================ FILE: mm-server/src/server.rs
================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

mod handlers;
mod mdns;
mod sendmmsg;
pub mod stream;

use std::collections::{BTreeMap, VecDeque};
use std::net::SocketAddr;
use std::sync::Arc;
use std::time;

use anyhow::anyhow;
use anyhow::bail;
use anyhow::Context;
use bytes::{Buf, Bytes, BytesMut};
use crossbeam_channel::{Receiver, Sender, TryRecvError};
use hashbrown::HashMap;
use mm_protocol as protocol;
use protocol::error::ErrorCode;
use ring::rand::{self, SecureRandom};
use tracing::trace;
use tracing::trace_span;
use tracing::warn;
use tracing::{debug, error};
use tracing::{debug_span, instrument};

use crate::state::SharedState;
use crate::waking_sender::WakingOneshot;
use crate::waking_sender::WakingSender;

// The fixed size used for QUIC packets and the receive buffer.
const MAX_QUIC_PACKET_SIZE: usize = 1350;

// mio poll tokens for the UDP socket and the cross-thread waker; per-client
// timerfds get tokens starting at `next_timer_token` (1024).
const SOCKET: mio::Token = mio::Token(0);
const WAKER: mio::Token = mio::Token(1);

/// The QUIC server: owns the UDP socket, the mio poll loop, all client
/// connections, and the worker thread pool that runs stream handlers.
///
/// NOTE(review): generic parameters on several fields below appear to have
/// been stripped by the extraction tool; tokens reproduced as-is.
pub struct Server {
    server_config: crate::config::ServerConfig,
    quiche_config: quiche::Config,
    addr: SocketAddr,
    socket: mio::net::UdpSocket,
    scratch: BytesMut,
    outgoing_packets: VecDeque,
    poll: mio::Poll,
    waker: Arc,
    next_timer_token: usize,
    thread_pool: threadpool::ThreadPool,
    clients: HashMap, ClientConnection>,
    state: SharedState,
    close_recv: Receiver<()>,
    close_send: WakingSender<()>,
    _mdns: Option,
    shutting_down: bool,
}

// A packet that couldn't be sent without blocking, queued for retry.
struct Outgoing {
    buf: Bytes,
    to: SocketAddr,
}

/// Channel endpoints for one in-flight stream worker running on the thread
/// pool. `incoming_messages` is taken (set to None) to signal the peer's FIN.
pub struct StreamWorker {
    incoming_messages: Option>,
    outgoing_messages: Receiver,
    done: oneshot::Receiver<()>,
}

/// Per-client QUIC connection state, including buffered partial stream
/// reads/writes and the datagram channel shared with attachment workers.
pub struct ClientConnection {
    remote_addr: SocketAddr,
    conn_id: quiche::ConnectionId<'static>,
    conn: quiche::Connection,
    timer: mio_timerfd::TimerFd,
    timeout_token: mio::Token,
    partial_reads: BTreeMap,
    partial_writes: BTreeMap,
    in_flight: BTreeMap,
    dgram_recv: Receiver>,
    dgram_send: WakingSender>,
    last_keepalive: time::Instant,
}

impl Server {
    /// Builds a server around an already-bound UDP socket. TLS cert/key from
    /// the config are used when present; otherwise a self-signed certificate
    /// is generated, which is only permitted for private or loopback
    /// addresses.
    pub fn new(
        socket: std::net::UdpSocket,
        server_config: crate::config::ServerConfig,
        state: SharedState,
    ) -> anyhow::Result {
        let poll = mio::Poll::new().unwrap();
        let waker = Arc::new(mio::Waker::new(poll.registry(), WAKER)?);
        let clients = HashMap::new();

        let mut config = match (&server_config.tls_cert, &server_config.tls_key) {
            (Some(cert), Some(key)) => {
                let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
                config
                    .load_cert_chain_from_pem_file(cert.to_str().unwrap())
                    .context("loading certificate file")?;
                config
                    .load_priv_key_from_pem_file(key.to_str().unwrap())
                    .context("loading private key file")?;

                config
            }
            _ => {
                let addr = socket.local_addr()?;
                let ip = addr.ip();
                if ip_rfc::global(&ip) || ip.is_unspecified() {
                    bail!("TLS is required for non-private addresses");
                }

                let tls_ctx = self_signed_tls_ctx(addr)?;
                quiche::Config::with_boring_ssl_ctx_builder(quiche::PROTOCOL_VERSION, tls_ctx)?
            }
        };

        config.set_application_protos(&[protocol::ALPN_PROTOCOL_VERSION])?;

        config.set_initial_max_data(65536);
        config.set_initial_max_stream_data_bidi_remote(65536);
        config.set_initial_max_stream_data_bidi_local(65536);
        config.set_initial_max_stream_data_uni(65536);
        config.set_initial_max_streams_bidi(64);
        config.set_initial_max_streams_uni(64);
        config.enable_dgram(true, 0, 1024 * 1024);
        config.enable_early_data();

        // Set the idle timeout to 10s. If any streams are active, we send
        // ack-eliciting frames so that we don't accidentally kill a client
        // that's in the middle of something slow (like launching a session).
        config.set_max_idle_timeout(10_000);

        // Storage for packets that would have blocked on sending.
        let outgoing_packets = VecDeque::new();

        socket.set_nonblocking(true)?;
        sendmmsg::set_so_txtime(&socket)?;

        let mut socket = mio::net::UdpSocket::from_std(socket);
        poll.registry()
            .register(&mut socket, SOCKET, mio::Interest::READABLE)?;

        let (close_send, close_recv) = crossbeam_channel::bounded(1);
        let close_send = WakingSender::new(waker.clone(), close_send);

        let thread_pool = threadpool::ThreadPool::new(server_config.worker_threads.get() as usize);

        let addr = socket.local_addr()?;
        let mdns = if server_config.mdns {
            match mdns::MdnsService::new(
                addr,
                server_config.mdns_hostname.as_deref(),
                server_config.mdns_instance_name.as_deref(),
            ) {
                Ok(sd) => Some(sd),
                Err(e) => {
                    // mDNS is best-effort; failure doesn't stop the server.
                    error!("failed to enable mDNS service discovery: {e:#}");
                    None
                }
            }
        } else {
            None
        };

        Ok(Self {
            server_config,
            quiche_config: config,
            addr: socket.local_addr()?,
            socket,
            scratch: BytesMut::with_capacity(65536),
            outgoing_packets,
            poll,
            waker,
            next_timer_token: 1024,
            thread_pool,
            clients,
            state,
            close_send,
            close_recv,
            _mdns: mdns,
            shutting_down: false,
        })
    }

    /// Returns the socket's local address.
    pub fn local_addr(&self) -> anyhow::Result {
        Ok(self.socket.local_addr()?)
    }

    /// Returns a sender that triggers a graceful shutdown when written to.
    pub fn closer(&self) -> WakingSender<()> {
        self.close_send.clone()
    }

    /// Starts the server loop, returning only on error.
    pub fn run(&mut self) -> anyhow::Result<()> {
        let mut events = mio::Events::with_capacity(1024);
        'poll: loop {
            // TODO: It might be worthwhile to switch to a busy loop if
            // there are any active sessions. That would mean handling quiche
            // timeouts in userspace.
            let poll_res = trace_span!("poll").in_scope(|| {
                self.poll
                    .poll(&mut events, Some(time::Duration::from_secs(1)))
            });

            match poll_res {
                Ok(_) => (),
                Err(e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
                Err(e) => return Err(e.into()),
            }

            #[cfg(feature = "tracy")]
            {
                tracy_client::plot!(
                    "active streams",
                    self.clients
                        .iter()
                        .map(|(_, c)| c.in_flight.len())
                        .sum::() as f64
                );

                tracy_client::plot!(
                    "dgram send queue",
                    self.clients
                        .iter()
                        .map(|(_, c)| c.conn.dgram_send_queue_len())
                        .sum::() as f64
                );

                tracy_client::plot!("outgoing packet queue", self.outgoing_packets.len() as f64);
            }

            // Check if we're supposed to shut down.
            if let Ok(()) = self.close_recv.try_recv() {
                debug!("shutting down server");
                self.shutting_down = true;

                for client in self.clients.values_mut() {
                    match client.conn.close(true, 0, &[]) {
                        Ok(_) | Err(quiche::Error::Done) => (),
                        Err(e) => {
                            bail!("failed to close connection: {:?}", e);
                        }
                    }
                }
            }

            for event in events.iter() {
                // Check if the token is a timeout token.
                let client = self
                    .clients
                    .values_mut()
                    .find(|c| c.timeout_token == event.token());

                if let Some(client) = client {
                    client.timer.read()?;
                    client.conn.on_timeout();
                    client.update_timeout()?;
                }
            }

            // Garbage-collect dead sessions.
            self.state.lock().tick()?;

            // Garbage-collect closed clients.
            self.clients.retain(|_, c| {
                if c.conn.is_closed() {
                    debug!(conn_id = ?c.conn_id, remote_addr = ?c.remote_addr, "client disconnected");
                    false
                } else if c.conn.is_draining() {
                    // Drop the workers, which drops the send/recv channels,
                    // signaling that the workers can exit already.
                    c.in_flight.clear();
                    true
                } else {
                    true
                }
            });

            if self.shutting_down && self.clients.is_empty() {
                return Ok(());
            } else if self.shutting_down {
                debug!("waiting for {} clients to disconnect", self.clients.len());
            }

            // Read incoming UDP packets and handle them.
            'read: loop {
                self.scratch.resize(MAX_QUIC_PACKET_SIZE, 0);
                let (len, from) = match self.socket.recv_from(&mut self.scratch) {
                    Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
                        break 'read;
                    }
                    v => v.context("recv_from error")?,
                };

                let pkt = self.scratch.split_to(len);
                match self.recv(pkt, from) {
                    Ok(_) => {}
                    Err(e) => {
                        error!("recv failed: {:?}", e);
                    }
                }
            }

            // Write out any queued packets.
            while !self.outgoing_packets.is_empty() {
                let pkt = self.outgoing_packets.pop_front().unwrap();
                match self.socket.send_to(&pkt.buf, pkt.to) {
                    Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
                        self.outgoing_packets.push_front(pkt);
                        continue 'poll;
                    }
                    v => v?,
                };
            }

            // Let workers know if any peers hung up, and let peers know if any
            // workers finished.
            for client in self.clients.values_mut() {
                let mut to_close = Vec::new();
                for (sid, worker) in client.in_flight.iter_mut() {
                    if client.conn.stream_finished(*sid) {
                        trace!("peer hung up on stream {:?}:{}", client.conn_id, sid);
                        worker.incoming_messages.take();
                    }

                    // A stream is closable once its worker is done (or gone)
                    // and all of its output has been flushed.
                    if matches!(
                        worker.done.try_recv(),
                        Ok(()) | Err(oneshot::TryRecvError::Disconnected)
                    ) && worker.outgoing_messages.is_empty()
                        && !client.partial_writes.contains_key(sid)
                    {
                        to_close.push(*sid);
                    }
                }

                for sid in to_close {
                    trace!(sid, "closing stream because worker finished");
                    let _ = client.conn.stream_send(sid, &[], true);
                    let _ = client.conn.stream_shutdown(sid, quiche::Shutdown::Read, 0);
                    client.in_flight.remove(&sid);
                }
            }

            #[cfg(feature = "tracy")]
            let mut max_txtime: f64 = 0.0;

            // Demux packets from in-flight requests and datagrams from attachments.
            for client in self.clients.values_mut() {
                let conn_span = trace_span!("conn_write", conn_id = ?client.conn_id);
                let _guard = conn_span.enter();

                if client.conn.is_draining() {
                    continue;
                }

                loop {
                    if client.conn.is_dgram_send_queue_full() {
                        warn!("datagram send queue full!");
                        break;
                    }

                    let msg = match client.dgram_recv.try_recv() {
                        Ok(msg) => msg,
                        // We hold a clone of dgram_send in this struct, so the
                        // channel can never be fully disconnected.
                        Err(TryRecvError::Disconnected) => unreachable!(),
                        Err(TryRecvError::Empty) => break,
                    };

                    match client.send_dgram(msg) {
                        Ok(_) => {}
                        Err(e) => {
                            match e.downcast_ref::() {
                                Some(quiche::Error::Done) => (),
                                _ => error!("failed to send datagram: {}", e),
                            }

                            client
                                .conn
                                .close(true, ErrorCode::ErrorProtocol as u64, &[])
                                .ok();
                            break;
                        }
                    }
                }

                for sid in client.conn.writable() {
                    if !client.in_flight.contains_key(&sid) {
                        continue;
                    }

                    // Flush any leftover bytes before sending new messages,
                    // to preserve message framing on the stream.
                    if !client.flush_partial_write(sid)? {
                        continue;
                    }

                    loop {
                        let span = trace_span!("stream_write", sid);
                        let _guard = span.enter();

                        match client
                            .in_flight
                            .get(&sid)
                            .unwrap()
                            .outgoing_messages
                            .try_recv()
                        {
                            Ok(msg) => {
                                if !client.write_message(sid, msg, false, &mut self.scratch)? {
                                    // No more write capacity at the moment.
                                    break;
                                }
                            }
                            Err(_) => break,
                        }
                    }
                }
            }

            // Generate outgoing QUIC packets.
            let mut packets = Vec::new();
            let mut off = 0;
            for client in self.clients.values_mut() {
                let span = trace_span!("gather_send", conn_id = ?client.conn_id);
                let _guard = span.enter();

                // Generate ack-eliciting keepalives for any clients with open
                // streams. Clients with no open streams are allowed to time
                // out.
                client.send_periodic_keepalive()?;

                loop {
                    let start = off;
                    self.scratch.resize(off + MAX_QUIC_PACKET_SIZE, 0);
                    let (len, send_info) = match client.conn.send(&mut self.scratch[off..]) {
                        Ok(v) => v,
                        Err(quiche::Error::Done) => break,
                        Err(e) => {
                            // NOTE(review): `continue` retries conn.send
                            // immediately; if the same error repeats, this
                            // loop never exits. Consider closing the
                            // connection and breaking instead.
                            error!("QUIC error: {:?}", e);
                            continue;
                        }
                    };

                    off += len;
                    packets.push((start..(start + len), send_info.to, send_info.at));
                }

                // Update the timeout.
                client.update_timeout()?;
            }

            // Send out the packets.
            if !packets.is_empty() {
                let mut sendmmsg = sendmmsg::new();
                for (range, to, txtime) in packets {
                    sendmmsg = sendmmsg.sendmsg(&self.scratch[range], to, txtime);

                    // Plot the max txtime difference.
                    #[cfg(feature = "tracy")]
                    {
                        max_txtime = max_txtime.max(
                            txtime
                                .duration_since(std::time::Instant::now())
                                .as_secs_f64()
                                / 1000.0,
                        );
                    }
                }

                sendmmsg.finish(&self.socket)?;
            }

            #[cfg(feature = "tracy")]
            tracy_client::plot!("max txtime (ms)", max_txtime);
        }
    }

    /// Handles an incoming datagram.
    fn recv(&mut self, mut pkt: BytesMut, from: SocketAddr) -> anyhow::Result<()> {
        let hdr = match quiche::Header::from_slice(&mut pkt, quiche::MAX_CONN_ID_LEN) {
            Ok(v) => v,
            Err(e) => {
                bail!("invalid packet: {:?}", e);
            }
        };

        let num_clients = self.clients.len();
        let client = match self.clients.get_mut(&hdr.dcid) {
            Some(c) => c,
            None if self.shutting_down => return Ok(()),
            None => {
                // Unknown connection ID: only Initial packets may open a new
                // connection.
                if hdr.ty != quiche::Type::Initial {
                    debug!("invalid packet: dcid not found and not Initial");
                    return Ok(());
                }

                if let Some(max) = self.server_config.max_connections {
                    if num_clients as u32 >= max.get() {
                        warn!("rejecting connection: max_connections ({}) reached", max);
                        return Ok(());
                    }
                }

                if !quiche::version_is_supported(hdr.version) {
                    debug!(
                        "version {:x} is not supported; doing version negotiation",
                        hdr.version
                    );

                    let out = {
                        self.scratch.resize(MAX_QUIC_PACKET_SIZE, 0);
                        let len =
                            quiche::negotiate_version(&hdr.scid, &hdr.dcid, &mut self.scratch)?;
                        self.scratch.split_to(len).freeze()
                    };

                    self.outgoing_packets
                        .push_back(Outgoing { buf: out, to: from });
                    return Ok(());
                }

                let conn_id = gen_random_cid();
                let conn =
                    quiche::accept(&conn_id, None, self.addr, from, &mut self.quiche_config)?;

                // Each connection gets its own timerfd for quiche timeouts.
                let mut timer = mio_timerfd::TimerFd::new(mio_timerfd::ClockId::Monotonic)?;
                let timeout_token = mio::Token(self.next_timer_token);
                self.next_timer_token += 1;

                self.poll.registry().register(
                    &mut timer,
                    timeout_token,
                    mio::Interest::READABLE,
                )?;

                let streams = BTreeMap::new();
                let (dgram_send, dgram_recv) = crossbeam_channel::unbounded();
                let dgram_send = WakingSender::new(self.waker.clone(), dgram_send);

                let c = ClientConnection {
                    remote_addr: from,
                    conn_id: conn_id.clone(),
                    conn,
                    timer,
                    timeout_token,
                    in_flight: streams,
                    partial_reads: BTreeMap::new(),
                    partial_writes: BTreeMap::new(),
                    dgram_recv,
                    dgram_send,
                    last_keepalive: time::Instant::now(),
                };

                debug!("new client connection: {}", from);
                self.clients.entry(conn_id).or_insert(c)
            }
        };

        // Run QUIC machinery.
        client.conn.recv(
            &mut pkt,
            quiche::RecvInfo {
                from,
                to: self.addr,
            },
        )?;

        for sid in client.conn.readable() {
            let (messages, fin) = match client.read_messages(sid, &mut self.scratch) {
                Ok(v) => v,
                Err(e) => {
                    // Protocol errors are reported to the client; anything
                    // else is logged as an internal server error.
                    if e.downcast_ref::().is_some() {
                        client.err_stream(
                            sid,
                            ErrorCode::ErrorProtocol,
                            Some(e.to_string()),
                            &mut self.scratch,
                        );
                    } else {
                        error!("unexpected error: {}", e);
                        client.err_stream(
                            sid,
                            ErrorCode::ErrorServer,
                            Some("Internal server error".to_string()),
                            &mut self.scratch,
                        );
                    }

                    continue;
                }
            };

            let worker = match client.in_flight.get_mut(&sid) {
                Some(w) => w,
                None if messages.is_empty() => continue,
                None => {
                    // First message(s) on a new stream: spawn a handler
                    // worker on the thread pool.
                    let (incoming_send, incoming_recv) = crossbeam_channel::unbounded();
                    let (outgoing_send, outgoing_recv) = crossbeam_channel::unbounded();
                    let outgoing_send = WakingSender::new(self.waker.clone(), outgoing_send);
                    let outgoing_dgrams = client.dgram_send.clone();

                    let (done_send, done_recv) = oneshot::channel();
                    let done_send = WakingOneshot::new(self.waker.clone(), done_send);

                    let state_clone = self.state.clone();

                    let max_dgram_len = match client.conn.dgram_max_writable_len() {
                        Some(v) => v,
                        None => bail!("client doesn't support datagrams"),
                    };

                    let client_addr = client.remote_addr;
                    self.thread_pool.execute(move || {
                        let span = debug_span!("stream", sid, remote_addr = ?client_addr);
                        let _guard = span.enter();

                        handlers::dispatch(
                            state_clone,
                            incoming_recv,
                            outgoing_send,
                            outgoing_dgrams,
                            max_dgram_len,
                            done_send,
                        );
                    });

                    let worker = StreamWorker {
                        incoming_messages: Some(incoming_send),
                        outgoing_messages: outgoing_recv,
                        done: done_recv,
                    };

                    client.in_flight.entry(sid).or_insert(worker)
                }
            };

            let incoming = worker.incoming_messages.as_ref().unwrap();
            for msg in messages {
                if incoming.send(msg).is_err() {
                    // The worker finished execution, so ignore any further
                    // messages.
                    break;
                }
            }

            if fin {
                // Signal to the worker that the peer has stopped sending
                // messages.
                worker.incoming_messages.take();
            }
        }

        // Update the timeout timer.
        client.update_timeout()?;

        // Clean up partial data for closed streams.
        client
            .partial_reads
            .retain(|sid, _| !client.conn.stream_finished(*sid));
        client
            .partial_writes
            .retain(|sid, _| !client.conn.stream_finished(*sid));

        Ok(())
    }
}

/// Builds a BoringSSL context with a freshly-generated self-signed
/// certificate for `addr`'s IP. Only used for private/unspecified addresses
/// (asserted below).
fn self_signed_tls_ctx(addr: SocketAddr) -> anyhow::Result {
    use boring::pkey::PKey;
    use boring::x509::X509;

    let ip = addr.ip();
    assert!(!ip_rfc::global(&ip) && !ip.is_unspecified());

    let certs = rcgen::generate_simple_self_signed(vec![ip.to_string()])
        .context("generating self-signed certificates")?;

    let cert = X509::from_pem(certs.serialize_pem()?.as_bytes())?;
    let key = PKey::private_key_from_pem(certs.serialize_private_key_pem().as_bytes())?;

    let mut tls_ctx = boring::ssl::SslContextBuilder::new(boring::ssl::SslMethod::tls())?;
    tls_ctx.set_private_key(&key)?;
    tls_ctx.set_certificate(&cert)?;

    Ok(tls_ctx)
}

impl ClientConnection {
    /// Re-arms (or disarms) the timerfd to match quiche's next timeout.
    fn update_timeout(&mut self) -> anyhow::Result<()> {
        if let Some(new_timeout) = self.conn.timeout() {
            self.timer.set_timeout(&new_timeout)?;
        } else {
            self.timer.disarm()?;
        }

        Ok(())
    }

    /// Drains stream `sid` and decodes as many complete messages as
    /// possible. Leftover bytes are stashed in `partial_reads` for the next
    /// call. Returns the decoded messages and whether the peer sent FIN.
    fn read_messages(
        &mut self,
        sid: u64,
        scratch: &mut BytesMut,
    ) -> anyhow::Result<(Vec, bool)> {
        // Start with partial data from the previous call to read_messages.
scratch.truncate(0); if let Some(partial) = self.partial_reads.remove(&sid) { scratch.unsplit(partial); } let mut off = scratch.len(); let mut stream_fin = false; loop { scratch.resize(off + protocol::MAX_MESSAGE_SIZE, 0); match self.conn.stream_recv(sid, &mut scratch[off..]) { Ok((len, fin)) => { off += len; if fin { stream_fin = true; break; } } Err(quiche::Error::Done) => { break; } Err(e) => return Err(e.into()), } } // Read messages (there may be multiple). scratch.truncate(off); let mut buf = scratch.split(); let mut messages = Vec::new(); while !buf.is_empty() { match protocol::decode_message(&buf) { Ok((msg, len)) => { trace!( conn_id = ?self.conn_id, stream_id = sid, len, "received {}", msg ); messages.push(msg); buf.advance(len); } Err(protocol::ProtocolError::InvalidMessageType(t, len)) => { warn!(msgtype = t, len, "ignoring unknown message type"); buf.advance(len); } Err(protocol::ProtocolError::ShortBuffer(n)) => { trace!( "partial message on stream {:?}:{}, need {} bytes", self.conn_id, sid, n ); self.partial_reads.insert(sid, buf); break; } Err(e) => return Err(e.into()), }; } Ok((messages, stream_fin)) } /// Send a message on a stream. Returns Ok(false) if the stream is full. fn write_message( &mut self, sid: u64, msg: protocol::MessageType, fin: bool, scratch: &mut BytesMut, ) -> anyhow::Result { scratch.resize(protocol::MAX_MESSAGE_SIZE, 0); let len = protocol::encode_message(&msg, scratch).context(format!("failed to encode {}", msg))?; trace!(len, "sending {}", msg); match self.conn.stream_send(sid, &scratch[..len], fin) { Ok(n) if n != len => { // Partial write. 
assert!(n < len); trace!(n, "partial write"); let partial = scratch.split_to(len).split_off(n).freeze(); let old = self.partial_writes.insert(sid, partial); assert_eq!(None, old); Ok(false) } Err(quiche::Error::Done) => { trace!("stream blocked"); let data = scratch.split_to(len).freeze(); let old = self.partial_writes.insert(sid, data); assert_eq!(None, old); Ok(false) } v => { assert_eq!(len, v?); Ok(true) } } } /// Flushes previous partial writes. fn flush_partial_write(&mut self, sid: u64) -> anyhow::Result { use std::collections::btree_map::Entry; match self.partial_writes.entry(sid) { Entry::Vacant(_) => Ok(true), Entry::Occupied(mut entry) => { let partial = entry.get().clone(); trace!(len = partial.len(), "flushing previous partial"); match self.conn.stream_send(sid, &partial, false) { Ok(n) if n != entry.get().len() => { // Partial write. entry.get_mut().advance(n); trace!(len = entry.get().len(), "remaining partial"); Ok(false) } Ok(_) => { entry.remove(); Ok(true) } Err(quiche::Error::Done) => Ok(false), Err(e) => Err(anyhow!(e)), } } } } /// Send a message as a datagram. #[instrument(skip_all)] fn send_dgram(&mut self, msg: Vec) -> anyhow::Result<()> { trace!( conn_id = ?self.conn_id, len = msg.len(), "sending datagram", ); match self.conn.dgram_send_vec(msg) { Ok(_) => Ok(()), Err(quiche::Error::InvalidState) => Err(anyhow!("client doesn't support datagrams")), Err(e) => Err(e.into()), } } /// Send an Error message on a stream, then shut it down. 
/// Generates a random QUIC connection ID of the maximum length quiche
/// supports, filled from the system's secure random source.
fn gen_random_cid() -> quiche::ConnectionId<'static> {
    let mut cid = vec![0; quiche::MAX_CONN_ID_LEN];
    let rng = rand::SystemRandom::new();
    // fill() only fails if the OS entropy source is unavailable, which is
    // unrecoverable anyway.
    rng.fill(&mut cid).unwrap();
    quiche::ConnectionId::from_vec(cid)
}
pub enum PlaybackBuffer where F: dasp::Frame, { Passthrough(Buffer), Resampling { converter: Converter, Sinc<[F; 32]>>, output_rate: u32, }, } impl PlaybackBuffer where F: dasp::Frame, { pub fn new(sample_spec: pulse::SampleSpec, output_spec: pulse::SampleSpec) -> Self { assert_eq!(output_spec.channels as usize, F::CHANNELS); assert!( sample_spec.channels as usize >= F::CHANNELS, "upmixing is not supported" ); let buffer = Buffer::new(sample_spec); if sample_spec.sample_rate == output_spec.sample_rate { Self::Passthrough(buffer) } else { let ringbuf = ring_buffer::Fixed::from([F::EQUILIBRIUM; 32]); let interpolator = Sinc::new(ringbuf); Self::Resampling { converter: dasp::Signal::from_hz_to_hz( buffer, interpolator, sample_spec.sample_rate as f64, output_spec.sample_rate as f64, ), output_rate: output_spec.sample_rate, } } } fn buffer(&self) -> &Buffer { match self { PlaybackBuffer::Passthrough(ref buffer) => buffer, PlaybackBuffer::Resampling { converter, .. } => converter.source(), } } fn buffer_mut(&mut self) -> &mut Buffer { match self { PlaybackBuffer::Passthrough(ref mut buffer) => buffer, PlaybackBuffer::Resampling { converter, .. } => converter.source_mut(), } } pub fn len_bytes(&self) -> usize { self.buffer().len_bytes() } pub fn len_frames(&self) -> usize { self.buffer().len_frames() } pub fn is_empty(&self) -> bool { self.len_frames() == 0 } pub fn write(&mut self, payload: &[u8]) { let _ = io::Write::write_all(&mut self.buffer_mut().inner, payload); } /// Reads data from the buffer at the output sample rate, returning /// `num_frames` at that rate, or None if there's insufficient data. /// /// Dropping the returned signal removes the remaining unread data. 
pub fn drain(&mut self, num_frames: usize) -> Option + '_> { match self { PlaybackBuffer::Passthrough(buffer) => buffer.drain(num_frames).map(EitherSignal::Left), PlaybackBuffer::Resampling { ref mut converter, output_rate, } => { let buffer = converter.source(); let needed_frames = (buffer.sample_spec.sample_rate as usize * num_frames) .div_ceil(*output_rate as usize); if buffer.len_frames() < needed_frames { return None; } Some(EitherSignal::Right(Drain { signal: converter, remaining: num_frames, })) } } } pub fn clear(&mut self) { self.buffer_mut().inner.clear() } } enum EitherSignal { Left(L), Right(R), } impl dasp::Signal for EitherSignal where L: dasp::Signal, R: dasp::Signal, { type Frame = L::Frame; fn next(&mut self) -> Self::Frame { match self { EitherSignal::Left(s) => s.next(), EitherSignal::Right(s) => s.next(), } } fn is_exhausted(&self) -> bool { match self { EitherSignal::Left(s) => s.is_exhausted(), EitherSignal::Right(s) => s.is_exhausted(), } } } pub struct Buffer where F: dasp::Frame, { inner: VecDeque, sample_spec: pulse::SampleSpec, bpp: usize, _phantom: std::marker::PhantomData, } impl Buffer where F: dasp::Frame, { pub fn new(sample_spec: pulse::SampleSpec) -> Self { Self { inner: VecDeque::new(), sample_spec, bpp: sample_spec.format.bytes_per_sample(), _phantom: std::marker::PhantomData, } } fn len_bytes(&self) -> usize { self.inner.len() } fn len_frames(&self) -> usize { let input_channels = self.sample_spec.channels as usize; self.inner.len() / (input_channels * self.bpp) } fn read_frame(&mut self) -> Option { if self.len_frames() == 0 { return None; } let frame = F::from_fn(|_| self.read_sample().unwrap()); // Throw away additional channels. // TODO: be more intelligent about up/downmixing. 
let input_channels = self.sample_spec.channels as usize; for _ in 0..input_channels.saturating_sub(F::CHANNELS) { let _ = self.read_sample(); } Some(frame) } fn read_sample(&mut self) -> Option { use dasp::Sample; match self.sample_spec.format { pulse::SampleFormat::Float32Le => self.inner.read_f32::().ok(), pulse::SampleFormat::Float32Be => self.inner.read_f32::().ok(), pulse::SampleFormat::S16Le => self.inner.read_i16::().ok().map(Sample::from_sample), pulse::SampleFormat::S16Be => self.inner.read_i16::().ok().map(Sample::from_sample), pulse::SampleFormat::U8 => self.inner.read_u8().ok().map(Sample::from_sample), pulse::SampleFormat::S32Le => self.inner.read_i32::().ok().map(Sample::from_sample), pulse::SampleFormat::S32Be => self.inner.read_i32::().ok().map(Sample::from_sample), pulse::SampleFormat::S24Le => self.inner.read_i24::().ok().map(Sample::from_sample), _ => unimplemented!(), } } } impl Buffer where F: dasp::Frame, { fn drain(&mut self, num_frames: usize) -> Option> { if self.len_frames() < num_frames { return None; // Not enough data. 
} Some(Drain { signal: self, remaining: num_frames, }) } } impl dasp::Signal for Buffer where F: dasp::Frame, { type Frame = F; fn next(&mut self) -> Self::Frame { self.read_frame() .unwrap_or(::EQUILIBRIUM) } } struct Drain<'a, S: dasp::Signal> { signal: &'a mut S, remaining: usize, } impl dasp::Signal for Drain<'_, S> { type Frame = S::Frame; fn is_exhausted(&self) -> bool { self.remaining == 0 } fn next(&mut self) -> Self::Frame { if self.remaining == 0 { ::EQUILIBRIUM } else { self.remaining -= 1; dasp::Signal::next(&mut self.signal) } } } impl Drop for Drain<'_, S> { fn drop(&mut self) { for _ in 0..self.remaining { if self.signal.is_exhausted() { break; } let _ = dasp::Signal::next(&mut self.signal); } } } #[cfg(test)] mod test { use byteorder::WriteBytesExt as _; use dasp::Signal as _; use super::*; #[test] fn passthrough() { let mut buf = PlaybackBuffer::<[f32; 2]>::new( pulse::SampleSpec { format: pulse::SampleFormat::Float32Le, channels: 2, sample_rate: 24000, }, pulse::SampleSpec { format: pulse::SampleFormat::Float32Le, channels: 2, sample_rate: 24000, }, ); let mut data = vec![]; data.write_f32::(1.0).unwrap(); data.write_f32::(2.0).unwrap(); data.write_f32::(1.0).unwrap(); data.write_f32::(2.0).unwrap(); buf.write(&data); assert_eq!(buf.len_bytes(), 16); assert_eq!(buf.len_frames(), 2); assert!(buf.drain(3).is_none()); { let mut frames = buf.drain(2).unwrap(); assert_eq!(frames.next(), [1.0, 2.0]); assert_eq!(frames.next(), [1.0, 2.0]); assert!(frames.is_exhausted()); assert_eq!(frames.next(), [0.0, 0.0]); } assert!(buf.drain(1).is_none()); } #[test] fn downmix() { let mut buf = PlaybackBuffer::<[f32; 2]>::new( pulse::SampleSpec { format: pulse::SampleFormat::Float32Le, channels: 5, sample_rate: 24000, }, pulse::SampleSpec { format: pulse::SampleFormat::Float32Le, channels: 2, sample_rate: 24000, }, ); let mut data = vec![]; data.write_f32::(1.0).unwrap(); data.write_f32::(1.0).unwrap(); data.write_f32::(2.0).unwrap(); data.write_f32::(2.0).unwrap(); 
/// The playback lifecycle of a client stream, driven by client commands and
/// by the server clock tick.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum StreamState {
    /// Waiting for enough data before playback starts. The value is the
    /// number of bytes remaining before we can start 'playback'.
    Prebuffering(u64),
    /// Paused by the client; the clock does not consume buffered data.
    Corked,
    /// Actively consuming buffered data on each clock tick.
    Playing,
    /// Playing out remaining buffered data. The value is the seq of the
    /// drain request, so we can ack it once the buffer is empty.
    Draining(u32),
}
priority: 0, // available: pulse::port_info::PortAvailable::Yes, // dir: pulse::port_info::PortDirection::Input, // props: pulse::Props::new(), // port_type: pulse::port_info::PortType::Network, // availability_group: None, //Some(cstr!("output").into()), // profiles: vec![cstr!("output:stereo").into()], // latency_offset: 0, // }], // profiles: vec![pulse::CardProfileInfo { // name: cstr!("output:stereo").into(), // description: Some(cstr!("Stereo").into()), // priority: 1000, // available: 1, // num_sinks: 1, // num_sources: 0, // }], // active_profile: Some(cstr!("output:stereo").into()), // }; // dummy_card.props.set( // pulse::Prop::DeviceDescription, // cstr!("Magic Mirror virtual output"), // ); let mut dummy_sink = pulse::SinkInfo::new_dummy(1); dummy_sink.name = SINK_NAME.into(); dummy_sink.description = Some(cstr!("Magic Mirror virtual output").into()); dummy_sink.sample_spec = pulse::SampleSpec { format: pulse::SampleFormat::Float32Le, channels: 2, sample_rate: CAPTURE_SAMPLE_RATE, }; server_info.channel_map = dummy_sink.channel_map; server_info.sample_spec = dummy_sink.sample_spec; // dummy_sink.card_index = Some(dummy_card_index); dummy_sink.ports[0].port_type = pulse::port_info::PortType::Network; dummy_sink.ports[0].description = Some(cstr!("virtual output").into()); let mut format_props = pulse::Props::new(); format_props.set(pulse::Prop::FormatChannels, cstr!("2")); format_props.set( pulse::Prop::FormatChannelMap, cstr!("front-left,front-right"), ); format_props.set(pulse::Prop::FormatSampleFormat, cstr!("float32le")); format_props.set( pulse::Prop::FormatRate, CString::new(CAPTURE_SAMPLE_RATE.to_string()).unwrap(), ); let default_format_info = pulse::FormatInfo { encoding: pulse::FormatEncoding::Pcm, props: format_props, }; dummy_sink.formats[0] = default_format_info.clone(); let (close_tx, close_rx) = crossbeam::bounded(1); let close_tx = WakingSender::new(waker.clone(), close_tx); Ok(( Self { listener, poll, clock, unencoded_tx, done_rx, 
close_rx, clients: BTreeMap::new(), server_state: ServerState { server_info, cards: vec![], // vec![dummy_card], sinks: vec![dummy_sink], default_format_info, next_playback_channel_index: 0, }, }, close_tx, )) } pub fn run(&mut self) -> anyhow::Result<()> { // Client tokens start from 1024. let mut next_client_token = 1024; self.poll .registry() .register(&mut self.clock, CLOCK, mio::Interest::READABLE)?; self.poll .registry() .register(&mut self.listener, LISTENER, mio::Interest::READABLE)?; let mut events = mio::Events::with_capacity(1024); loop { match self .poll .poll(&mut events, Some(time::Duration::from_secs(1))) { Ok(_) => (), Err(e) if e.kind() == std::io::ErrorKind::Interrupted => continue, Err(e) => return Err(e.into()), } match self.close_rx.try_recv() { Ok(()) | Err(crossbeam::TryRecvError::Disconnected) => return Ok(()), _ => (), } for event in events.iter() { match event.token() { CLOCK => { self.clock.read()?; self.clock_tick()?; } LISTENER => { let (mut socket, _) = self.listener.accept()?; let id = next_client_token as u32; let token = mio::Token(next_client_token); next_client_token += 1; debug!("pulseaudio client connected"); self.poll.registry().register( &mut socket, token, mio::Interest::READABLE, )?; self.clients.insert( token, Client { id, socket, protocol_version: pulse::MAX_VERSION, props: None, incoming: BytesMut::new(), playback_streams: BTreeMap::new(), }, ); } client_token if event.is_read_closed() => { if let Some(mut client) = self.clients.remove(&client_token) { debug!("pulseaudio client disconnected"); self.poll.registry().deregister(&mut client.socket)?; } } client_token if event.is_readable() && self.clients.contains_key(&client_token) => { if let Err(e) = self.recv(client_token) { error!("pulseaudio client error: {}", e); let mut client = self.clients.remove(&client_token).unwrap(); self.poll.registry().deregister(&mut client.socket)?; } } _ => (), } } } } fn recv(&mut self, client_token: mio::Token) -> anyhow::Result<()> { let 
client = self.clients.get_mut(&client_token).unwrap(); let mut read_size = 8192; 'read: loop { let off = client.incoming.len(); client.incoming.resize(off + read_size, 0); let n = match client.socket.read(&mut client.incoming[off..]) { Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => { client.incoming.truncate(off); return Ok(()); } v => v.context("recv error")?, }; client.incoming.truncate(off + n); loop { if client.incoming.len() < pulse::DESCRIPTOR_SIZE { read_size = 8192; continue 'read; } let desc = pulse::read_descriptor(&mut Cursor::new( &client.incoming[..pulse::DESCRIPTOR_SIZE], ))?; if client.incoming.len() < (desc.length as usize + pulse::DESCRIPTOR_SIZE) { read_size = desc.length as usize + pulse::DESCRIPTOR_SIZE - client.incoming.len(); continue 'read; } let _desc_bytes = client.incoming.split_to(pulse::DESCRIPTOR_SIZE); let payload = client.incoming.split_to(desc.length as usize).freeze(); if desc.channel == u32::MAX { let (seq, cmd) = match pulse::Command::read_tag_prefixed( &mut Cursor::new(payload), client.protocol_version, ) { Err(pulse::ProtocolError::Unimplemented(seq, cmd)) => { error!("received unimplemented command {:?}", cmd); pulse::write_error( &mut client.socket, seq, pulse::PulseError::NotImplemented, )?; continue; } v => v.context("decoding command")?, }; match handle_command(client, &mut self.server_state, seq, cmd) { Ok(()) => (), Err(e) => { let _ = pulse::write_error( &mut client.socket, seq, pulse::PulseError::Internal, ); return Err(e); } } } else { handle_stream_write(client, desc, &payload)?; } } } } fn clock_tick(&mut self) -> anyhow::Result<()> { let mut done_draining = Vec::new(); let capture_ts = EPOCH.elapsed().as_millis() as u64; let num_frames = CAPTURE_SAMPLE_RATE / CLOCK_RATE_HZ; let encode_len = num_frames * CAPTURE_CHANNEL_COUNT; let mut frame = match self.done_rx.try_recv() { Ok(mut frame) => { frame.buf.resize(encode_len as usize, 0.0); frame.buf.fill(0.0); Some(frame) } Err(crossbeam::TryRecvError::Empty) => 
{ // No one's listening, but we still need to capture audio from // clients. None } Err(crossbeam::TryRecvError::Disconnected) => return Ok(()), }; for client in self.clients.values_mut() { done_draining.clear(); for (id, stream) in client.playback_streams.iter_mut() { if matches!( stream.state, StreamState::Playing | StreamState::Draining(_) ) { // Track how much we read. let buffer_len = stream.buffer.len_bytes(); // Check for underrun. let Some(frames) = stream.buffer.drain(num_frames as usize) else { error!(id, "buffer underrun for stream"); pulse::write_command_message( &mut client.socket, u32::MAX, pulse::Command::Underflow(pulse::Underflow { channel: *id, offset: 0, // TODO }), client.protocol_version, )?; if stream.buffer_attr.pre_buffering > 0 && matches!(stream.state, StreamState::Playing) { stream.state = StreamState::Prebuffering(stream.buffer_attr.pre_buffering as u64); // TODO: request in this case? } continue; }; if let Some(ref mut frame) = frame { let mut resampled = dasp::Signal::into_interleaved_samples(frames).into_iter(); for sample in &mut frame.buf { *sample += resampled.next().unwrap_or_default(); } } else { // Discard data even if we're not encoding it. drop(frames) } let read_len = buffer_len - stream.buffer.len_bytes(); trace!( id, read_len, buffer_len, new_len = buffer_len - read_len, "stream read" ); stream.read_offset += read_len as u64; stream.played_bytes += read_len as u64; // If we've drained the buffer, we can drop the stream. if matches!(stream.state, StreamState::Draining(_)) && stream.buffer.is_empty() { debug!(id, "finished draining stream"); done_draining.push(*id) } } // Request a write to fill the buffer. 
let bytes_needed = (stream.buffer_attr.target_length as usize) .saturating_sub(stream.buffer.len_bytes() + stream.requested_bytes); if matches!(stream.state, StreamState::Playing | StreamState::Corked) && bytes_needed >= stream.buffer_attr.minimum_request_length as usize { trace!(id, bytes_needed, "requesting buffer write"); stream.requested_bytes += bytes_needed; pulse::write_command_message( &mut client.socket, u32::MAX, pulse::Command::Request(pulse::Request { channel: *id, length: bytes_needed as u32, }), client.protocol_version, )?; } } for id in done_draining.iter() { let stream = client.playback_streams.remove(id).unwrap(); if let StreamState::Draining(drain_seq) = stream.state { pulse::write_ack_message(&mut client.socket, drain_seq)?; } else { unreachable!() } } } // Encode the frame. if let Some(mut frame) = frame { frame.capture_ts = capture_ts; self.unencoded_tx.send(frame)?; } Ok(()) } } fn handle_command( client: &mut Client, server: &mut ServerState, seq: u32, cmd: pulse::Command, ) -> anyhow::Result<()> { trace!("got command [{}]: {:#?}", seq, cmd); match cmd { pulse::Command::Auth(pulse::AuthParams { version, .. }) => { let version = std::cmp::min(version, pulse::MAX_VERSION); client.protocol_version = version; trace!("client protocol version: {}", version); write_reply( &mut client.socket, seq, &pulse::AuthReply { version: pulse::MAX_VERSION, ..Default::default() }, client.protocol_version, )?; Ok(()) } pulse::Command::SetClientName(props) => { client.props = Some(props); write_reply( &mut client.socket, seq, &pulse::SetClientNameReply { client_id: client.id, }, client.protocol_version, )?; Ok(()) } // Introspection commands. 
pulse::Command::GetServerInfo => { write_reply( &mut client.socket, seq, &server.server_info, client.protocol_version, )?; Ok(()) } pulse::Command::GetClientInfo(id) => { let reply = pulse::ClientInfo { index: id, ..Default::default() }; write_reply(&mut client.socket, seq, &reply, client.protocol_version)?; Ok(()) } pulse::Command::GetClientInfoList => { let reply: ClientInfoList = Vec::new(); // TODO write_reply(&mut client.socket, seq, &reply, client.protocol_version)?; Ok(()) } pulse::Command::GetCardInfo(_) => { write_reply( &mut client.socket, seq, &server.cards[0], client.protocol_version, )?; Ok(()) } pulse::Command::GetCardInfoList => { write_reply( &mut client.socket, seq, &server.cards, client.protocol_version, )?; Ok(()) } pulse::Command::GetSinkInfo(_) => { write_reply( &mut client.socket, seq, &server.sinks[0], client.protocol_version, )?; Ok(()) } pulse::Command::GetSinkInfoList => { write_reply( &mut client.socket, seq, &server.sinks, client.protocol_version, )?; Ok(()) } pulse::Command::GetSinkInputInfoList => { let reply: pulse::SinkInputInfoList = Vec::new(); write_reply(&mut client.socket, seq, &reply, client.protocol_version)?; Ok(()) } pulse::Command::GetSourceInfo(_) => { pulse::write_error(&mut client.socket, seq, pulse::PulseError::NoEntity)?; Ok(()) } pulse::Command::GetSourceOutputInfoList => { let reply: pulse::SourceOutputInfoList = Vec::new(); write_reply(&mut client.socket, seq, &reply, client.protocol_version)?; Ok(()) } pulse::Command::GetSourceInfoList => { let reply: pulse::SinkInfoList = Vec::new(); write_reply(&mut client.socket, seq, &reply, client.protocol_version)?; Ok(()) } pulse::Command::Subscribe(_) => { // We don't have any state changes that would warrant an event. pulse::write_ack_message(&mut client.socket, seq)?; Ok(()) } // Playback streams. 
pulse::Command::CreatePlaybackStream(params) => { let mut sample_spec = params.sample_spec; if sample_spec.format == pulse::SampleFormat::Invalid { if let Some(format) = params .formats .iter() .find_map(|f| match sample_spec_from_format(f) { Ok(ss) => Some(ss), Err(e) => { warn!("rejecting invalid format: {:#}", e); None } }) { sample_spec = format; } } // Check if the client set any buffer attrs // to -1, which indicates that we should // set the value. let mut buffer_attr = params.buffer_attr; configure_buffer(&mut buffer_attr, &sample_spec); let target_length = buffer_attr.target_length; let flags = params.flags; let mut stream = PlaybackStream { state: StreamState::Prebuffering(buffer_attr.pre_buffering as u64), buffer_attr, buffer: PlaybackBuffer::new(sample_spec, CAPTURE_SPEC), requested_bytes: target_length as usize, played_bytes: 0, write_offset: 0, read_offset: 0, }; // Returning a nonzero pre_buffering value always causes the stream // to start after prebuffering is complete, even if the client // requested otherwise. if buffer_attr.pre_buffering == 0 || flags.start_corked { stream.state = StreamState::Corked; } let channel = server.next_playback_channel_index; server.next_playback_channel_index += 1; client.playback_streams.insert(channel, stream); let reply = pulse::CreatePlaybackStreamReply { channel, stream_index: 500, sample_spec, channel_map: params.channel_map, buffer_attr, requested_bytes: target_length, sink_name: Some(SINK_NAME.into()), format: server.default_format_info.clone(), stream_latency: 10000, // TODO ..Default::default() }; write_reply(&mut client.socket, seq, &reply, client.protocol_version)?; Ok(()) } pulse::Command::DrainPlaybackStream(channel) => { if let Some(stream) = client.playback_streams.get_mut(&channel) { // The ack gets sent once we finish draining. stream.state = StreamState::Draining(seq); } Ok(()) } pulse::Command::GetPlaybackLatency(pulse::LatencyParams { channel, now, .. 
}) => { if let Some(stream) = client.playback_streams.get_mut(&channel) { let reply = pulse::PlaybackLatency { sink_usec: 10000, source_usec: 0, playing: matches!(stream.state, StreamState::Playing), local_time: now, remote_time: time::SystemTime::now(), write_offset: stream.write_offset as i64, read_offset: stream.read_offset as i64, underrun_for: u64::MAX, playing_for: stream.played_bytes, }; write_reply(&mut client.socket, seq, &reply, client.protocol_version)?; } Ok(()) } pulse::Command::UpdatePlaybackStreamProplist(_) => { pulse::write_ack_message(&mut client.socket, seq)?; Ok(()) } pulse::Command::CorkPlaybackStream(params) => { if let Some(stream) = client.playback_streams.get_mut(¶ms.channel) { match stream.state { StreamState::Corked if !params.cork => { let needed = stream .buffer_attr .target_length .saturating_sub(stream.buffer.len_bytes() as u32); stream.state = if needed > 0 { // Request bytes to fill the buffer. trace!( id = params.channel, bytes_needed = needed, "requesting buffer write" ); pulse::write_command_message( &mut client.socket, u32::MAX, pulse::Command::Request(pulse::Request { channel: params.channel, length: needed, }), client.protocol_version, )?; stream.requested_bytes = needed as usize; StreamState::Prebuffering(needed as u64) } else { StreamState::Playing }; } StreamState::Playing if params.cork => { stream.state = StreamState::Corked; } _ => (), } } pulse::write_ack_message(&mut client.socket, seq)?; Ok(()) } pulse::Command::FlushPlaybackStream(channel) => { if let Some(stream) = client.playback_streams.get_mut(&channel) { stream.buffer.clear(); stream.requested_bytes = 0; stream.played_bytes = 0; stream.read_offset = stream.write_offset; } pulse::write_ack_message(&mut client.socket, seq)?; Ok(()) } pulse::Command::Extension(_) => { pulse::write_error(&mut client.socket, seq, pulse::PulseError::NoExtension)?; Ok(()) } _ => { warn!("ignoring command {:?}", cmd.tag()); pulse::write_error(&mut client.socket, seq, 
pulse::PulseError::NotImplemented)?; Ok(()) } } } fn sample_spec_from_format(f: &pulse::FormatInfo) -> anyhow::Result { let format = f .props .get(pulse::Prop::FormatSampleFormat) .context("missing sample format")?; let rate = f .props .get(pulse::Prop::FormatRate) .context("missing sample rate")?; let channels = f .props .get(pulse::Prop::FormatChannels) .context("missing channel count")?; let format = match sanitize_prop_str(format)? { "s16le" => pulse::SampleFormat::S16Le, "s16be" => pulse::SampleFormat::S16Be, "u8" => pulse::SampleFormat::U8, "s32le" => pulse::SampleFormat::S32Le, "s32be" => pulse::SampleFormat::S32Be, "s24le" => pulse::SampleFormat::S24Le, "s24be" => pulse::SampleFormat::S24Be, "float32le" => pulse::SampleFormat::Float32Le, "float32be" => pulse::SampleFormat::Float32Be, _ => bail!("unsupported sample format: {:?}", format), }; let rate = sanitize_prop_str(rate)? .parse() .context(format!("invalid sample rate: {:?}", rate))?; let channels = sanitize_prop_str(channels)? .parse() .context(format!("invalid channel count: {:?}", channels))?; Ok(pulse::SampleSpec { format, sample_rate: rate, channels, }) } fn sanitize_prop_str(b: &[u8]) -> anyhow::Result<&str> { let s = CStr::from_bytes_with_nul(b).context("invalid string")?; let s = s.to_str().context("invalid utf-8")?; Ok(s.trim_matches('"')) } fn handle_stream_write( client: &mut Client, desc: pulse::Descriptor, payload: &[u8], ) -> anyhow::Result<()> { let stream = match client.playback_streams.get_mut(&desc.channel) { Some(v) => v, None => { bail!("invalid channel") } }; let buffer_len = stream.buffer.len_bytes(); trace!( id = desc.channel, ?stream.state, write_len = desc.length, current_len = buffer_len, future_len = buffer_len + desc.length as usize, "got stream write", ); // We don't handle seeks yet. if desc.offset != 0 { bail!("seeking not supported") } // Check for overrun. 
// Tail of `handle_stream_write`: clamp the write to the buffer's remaining
// capacity, handle the prebuffering -> playing transition, then append the
// payload and advance the write offset.
    // Bytes of capacity left before we hit the configured max_length.
    let remaining = (stream.buffer_attr.max_length as usize).saturating_sub(buffer_len);
    let overflow = payload.len().saturating_sub(remaining);
    let payload = if overflow > 0 {
        // Tell the client how many bytes we're dropping, and truncate the
        // write so it fits.
        pulse::write_command_message(
            &mut client.socket,
            u32::MAX,
            pulse::Command::Overflow(overflow as u32),
            client.protocol_version,
        )?;

        &payload[..remaining as usize]
    } else {
        payload
    };

    if let StreamState::Prebuffering(n) = stream.state {
        // Count this write against the outstanding prebuffer requirement.
        let needed = n.saturating_sub(payload.len() as u64);
        if needed > 0 {
            stream.state = StreamState::Prebuffering(needed)
        } else {
            // The prebuffer is satisfied; notify the client and start playing.
            debug!("starting playback for stream {}", desc.channel);
            pulse::write_command_message(
                &mut client.socket,
                u32::MAX,
                pulse::Command::Started(desc.channel),
                client.protocol_version,
            )?;

            stream.state = StreamState::Playing
        }
    }

    // Read the data into the buffer.
    stream.buffer.write(payload);
    stream.requested_bytes = stream.requested_bytes.saturating_sub(payload.len());
    stream.write_offset += payload.len() as u64;

    Ok(())
}

/// Fills in any defaulted (`u32::MAX`) fields of a client-supplied
/// `BufferAttr`, and rounds client-supplied values to frame-aligned sizes for
/// the given sample spec.
fn configure_buffer(attr: &mut pulse::stream::BufferAttr, spec: &pulse::SampleSpec) {
    let sample_size = spec.format.bytes_per_sample();
    let frame_size = spec.channels as usize * sample_size;
    // The byte length of 10ms of audio; used as the unit for the defaults
    // below.
    let len_10ms = (frame_size * spec.sample_rate as usize / 100) as u32;

    // Max length defaults to 200ms; client values are frame-aligned and
    // capped. NOTE(review): the cap below is len_10ms * 100 (1s), not 200ms as
    // a previous comment here claimed -- confirm which is intended.
    if attr.max_length == u32::MAX {
        attr.max_length = len_10ms * 20;
    } else {
        attr.max_length = attr
            .max_length
            .next_multiple_of(frame_size as u32)
            .min(len_10ms * 100);
    }

    // Minimum request length is max(5ms, client value), frame-aligned.
    if attr.minimum_request_length == u32::MAX {
        attr.minimum_request_length = (len_10ms / 2).next_multiple_of(frame_size as u32);
    } else {
        attr.minimum_request_length = attr
            .minimum_request_length
            .next_multiple_of(frame_size as u32)
            .max(len_10ms / 2);
    }

    // Target length should be a multiple of the minimum request length, and by
    // default 20ms of audio.
if attr.target_length == u32::MAX { attr.target_length = (len_10ms * 2) .next_multiple_of(attr.minimum_request_length) .min(attr.max_length); } else { attr.target_length = attr .target_length .next_multiple_of(attr.minimum_request_length) .max(len_10ms) .min(attr.max_length); if attr.target_length < (attr.minimum_request_length * 2) { attr.target_length = attr.minimum_request_length * 2; } } // Prebuffering shouldn't be more than the target length. if attr.pre_buffering == u32::MAX { attr.pre_buffering = attr.target_length; } else { attr.pre_buffering = attr .pre_buffering .next_multiple_of(attr.minimum_request_length) .min(attr.target_length); } } fn write_reply( socket: &mut mio::net::UnixStream, seq: u32, reply: &T, version: u16, ) -> anyhow::Result<()> { trace!("sending reply [{}] ({}): {:#?}", seq, version, reply); pulse::write_reply_message(socket, seq, reply, version)?; Ok(()) } ================================================ FILE: mm-server/src/session/audio.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use std::{path::Path, sync::Arc}; use crate::{session::SessionHandle, waking_sender::WakingSender}; mod buffer; mod pulse; use anyhow::Context as _; use bytes::BytesMut; use crossbeam_channel as crossbeam; use parking_lot::Mutex; use pulse::PulseServer; use tracing::error; use super::AudioStreamParams; struct EncodeFrame { buf: Vec, capture_ts: u64, } struct Encoder { thread_handle: Option>>, close_tx: crossbeam::Sender<()>, } impl Drop for Encoder { fn drop(&mut self) { if let Some(handle) = self.thread_handle.take() { let _ = self.close_tx.send(()); match handle.join() { Ok(Ok(())) => (), Ok(Err(e)) => error!("audio encoder thread died: {}", e), Err(_) => error!("audio encoder thread panicked"), } } } } pub struct EncodePipeline { server_thread_handle: Option>>, server_close_tx: WakingSender<()>, compositor: SessionHandle, encoder: Option, done_tx: crossbeam::Sender, unencoded_rx: 
Arc>>, } impl EncodePipeline { pub fn new( compositor: SessionHandle, xdg_runtime_dir: &Path, ) -> anyhow::Result { // In this location, the server gets picked up without setting PULSE_SERVER // explicitly. std::fs::create_dir_all(Path::join(xdg_runtime_dir, "pulse"))?; let socket_name = Path::join(xdg_runtime_dir, "pulse/native"); // The pulse server reads empty frames from the done channel, fills // them, and sends them back over the undecoded channel. let (unencoded_tx, unencoded_rx) = crossbeam::unbounded(); let (done_tx, done_rx) = crossbeam::unbounded(); let (mut server, close_tx) = PulseServer::new(&socket_name, unencoded_tx, done_rx) .context("creating PulseAudio server")?; let server_handle = std::thread::Builder::new() .name(format!("pulse server ({})", socket_name.to_string_lossy())) .spawn(move || server.run())?; Ok(Self { server_thread_handle: Some(server_handle), server_close_tx: close_tx, compositor, encoder: None, done_tx, // We wrap the receiver in a mutex to ensure that only one encoder // is interacting with the pulse server at a time (and because it's // not Clone). unencoded_rx: Arc::new(Mutex::new(unencoded_rx)), }) } pub fn stop_stream(&mut self) { self.encoder = None; } pub fn restart_stream(&mut self, params: AudioStreamParams) -> anyhow::Result<()> { // TODO: pass sample rate on input frames, do resampling on the pulse side. // For now we only support 48khz stereo anyway. 
// Middle of `restart_stream`: spin up an opus encoder thread fed by the pulse
// server. Only the fixed capture format is supported for now, so anything else
// is a programming error.
        assert_eq!(params.sample_rate, pulse::CAPTURE_SAMPLE_RATE);
        assert_eq!(params.channels, pulse::CAPTURE_CHANNEL_COUNT);
        assert!(self.encoder.is_none());

        let done_tx = self.done_tx.clone();
        let unencoded_rx = self.unencoded_rx.clone();
        let (close_tx, close_rx) = crossbeam::unbounded();

        let ch = match params.channels {
            1 => opus::Channels::Mono,
            2 => opus::Channels::Stereo,
            _ => panic!("unsupported number of channels: {}", params.channels),
        };

        let mut encoder = opus::Encoder::new(params.sample_rate, ch, opus::Application::LowDelay)
            .context("failed to create opus encoder")?;

        let compositor = self.compositor.clone();
        let thread_handle = std::thread::Builder::new()
            .name("audio encode".into())
            .spawn(move || {
                // Lock the receiver until the encoder thread exits. This is
                // what guarantees only one encoder interacts with the pulse
                // server at a time.
                let unencoded_rx = unencoded_rx.lock();

                // The first frame dispatched after a (re)start is flagged so
                // downstream can reset its stream state.
                let mut signal_restart = true;
                let mut buf = BytesMut::new();

                // Prime the pulse server with empty frames; it fills them with
                // captured audio and sends them back over `unencoded_rx`.
                let mut in_flight = 3;
                for _ in 0..in_flight {
                    if done_tx
                        .send(EncodeFrame {
                            buf: Vec::new(),
                            capture_ts: 0,
                        })
                        .is_err()
                    {
                        return Ok(());
                    }
                }

                // Once asked to close, stop recycling frames and drain the
                // ones still in flight before exiting.
                let mut closing = false;
                while in_flight > 0 {
                    if let Ok(()) = close_rx.try_recv() {
                        closing = true;
                    }

                    let frame = match unencoded_rx.recv() {
                        Ok(frame) => frame,
                        Err(_) => return Ok(()), // Pulse server hung up.
                    };

                    // `buf` is reused across frames; size it to the raw input
                    // length before encoding into it.
                    buf.resize(frame.buf.len(), 0);
                    let len = encoder.encode_float(&frame.buf, &mut buf)?;
                    compositor.dispatch_audio_frame(
                        frame.capture_ts,
                        buf.split_to(len).freeze(),
                        signal_restart,
                    );

                    signal_restart = false;
                    if !closing {
                        match done_tx.send(frame) {
                            Ok(()) => (),
                            Err(_) => return Ok(()), // Pulse server hung up.
} } else { in_flight -= 1; } } Ok(()) })?; self.encoder = Some(Encoder { thread_handle: Some(thread_handle), close_tx, }); Ok(()) } } impl Drop for EncodePipeline { fn drop(&mut self) { let _ = self.server_close_tx.send(()); if let Some(handle) = self.server_thread_handle.take() { match handle.join() { Ok(Ok(())) => (), Ok(Err(e)) => error!("pulseaudio server error: {}", e), Err(_) => error!("pulseaudio server panicked"), } } } } ================================================ FILE: mm-server/src/session/compositor/buffers/modifiers.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use std::{os::fd::AsFd as _, sync::Arc}; use ash::vk; use cstr::cstr; use drm_fourcc::{DrmFormat, DrmFourcc}; use tracing::{debug, trace}; use wayland_protocols::wp::linux_dmabuf::zv1::server::zwp_linux_dmabuf_feedback_v1; use crate::{ session::compositor::{sealed::SealedFile, Compositor}, vulkan::VkContext, }; // Note that Mesa will throw out a format if either the opaque or alpha version // is missing. For example, Argb8888 requires Xrgb8888, and vice versa. 
/// (Fourcc, VkFormat, ignore_alpha, bpp)
///
/// The third field is consumed as `ignore_alpha` below: it's `true` for the
/// X-prefixed (no-alpha) fourccs, telling the sampler to ignore the unused
/// alpha channel.
pub const SUPPORTED_DRM_FORMATS: &[(DrmFourcc, vk::Format, bool, usize)] = &[
    (DrmFourcc::Argb8888, vk::Format::B8G8R8A8_UNORM, false, 4),
    (DrmFourcc::Xrgb8888, vk::Format::B8G8R8A8_UNORM, true, 4),
    (DrmFourcc::Abgr8888, vk::Format::R8G8B8A8_UNORM, false, 4),
    (DrmFourcc::Xbgr8888, vk::Format::R8G8B8A8_UNORM, true, 4),
    (
        DrmFourcc::Argb16161616f,
        vk::Format::R16G16B16A16_SFLOAT,
        false,
        8,
    ),
    (
        DrmFourcc::Xrgb16161616f,
        vk::Format::R16G16B16A16_SFLOAT,
        true,
        8,
    ),
    (
        DrmFourcc::Abgr16161616f,
        vk::Format::R16G16B16A16_SFLOAT,
        false,
        8,
    ),
    (
        DrmFourcc::Xbgr16161616f,
        vk::Format::R16G16B16A16_SFLOAT,
        true,
        8,
    ),
];

/// Looks up the Vulkan format and ignore-alpha flag for a DRM fourcc, or None
/// if it isn't in SUPPORTED_DRM_FORMATS.
pub fn fourcc_to_vk(fourcc: DrmFourcc) -> Option<(vk::Format, bool)> {
    SUPPORTED_DRM_FORMATS
        .iter()
        .find(|(f, _, _, _)| *f == fourcc)
        .map(|(_, vk, ignore_alpha, _)| (*vk, *ignore_alpha))
}

/// Looks up the bytes-per-pixel for a DRM fourcc, or None if it isn't in
/// SUPPORTED_DRM_FORMATS.
pub fn fourcc_bpp(fourcc: DrmFourcc) -> Option<usize> {
    SUPPORTED_DRM_FORMATS
        .iter()
        .find(|(f, _, _, _)| *f == fourcc)
        .map(|(_, _, _, bpp)| *bpp)
}

/// The dmabuf feedback we advertise to clients, computed once per device.
pub struct CachedDmabufFeedback {
    drm_node: u64,
    formats: Vec<DrmFormat>,
    // The zwp_linux_dmabuf_feedback_v1 format table, passed to clients as an
    // fd.
    table: SealedFile,
}

impl CachedDmabufFeedback {
    /// Whether any advertised format uses the given modifier.
    pub fn contains(&self, modifier: u64) -> bool {
        self.formats
            .iter()
            .any(|format| format.modifier == modifier)
    }

    pub fn new(vk: Arc<VkContext>) -> anyhow::Result<Self> {
        // For each supported fourcc, advertise every single-plane modifier the
        // driver reports for the matching Vulkan format.
        let formats = unsafe {
            SUPPORTED_DRM_FORMATS
                .iter()
                .flat_map(|(fourcc, format, _, _)| {
                    let mods =
                        query_drm_format_modifiers(&vk.instance, vk.device_info.pdevice, *format);

                    mods.into_iter().filter_map(|props| {
                        if props.drm_format_modifier_plane_count == 1 {
                            let modifier = props.drm_format_modifier.into();
                            // NOTE(review): this asserts (rather than filters)
                            // that the driver can import what it advertises.
                            assert!(verify_dmabuf_support(
                                vk.clone(),
                                *format,
                                modifier,
                                vk::ImageUsageFlags::SAMPLED,
                            ));

                            Some(DrmFormat {
                                code: *fourcc,
                                modifier,
                            })
                        } else {
                            None
                        }
                    })
                })
                .collect::<Vec<_>>()
        };

        // Build the wire-format table: 16 bytes per entry, with the fourcc at
        // offset 0 and the modifier at offset 8.
        let mut table = vec![0_u8; 16 * formats.len()];
        for (idx, format) in formats.iter().enumerate() {
            let off = idx * 16;
            let modifier: u64 = format.modifier.into();
            let code = format.code as u32;

            trace!(idx, code = ?format.code, code, modifier, "adding
format to table"); table[off..off + 4].copy_from_slice(&code.to_ne_bytes()); table[off + 8..off + 16].copy_from_slice(&modifier.to_ne_bytes()); } Ok(Self { formats, drm_node: vk.device_info.drm_node, table: SealedFile::new(cstr!("dmabuf_formats"), &table)?, }) } } impl Compositor { pub fn emit_dmabuf_feedback( &self, feedback: &zwp_linux_dmabuf_feedback_v1::ZwpLinuxDmabufFeedbackV1, ) { let fb = &self.cached_dmabuf_feedback; let dev = fb.drm_node.to_ne_bytes().to_vec(); feedback.main_device(dev.clone()); feedback.format_table(fb.table.as_fd(), fb.table.size() as u32); feedback.tranche_target_device(dev.clone()); feedback.tranche_flags(zwp_linux_dmabuf_feedback_v1::TrancheFlags::empty()); let indices = (0..(fb.formats.len() as u16)) .flat_map(|i| i.to_ne_bytes()) .collect::>(); feedback.tranche_formats(indices); feedback.tranche_done(); feedback.done(); } } unsafe fn query_drm_format_modifiers( instance: &ash::Instance, device: vk::PhysicalDevice, format: vk::Format, ) -> Vec { let count = { let mut modifiers = vk::DrmFormatModifierPropertiesListEXT::default(); let mut format_props = vk::FormatProperties2::default().push_next(&mut modifiers); instance.get_physical_device_format_properties2(device, format, &mut format_props); modifiers.drm_format_modifier_count }; let mut res = vec![vk::DrmFormatModifierPropertiesEXT::default(); count as usize]; let mut modifiers = vk::DrmFormatModifierPropertiesListEXT::default().drm_format_modifier_properties(&mut res); let mut format_props = vk::FormatProperties2::default().push_next(&mut modifiers); instance.get_physical_device_format_properties2(device, format, &mut format_props); res } pub unsafe fn verify_dmabuf_support( vk: Arc, format: vk::Format, modifier: drm_fourcc::DrmModifier, usage: vk::ImageUsageFlags, ) -> bool { let mut drm_props = vk::ExternalImageFormatProperties::default(); let mut props = vk::ImageFormatProperties2::default().push_next(&mut drm_props); let mut modifier_info = 
vk::PhysicalDeviceImageDrmFormatModifierInfoEXT::default() .drm_format_modifier(modifier.into()); let mut external_format_info = vk::PhysicalDeviceExternalImageFormatInfo::default() .handle_type(vk::ExternalMemoryHandleTypeFlags::DMA_BUF_EXT); let format_info = vk::PhysicalDeviceImageFormatInfo2::default() .format(format) .ty(vk::ImageType::TYPE_2D) .usage(usage) .tiling(vk::ImageTiling::DRM_FORMAT_MODIFIER_EXT) .push_next(&mut external_format_info) .push_next(&mut modifier_info); match vk.instance.get_physical_device_image_format_properties2( vk.device_info.pdevice, &format_info, &mut props, ) { Ok(_) => (), Err(_) => { debug!(?format, ?modifier, "format not supported for dma import"); return false; } } drm_props .external_memory_properties .compatible_handle_types .contains(vk::ExternalMemoryHandleTypeFlags::DMA_BUF_EXT) } ================================================ FILE: mm-server/src/session/compositor/buffers/syncobj_timeline.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use std::{ io, os::fd::{AsFd as _, OwnedFd}, sync::Arc, }; use ash::vk; use drm::control::{syncobj, Device as _}; use tracing::{instrument, trace}; use wayland_protocols::wp::linux_drm_syncobj::v1::server::wp_linux_drm_syncobj_timeline_v1; use crate::vulkan::VkContext; slotmap::new_key_type! 
{ pub struct SyncobjTimelineKey; }

/// A DRM syncobj timeline imported from a client, shared by all timeline
/// points minted from it.
pub struct SyncobjTimeline(Arc<TimelineHandle>);

struct TimelineHandle {
    // Unused directly; retained so the protocol object lives as long as the
    // timeline.
    pub _wp_syncobj_timeline: wp_linux_drm_syncobj_timeline_v1::WpLinuxDrmSyncobjTimelineV1,
    handle: syncobj::Handle,
    vk: Arc<VkContext>,
}

impl Drop for TimelineHandle {
    fn drop(&mut self) {
        // Best-effort; there's nothing useful to do if destruction fails.
        let _ = self.vk.drm_device.destroy_syncobj(self.handle);
    }
}

/// A specific point (value) on a syncobj timeline.
#[derive(Clone)]
pub struct SyncobjTimelinePoint {
    pub value: u64,
    handle: Arc<TimelineHandle>,
}

impl SyncobjTimelinePoint {
    /// Signals the underlying timeline at this point's value.
    pub fn signal(&self) -> io::Result<()> {
        trace!(handle = ?self.handle.handle, value = self.value, "signaling timeline point");

        self.handle
            .vk
            .drm_device
            .syncobj_timeline_signal(&[self.handle.handle], &[self.value])
    }

    /// Imports this timeline point into the given Vulkan semaphore, by way of
    /// a temporary syncobj and an exported sync file.
    #[instrument(skip_all)]
    pub fn import_as_semaphore(&self, semaphore: vk::Semaphore) -> anyhow::Result<()> {
        trace!(
            value = self.value,
            ?semaphore,
            "importing timeline point as semaphore"
        );

        let device = &self.handle.vk.drm_device;

        // First, we export a sync file by creating a new syncobj and copying
        // the timeline point to 0 on the new syncobj.
        let syncobj = device.create_syncobj(false)?;
        scopeguard::defer! {
            self.handle.vk
                .drm_device
                .destroy_syncobj(syncobj)
                .expect("failed to destroy syncobj")
        };

        device.syncobj_timeline_transfer(self.handle.handle, syncobj, self.value, 0)?;
        let sync_fd = device.syncobj_to_fd(syncobj, true)?;

        // Then we can import it into a vulkan semaphore.
unsafe { super::import_sync_file_as_semaphore(self.handle.vk.clone(), sync_fd, semaphore) } } } impl SyncobjTimeline { pub fn import( vk: Arc, wp_syncobj_timeline: wp_linux_drm_syncobj_timeline_v1::WpLinuxDrmSyncobjTimelineV1, fd: OwnedFd, ) -> io::Result { let handle = vk.drm_device.fd_to_syncobj(fd.as_fd(), false)?; Ok(Self(Arc::new(TimelineHandle { _wp_syncobj_timeline: wp_syncobj_timeline, handle, vk, }))) } pub fn new_timeline_point(&self, value: u64) -> SyncobjTimelinePoint { SyncobjTimelinePoint { value, handle: self.0.clone(), } } } ================================================ FILE: mm-server/src/session/compositor/buffers.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 mod modifiers; mod syncobj_timeline; use std::{ collections::BTreeSet, os::fd::{AsFd, AsRawFd, FromRawFd as _, IntoRawFd as _, OwnedFd}, sync::{Arc, RwLock}, }; use anyhow::{bail, Context as _}; use ash::vk; use drm_fourcc::DrmModifier; pub use modifiers::*; pub use syncobj_timeline::*; use tracing::{instrument, trace}; use wayland_server::{protocol::wl_buffer, Resource as _}; use crate::{ session::compositor::{shm::Pool, Compositor}, vulkan::{create_image_view, select_memory_type, VkContext, VkHostBuffer, VkImage}, }; slotmap::new_key_type! { pub struct BufferKey; } pub struct Buffer { pub wl_buffer: wl_buffer::WlBuffer, pub backing: BufferBacking, /// Next time we release this buffer, we should destroy it as well. pub needs_destruction: bool, } impl Buffer { pub fn dimensions(&self) -> glam::UVec2 { match self.backing { BufferBacking::Shm { format, .. } => (format.width, format.height).into(), BufferBacking::Dmabuf { format, .. } => (format.width, format.height).into(), } } } pub enum BufferBacking { Shm { format: PlaneMetadata, pool: Arc>, staging_buffer: VkHostBuffer, image: VkImage, /// Indicates that staging_buffer has been written to and needs to /// be uploaded to the image. 
dirty: bool, }, Dmabuf { format: PlaneMetadata, fd: OwnedFd, image: VkImage, }, } #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub struct PlaneMetadata { pub format: drm_fourcc::DrmFourcc, pub bpp: usize, pub width: u32, pub height: u32, pub stride: u32, pub offset: u32, } impl Compositor { #[instrument(skip_all)] pub fn release_buffers(&mut self) -> anyhow::Result<()> { // Check if any content updates have finished. let mut still_in_flight = Vec::new(); for content in self.in_flight_buffers.drain(..) { if let Some(tp) = &content.tp_done { if unsafe { !tp.poll()? } { // The frame using this content is still in-progress. still_in_flight.push(content); continue; } } if content.needs_release { let buffer = self .buffers .get(content.buffer) .expect("buffer has no entry"); trace!( wl_buffer = buffer.wl_buffer.id().protocol_id(), "explicitly releasing buffer" ); buffer.wl_buffer.release(); } if let Some((_, release)) = content.explicit_sync { release.signal()?; } // If we didn't move the presentation feedback into a separate queue, // that means we didn't use the content update and we should relate // that to the client. if let Some(feedback) = &content.wp_presentation_feedback { feedback.discarded(); } } self.in_flight_buffers = still_in_flight; // A buffer is in use if it's either part of an in-flight frame, or if // we're holding on to it because the client hasn't committed a new one // yet, and we may need to display it again. 
let used_buffers: BTreeSet = self .surfaces .values() .flat_map(|s| &s.content) .chain(self.in_flight_buffers.iter()) .map(|c| c.buffer) .collect(); self.buffers.retain(|id, buffer| { if !buffer.needs_destruction || used_buffers.contains(&id) { true } else { assert!(!buffer.wl_buffer.is_alive()); trace!( wl_buffer = buffer.wl_buffer.id().protocol_id(), "destroying buffer" ); false } }); Ok(()) } } #[instrument(skip_all)] pub fn import_shm_buffer( vk: Arc, wl_buffer: wl_buffer::WlBuffer, pool: Arc>, format: PlaneMetadata, ) -> anyhow::Result { let (vk_format, ignore_alpha) = match format.format { drm_fourcc::DrmFourcc::Argb8888 => (vk::Format::B8G8R8A8_UNORM, false), drm_fourcc::DrmFourcc::Xrgb8888 => (vk::Format::B8G8R8A8_UNORM, true), _ => unreachable!(), }; let len = format.stride * format.height; trace!(?format, len, "importing shm buffer"); let staging_buffer = VkHostBuffer::new( vk.clone(), vk.device_info.host_visible_mem_type_index, vk::BufferUsageFlags::TRANSFER_SRC, len as usize, )?; let image = VkImage::new( vk.clone(), vk_format, ignore_alpha, format.width, format.height, vk::ImageUsageFlags::TRANSFER_DST | vk::ImageUsageFlags::SAMPLED, vk::SharingMode::EXCLUSIVE, vk::ImageCreateFlags::empty(), )?; Ok(Buffer { wl_buffer, backing: BufferBacking::Shm { pool, staging_buffer, image, format, dirty: true, }, needs_destruction: false, }) } #[instrument(skip_all)] pub fn import_dmabuf_buffer( vk: Arc, wl_buffer: wl_buffer::WlBuffer, format: PlaneMetadata, modifier: DrmModifier, fd: OwnedFd, ) -> anyhow::Result { let PlaneMetadata { format: fourcc, width, height, stride, offset, .. 
} = format; let (vk_format, ignore_alpha) = match modifiers::fourcc_to_vk(fourcc) { Some(format) => format, None => bail!("unsupported dmabuf format: {:?}", format), }; unsafe { if !modifiers::verify_dmabuf_support( vk.clone(), vk_format, modifier, vk::ImageUsageFlags::SAMPLED, ) { bail!("unsupported dmabuf format: {:?}", vk_format); } } trace!( ?fourcc, ?vk_format, width, height, offset, stride, fd = fd.as_fd().as_raw_fd(), "importing dmabuf texture" ); // Vulkan wants to own the file descriptor, so we create a dup'd one just for // the driver. let vk_fd = fd.as_fd().try_clone_to_owned()?; let image = { let plane_layouts = [vk::SubresourceLayout { offset: offset as u64, size: 0, // Must be zero, according to the spec. row_pitch: stride as u64, ..Default::default() }]; let mut format_modifier_info = vk::ImageDrmFormatModifierExplicitCreateInfoEXT::default() .drm_format_modifier(modifier.into()) .plane_layouts(&plane_layouts); let mut external_memory_info = vk::ExternalMemoryImageCreateInfo::default() .handle_types(vk::ExternalMemoryHandleTypeFlags::DMA_BUF_EXT); let create_info = vk::ImageCreateInfo::default() .image_type(vk::ImageType::TYPE_2D) .format(vk_format) .extent(vk::Extent3D { width, height, depth: 1, }) .mip_levels(1) .array_layers(1) .tiling(vk::ImageTiling::DRM_FORMAT_MODIFIER_EXT) .samples(vk::SampleCountFlags::TYPE_1) .usage(vk::ImageUsageFlags::SAMPLED) .sharing_mode(vk::SharingMode::EXCLUSIVE) .initial_layout(vk::ImageLayout::UNDEFINED) .push_next(&mut external_memory_info) .push_next(&mut format_modifier_info); unsafe { vk.device.create_image(&create_info, None).unwrap() } }; let memory = { let mut fd_props = vk::MemoryFdPropertiesKHR::default(); unsafe { vk.external_memory_api.get_memory_fd_properties( vk::ExternalMemoryHandleTypeFlags::DMA_BUF_EXT, vk_fd.as_raw_fd(), &mut fd_props, )?; }; let image_memory_req = unsafe { vk.device.get_image_memory_requirements(image) }; let memory_type_index = select_memory_type( &vk.device_info.memory_props, 
vk::MemoryPropertyFlags::empty(), Some(image_memory_req.memory_type_bits & fd_props.memory_type_bits), ); trace!( ?fd_props, ?memory_type_index, ?image_memory_req, "memory import for dmabuf" ); let mut external_mem_info = vk::ImportMemoryFdInfoKHR::default() .handle_type(vk::ExternalMemoryHandleTypeFlags::DMA_BUF_EXT) .fd(vk_fd.into_raw_fd()); // Vulkan owns the fd now. // Technically we can query whether this is required, but it doesn't // hurt anyways. It seems to be only required on some NVIDIA cards. let mut dedicated_memory_info = vk::MemoryDedicatedAllocateInfo::default().image(image); let image_allocate_info = vk::MemoryAllocateInfo::default() .allocation_size(image_memory_req.size) .push_next(&mut external_mem_info) .push_next(&mut dedicated_memory_info); unsafe { vk.device.allocate_memory(&image_allocate_info, None)? } }; unsafe { vk.device.bind_image_memory(image, memory, 0)?; } let view = unsafe { create_image_view(&vk.device, image, vk_format, ignore_alpha)? }; let image = VkImage::wrap(vk.clone(), image, view, memory, vk_format, width, height); Ok(Buffer { wl_buffer, backing: BufferBacking::Dmabuf { format, fd, image }, needs_destruction: false, }) } pub fn validate_buffer_parameters( offset: i32, width: i32, height: i32, stride: i32, bpp: usize, ) -> Result<(), String> { if offset < 0 { return Err("Negative offset.".to_string()); } if width <= 0 || height <= 0 { return Err("Invalid height or width.".to_string()); } if stride <= 0 || stride.checked_div(bpp as i32).unwrap_or(0) < width || stride.checked_mul(height).is_none() { return Err("Invalid stride.".to_string()); } if let Some(size) = stride.checked_mul(height) { if offset.checked_add(size).is_none() { return Err("Invalid offset.".to_string()); } } else { return Err("Invalid total size.".to_string()); } Ok(()) } #[allow(dead_code)] mod ioctl { use std::{ffi::c_void, os::fd::RawFd}; use rustix::{ io::Errno, ioctl::{opcode, Opcode}, }; pub(super) const DMA_BUF_SYNC_READ: u32 = 1 << 0; pub(super) 
const DMA_BUF_SYNC_WRITE: u32 = 1 << 1; #[repr(C)] #[allow(non_camel_case_types)] struct dma_buf_export_sync_file { pub flags: u32, pub fd: i32, } #[repr(C)] #[allow(non_camel_case_types)] struct dma_buf_import_sync_file { pub flags: u32, pub fd: i32, } pub(super) struct ExportSyncFile(dma_buf_export_sync_file); impl ExportSyncFile { pub(super) fn new(flags: u32) -> Self { Self(dma_buf_export_sync_file { flags, fd: -1 }) } } pub(super) struct ImportSyncFile(dma_buf_import_sync_file); impl ImportSyncFile { pub(super) fn new(fd: RawFd, flags: u32) -> Self { Self(dma_buf_import_sync_file { flags, fd }) } } unsafe impl rustix::ioctl::Ioctl for ExportSyncFile { type Output = RawFd; const IS_MUTATING: bool = true; fn opcode(&self) -> Opcode { opcode::read_write::(b'b', 2) } fn as_ptr(&mut self) -> *mut c_void { &mut self.0 as *mut dma_buf_export_sync_file as _ } unsafe fn output_from_ptr( out: rustix::ioctl::IoctlOutput, extract_output: *mut c_void, ) -> rustix::io::Result { let res: &mut dma_buf_export_sync_file = &mut *(extract_output as *mut _); if out != 0 { Err(rustix::io::Errno::from_raw_os_error(out)) } else if res.fd <= 0 { Err(Errno::INVAL) } else { Ok(res.fd) } } } unsafe impl rustix::ioctl::Ioctl for ImportSyncFile { type Output = (); const IS_MUTATING: bool = true; fn opcode(&self) -> Opcode { opcode::write::(b'b', 3) } fn as_ptr(&mut self) -> *mut c_void { &mut self.0 as *mut dma_buf_import_sync_file as _ } unsafe fn output_from_ptr( out: rustix::ioctl::IoctlOutput, _: *mut c_void, ) -> rustix::io::Result { if out == 0 { Ok(()) } else { Err(Errno::from_raw_os_error(out)) } } } } /// Retrieves a dmabuf fence, and uses it to set a semaphore. The semaphore will /// be triggered when the dmabuf texture is safe to read. Note that the spec /// insists that the semaphore must be waited on once set this way. 
#[instrument(skip_all)] pub fn import_dmabuf_fence_as_semaphore( vk: Arc, semaphore: vk::Semaphore, fd: impl AsFd, ) -> anyhow::Result<()> { let fd = fd.as_fd(); let sync_fd = unsafe { export_sync_file(fd, ioctl::DMA_BUF_SYNC_READ)? }; unsafe { import_sync_file_as_semaphore(vk, sync_fd, semaphore) } } #[instrument(skip_all)] pub unsafe fn import_sync_file_as_semaphore( vk: Arc, fd: OwnedFd, semaphore: vk::Semaphore, ) -> anyhow::Result<()> { let import_info = vk::ImportSemaphoreFdInfoKHR::default() .semaphore(semaphore) .handle_type(vk::ExternalSemaphoreHandleTypeFlags::SYNC_FD) .flags(vk::SemaphoreImportFlags::TEMPORARY) .fd(fd.into_raw_fd()); // Vulkan owns the fd now. vk.external_semaphore_api .import_semaphore_fd(&import_info)?; Ok(()) } /// Retrieves the fd of a sync file for a dmabuf. pub unsafe fn export_sync_file(dmabuf: impl AsFd, flags: u32) -> anyhow::Result { let raw_fd = rustix::ioctl::ioctl(dmabuf, ioctl::ExportSyncFile::new(flags)) .context("DMA_BUF_IOCTL_EXPORT_SYNC_FILE")?; Ok(OwnedFd::from_raw_fd(raw_fd)) } /// Attaches a sync file to a dmabuf. // TODO: the kernel docs and online resources state that we need to use this to // attach a "render finished" semaphore back onto the client buffers once we // start rendering. I think that's unecessary as long as we wait to call // `wl_buffer.release` until long after we're done compositing, which we do as // of this writing. #[allow(dead_code)] pub unsafe fn attach_sync_file( dmabuf: impl AsFd, flags: u32, sync_file: OwnedFd, // Closed on return. 
) -> anyhow::Result<()> { rustix::ioctl::ioctl( dmabuf, ioctl::ImportSyncFile::new(sync_file.as_raw_fd(), flags), ) .context("DMA_BUF_IOCTL_IMPORT_SYNC_FILE")?; Ok(()) } ================================================ FILE: mm-server/src/session/compositor/dispatch/shm.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use std::{cell::RefCell, os::fd::AsRawFd as _, rc::Rc}; use shipyard::{AddComponent as _, Component, EntityId, Get as _, NonSendSync, ViewMut}; use tracing::error; use wayland_server::{ protocol::{wl_shm, wl_shm_pool}, Resource as _, }; use crate::compositor::{ buffers::{validate_buffer_parameters, Buffer, PlaneMetadata}, shm::Pool, State, }; #[derive(Component, Debug)] pub struct ShmPool { wl_shm: wl_shm::WlShm, wl_shm_pool: wl_shm_pool::WlShmPool, pool: Rc>, } impl wayland_server::GlobalDispatch for State { fn bind( _state: &mut Self, _handle: &wayland_server::DisplayHandle, _client: &wayland_server::Client, resource: wayland_server::New, _global_data: &(), data_init: &mut wayland_server::DataInit<'_, Self>, ) { data_init.init(resource, ()); } } impl wayland_server::Dispatch for State { fn request( state: &mut Self, _client: &wayland_server::Client, wl_shm: &wl_shm::WlShm, request: wl_shm::Request, _data: &(), _dhandle: &wayland_server::DisplayHandle, data_init: &mut wayland_server::DataInit<'_, Self>, ) { match request { wl_shm::Request::CreatePool { id, fd, size } => { if size <= 0 { wl_shm.post_error( wl_shm::Error::InvalidStride, "Negative or zero size provided.", ); } let fd_debug = fd.as_raw_fd(); let pool = match Pool::new(fd, size as usize) { Ok(p) => p, Err(err) => { error!(?err, fd = fd_debug, size, "failed to map client shm"); wl_shm.post_error(wl_shm::Error::InvalidFd, "mmap failed."); return; } }; let entity_id = state.world.add_entity(()); let wl_shm_pool = data_init.init(id, entity_id); // Required because Pool is not send or sync. 
let mut vm = state .world .borrow::>>() .expect("borrow failed"); // The pool shouldn't be unmapped until all buffers referencing it have been // destroyed. We represent this with an Rc. vm.add_component_unchecked( entity_id, ShmPool { wl_shm: wl_shm.clone(), wl_shm_pool, pool: Rc::new(RefCell::new(pool)), }, ); } _ => unreachable!(), } } } impl wayland_server::Dispatch for State { fn request( state: &mut Self, _client: &wayland_server::Client, resource: &wl_shm_pool::WlShmPool, request: wl_shm_pool::Request, entity_id: &EntityId, _dhandle: &wayland_server::DisplayHandle, data_init: &mut wayland_server::DataInit<'_, Self>, ) { match request { wl_shm_pool::Request::CreateBuffer { id, offset, width, height, stride, format, } => { let vm = state .world .borrow::>>() .expect("borrow failed"); let shm_pool = vm.get(*entity_id).expect("pool has no entity"); if !matches!( format.into_result(), Ok(wl_shm::Format::Argb8888) | Ok(wl_shm::Format::Xrgb8888) ) { resource.post_error(wl_shm::Error::InvalidFormat, "Invalid format."); return; } if let Err(msg) = validate_buffer_parameters(offset, width, height, stride, 4) { resource.post_error(wl_shm::Error::InvalidStride, msg); return; } let buffer_size = stride * height; if (offset + buffer_size) as usize > shm_pool.pool.borrow().size { resource .post_error(wl_shm::Error::InvalidStride, "Size exceeds pool capacity."); return; } let entity_id = state.world.add_entity(()); let wl_buffer = data_init.init(id, entity_id); let buffer = Buffer::Shm { wl_buffer, offset: offset as u32, pool: shm_pool.pool.clone(), metadata: PlaneMetadata { width: width as u32, height: height as u32, stride: stride as u32, bpp: 4, }, }; let mut vm = state .world .borrow::>>() .expect("borrow failed"); vm.add_component_unchecked(entity_id, buffer); } wl_shm_pool::Request::Resize { size } => { let vm = state .world .borrow::>>() .expect("borrow failed"); let shm_pool = vm.get(*entity_id).expect("pool has no entity"); let mut pool = 
shm_pool.pool.borrow_mut(); if size <= pool.size as i32 { resource.post_error(wl_shm::Error::InvalidStride, "Invalid size provided."); return; } match pool.resize(size as usize) { Ok(_) => (), Err(err) => { error!(?err, "failed to remap shm"); resource.post_error(wl_shm::Error::InvalidFd, "mmap operation failed."); } } } wl_shm_pool::Request::Destroy => (), _ => unreachable!(), } } fn destroyed( state: &mut Self, _client: wayland_server::backend::ClientId, _resource: &wl_shm_pool::WlShmPool, entity_id: &EntityId, ) { // Buffers continue to be valid after their backing pool is destroyed. state.world.delete_entity(*entity_id); } } ================================================ FILE: mm-server/src/session/compositor/dispatch/wl_buffer.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use wayland_server::protocol::wl_buffer; use crate::session::compositor::{buffers::BufferKey, Compositor}; impl wayland_server::Dispatch for Compositor { fn request( _state: &mut Self, _client: &wayland_server::Client, _resource: &wl_buffer::WlBuffer, request: wl_buffer::Request, _data: &BufferKey, _dhandle: &wayland_server::DisplayHandle, _data_init: &mut wayland_server::DataInit<'_, Self>, ) { match request { wl_buffer::Request::Destroy => (), _ => unreachable!(), } } fn destroyed( state: &mut Self, _client: wayland_server::backend::ClientId, _resource: &wl_buffer::WlBuffer, data: &BufferKey, ) { // We can't destroy the buffer until it's released. This marks it for // destruction later. 
if let Some(buffer) = state.buffers.get_mut(*data) { buffer.needs_destruction = true; } } } ================================================ FILE: mm-server/src/session/compositor/dispatch/wl_compositor.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use tracing::warn; use wayland_server::{ protocol::{wl_callback, wl_compositor, wl_output, wl_region, wl_surface}, Resource as _, }; use crate::{ pixel_scale::PixelScale, session::compositor::{ surface::{CommitError, PendingBuffer, Surface, SurfaceKey}, Compositor, }, }; impl wayland_server::GlobalDispatch for Compositor { fn bind( _state: &mut Self, _handle: &wayland_server::DisplayHandle, _client: &wayland_server::Client, resource: wayland_server::New, _global_data: &(), data_init: &mut wayland_server::DataInit<'_, Self>, ) { data_init.init(resource, ()); } } impl wayland_server::Dispatch for Compositor { fn request( state: &mut Self, _client: &wayland_server::Client, _resource: &wl_compositor::WlCompositor, request: wl_compositor::Request, _data: &(), _dhandle: &wayland_server::DisplayHandle, data_init: &mut wayland_server::DataInit<'_, Self>, ) { match request { wl_compositor::Request::CreateSurface { id } => { state .surfaces .insert_with_key(|k| Surface::new(data_init.init(id, k))); } wl_compositor::Request::CreateRegion { id } => { // We don't do anything with regions. 
data_init.init(id, ()); } _ => unreachable!(), } } } impl wayland_server::Dispatch for Compositor { fn request( state: &mut Self, _client: &wayland_server::Client, resource: &wl_surface::WlSurface, request: wl_surface::Request, data: &SurfaceKey, _dhandle: &wayland_server::DisplayHandle, data_init: &mut wayland_server::DataInit<'_, Self>, ) { match request { wl_surface::Request::Attach { buffer, x, y } => { if x != 0 || y != 0 { warn!(x, y, "ignoring nonzero buffer x/y offset") } state .surfaces .get_mut(*data) .expect("surface has no entry") .pending_buffer = match buffer { Some(buf) => { let buffer_id = *buf.data().expect("buffer has no userdata"); Some(PendingBuffer::Attach(buffer_id)) } None => Some(PendingBuffer::Detach), }; } wl_surface::Request::Frame { callback } => { let callback = data_init.init(callback, *data); state .surfaces .get_mut(*data) .expect("surface has no entry") .frame_callback .pending = Some(callback); } wl_surface::Request::Commit => { if let Err(CommitError(code, msg)) = state.surface_commit(*data) { resource.post_error(code, msg); } } wl_surface::Request::SetBufferTransform { transform } => { if !matches!(transform.into_result(), Ok(wl_output::Transform::Normal)) { warn!(?transform, "ignoring nonzero buffer rotation"); } } wl_surface::Request::SetBufferScale { scale } => { if scale < 1 { resource.post_error(wl_surface::Error::InvalidScale, "Scale must be >= 1"); return; } state .surfaces .get_mut(*data) .expect("surface has no entry") .buffer_scale .pending = Some(PixelScale(scale as u32, 1)); } wl_surface::Request::Offset { x, y } => { if x != 0 || y != 0 { warn!(x, y, "ignoring nonzero buffer offset"); } } // We ignore damage and don't do any related optimizations. wl_surface::Request::DamageBuffer { .. } => (), wl_surface::Request::Damage { .. } => (), // We ignore input and opaque regions, because we don't support subcompositing. wl_surface::Request::SetOpaqueRegion { .. } => (), wl_surface::Request::SetInputRegion { .. 
} => (), wl_surface::Request::Destroy => (), _ => unreachable!(), } } fn destroyed( state: &mut Self, _client: wayland_server::backend::ClientId, _resource: &wl_surface::WlSurface, data: &SurfaceKey, ) { state.surface_destroyed(*data); } } impl wayland_server::Dispatch for Compositor { fn request( _state: &mut Self, _client: &wayland_server::Client, _resource: &wl_callback::WlCallback, _request: wl_callback::Request, _data: &SurfaceKey, _dhandle: &wayland_server::DisplayHandle, _data_init: &mut wayland_server::DataInit<'_, Self>, ) { } } impl wayland_server::Dispatch for Compositor { fn request( _state: &mut Self, _client: &wayland_server::Client, _resource: &wl_region::WlRegion, _request: wl_region::Request, _data: &(), _dhandle: &wayland_server::DisplayHandle, _data_init: &mut wayland_server::DataInit<'_, Self>, ) { } } ================================================ FILE: mm-server/src/session/compositor/dispatch/wl_data_device_manager.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use wayland_server::protocol::{wl_data_device, wl_data_device_manager, wl_data_source}; use crate::session::compositor::Compositor; // We offer a stubbed version of this protocol, because GTK chokes without it // being present. 
impl wayland_server::GlobalDispatch for Compositor { fn bind( _state: &mut Self, _handle: &wayland_server::DisplayHandle, _client: &wayland_server::Client, resource: wayland_server::New, _global_data: &(), data_init: &mut wayland_server::DataInit<'_, Self>, ) { data_init.init(resource, ()); } } impl wayland_server::Dispatch for Compositor { fn request( _state: &mut Self, _client: &wayland_server::Client, _resource: &wl_data_device_manager::WlDataDeviceManager, request: wl_data_device_manager::Request, _data: &(), _dhandle: &wayland_server::DisplayHandle, data_init: &mut wayland_server::DataInit<'_, Self>, ) { match request { wl_data_device_manager::Request::CreateDataSource { id } => { data_init.init(id, ()); } wl_data_device_manager::Request::GetDataDevice { id, .. } => { data_init.init(id, ()); } _ => (), } } } impl wayland_server::Dispatch for Compositor { fn request( _state: &mut Self, _client: &wayland_server::Client, _resource: &wl_data_source::WlDataSource, _request: wl_data_source::Request, _data: &(), _dhandle: &wayland_server::DisplayHandle, _data_init: &mut wayland_server::DataInit<'_, Self>, ) { } } impl wayland_server::Dispatch for Compositor { fn request( _state: &mut Self, _client: &wayland_server::Client, _resource: &wl_data_device::WlDataDevice, _request: wl_data_device::Request, _data: &(), _dhandle: &wayland_server::DisplayHandle, _data_init: &mut wayland_server::DataInit<'_, Self>, ) { } } ================================================ FILE: mm-server/src/session/compositor/dispatch/wl_drm.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use crate::session::compositor::{protocols::wl_drm, Compositor}; impl wayland_server::GlobalDispatch for Compositor { fn bind( state: &mut Compositor, _handle: &wayland_server::DisplayHandle, _client: &wayland_server::Client, resource: wayland_server::New, _global_data: &(), data_init: &mut wayland_server::DataInit<'_, Compositor>, ) { let 
wl_drm = data_init.init(resource, ()); wl_drm.device( dev_path(state.vk.device_info.drm_node).expect("failed to determine device node"), ); } } impl wayland_server::Dispatch for Compositor { fn request( _state: &mut Compositor, _client: &wayland_server::Client, _resource: &wl_drm::WlDrm, _request: wl_drm::Request, _data: &(), _dhandle: &wayland_server::DisplayHandle, _data_init: &mut wayland_server::DataInit<'_, Compositor>, ) { } } pub fn dev_path(dev: libc::dev_t) -> std::io::Result { let (major, minor) = unsafe { (libc::major(dev), libc::minor(dev)) }; assert_eq!(major, 226, "not a DRM device"); assert!(minor >= 128, "not a render node"); for f in std::fs::read_dir(format!("/sys/dev/char/{}:{}/device/drm", major, minor))?.flatten() { let name = f.file_name(); let name = name.to_string_lossy(); if name.starts_with("renderD") { let path = format!("/dev/dri/{}", name); std::fs::metadata(&path)?; return Ok(path); } } Err(std::io::Error::new( std::io::ErrorKind::NotFound, "no render node found", )) } ================================================ FILE: mm-server/src/session/compositor/dispatch/wl_output.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use wayland_server::protocol::wl_output; use crate::session::compositor::{output::configure_output, Compositor}; impl wayland_server::GlobalDispatch for Compositor { fn bind( state: &mut Self, _handle: &wayland_server::DisplayHandle, _client: &wayland_server::Client, resource: wayland_server::New, _global_data: &(), data_init: &mut wayland_server::DataInit<'_, Self>, ) { let wl_output = data_init.init(resource, ()); configure_output(&wl_output, state.display_params); state.output_proxies.push(wl_output); } } impl wayland_server::Dispatch for Compositor { fn request( _state: &mut Self, _client: &wayland_server::Client, _resource: &wl_output::WlOutput, _request: wl_output::Request, _data: &(), _dhandle: &wayland_server::DisplayHandle, _data_init: 
&mut wayland_server::DataInit<'_, Self>, ) { } fn destroyed( state: &mut Self, _client: wayland_server::backend::ClientId, resource: &wl_output::WlOutput, _data: &(), ) { state.output_proxies.retain(|o| o == resource); } } ================================================ FILE: mm-server/src/session/compositor/dispatch/wl_seat.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use wayland_server::{ protocol::{wl_keyboard, wl_pointer, wl_seat}, Resource as _, }; use crate::session::compositor::{seat::Cursor, Compositor}; impl wayland_server::GlobalDispatch for Compositor { fn bind( _state: &mut Self, _handle: &wayland_server::DisplayHandle, _client: &wayland_server::Client, resource: wayland_server::New, _global_data: &(), data_init: &mut wayland_server::DataInit<'_, Self>, ) { let wl_seat = data_init.init(resource, ()); wl_seat.capabilities(wl_seat::Capability::Keyboard | wl_seat::Capability::Pointer); } } impl wayland_server::Dispatch for Compositor { fn request( state: &mut Self, _client: &wayland_server::Client, resource: &wl_seat::WlSeat, request: wl_seat::Request, _data: &(), _dhandle: &wayland_server::DisplayHandle, data_init: &mut wayland_server::DataInit<'_, Self>, ) { match request { wl_seat::Request::GetPointer { id } => { let wl_pointer = data_init.init(id, ()); state.default_seat.get_pointer(wl_pointer); } wl_seat::Request::GetKeyboard { id } => { let wl_keyboard = data_init.init(id, ()); state.default_seat.get_keyboard(wl_keyboard); } wl_seat::Request::GetTouch { .. 
} => { resource.post_error( wl_seat::Error::MissingCapability, "No touch capability advertized.", ); } _ => (), } } } impl wayland_server::Dispatch for Compositor { fn request( state: &mut Self, _client: &wayland_server::Client, resource: &wl_pointer::WlPointer, request: wl_pointer::Request, _data: &(), _dhandle: &wayland_server::DisplayHandle, _data_init: &mut wayland_server::DataInit<'_, Self>, ) { match request { wl_pointer::Request::SetCursor { surface, hotspot_x, hotspot_y, .. } => { let hotspot_x = hotspot_x.max(0) as u32; let hotspot_y = hotspot_y.max(0) as u32; let cursor = if let Some(wl_surface) = surface { Cursor::Surface { surface: *wl_surface.data().unwrap(), hotspot: (hotspot_x, hotspot_y).into(), needs_render: true, rendered: None, } } else { Cursor::Hidden }; state.set_cursor(resource, cursor); } wl_pointer::Request::Release => (), _ => (), } } fn destroyed( state: &mut Self, _client: wayland_server::backend::ClientId, resource: &wl_pointer::WlPointer, _data: &(), ) { state.default_seat.destroy_pointer(resource); } } impl wayland_server::Dispatch for Compositor { fn request( _state: &mut Self, _client: &wayland_server::Client, _resource: &wl_keyboard::WlKeyboard, _request: wl_keyboard::Request, _data: &(), _dhandle: &wayland_server::DisplayHandle, _data_init: &mut wayland_server::DataInit<'_, Self>, ) { } fn destroyed( state: &mut Self, _client: wayland_server::backend::ClientId, resource: &wl_keyboard::WlKeyboard, _data: &(), ) { state.default_seat.destroy_keyboard(resource); } } ================================================ FILE: mm-server/src/session/compositor/dispatch/wl_shm.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use std::{ os::fd::AsRawFd as _, sync::{Arc, RwLock}, }; use tracing::error; use wayland_server::{ protocol::{wl_shm, wl_shm_pool}, Resource as _, }; use crate::session::compositor::{ buffers::{fourcc_bpp, import_shm_buffer, validate_buffer_parameters, 
PlaneMetadata}, shm::{Pool, ShmPool, ShmPoolKey}, Compositor, }; impl wayland_server::GlobalDispatch for Compositor { fn bind( _state: &mut Self, _handle: &wayland_server::DisplayHandle, _client: &wayland_server::Client, resource: wayland_server::New, _global_data: &(), data_init: &mut wayland_server::DataInit<'_, Self>, ) { let wl_shm = data_init.init(resource, ()); wl_shm.format(wl_shm::Format::Xrgb8888); wl_shm.format(wl_shm::Format::Argb8888); } } impl wayland_server::Dispatch for Compositor { fn request( state: &mut Self, _client: &wayland_server::Client, wl_shm: &wl_shm::WlShm, request: wl_shm::Request, _data: &(), _dhandle: &wayland_server::DisplayHandle, data_init: &mut wayland_server::DataInit<'_, Self>, ) { match request { wl_shm::Request::CreatePool { id, fd, size } => { if size <= 0 { wl_shm.post_error( wl_shm::Error::InvalidStride, "Negative or zero size provided.", ); } let fd_debug = fd.as_raw_fd(); let pool = match Pool::new(fd, size as usize) { Ok(p) => p, Err(err) => { error!(?err, fd = fd_debug, size, "failed to map client shm"); wl_shm.post_error(wl_shm::Error::InvalidFd, "mmap failed."); return; } }; state.shm_pools.insert_with_key(|k| { let wl_shm_pool = data_init.init(id, k); ShmPool { _wl_shm: wl_shm.clone(), _wl_shm_pool: wl_shm_pool, // The pool shouldn't be unmapped until all buffers referencing it have been // destroyed. We represent this constraint with an Arc. 
pool: Arc::new(RwLock::new(pool)), } }); } _ => unreachable!(), } } } impl wayland_server::Dispatch for Compositor { fn request( state: &mut Self, _client: &wayland_server::Client, resource: &wl_shm_pool::WlShmPool, request: wl_shm_pool::Request, data: &ShmPoolKey, _dhandle: &wayland_server::DisplayHandle, data_init: &mut wayland_server::DataInit<'_, Self>, ) { match request { wl_shm_pool::Request::CreateBuffer { id, offset, width, height, stride, format, } => { let pool = state .shm_pools .get(*data) .expect("shm_pool has no entry") .pool .clone(); let format = match format.into_result() { Ok(wl_shm::Format::Argb8888) => drm_fourcc::DrmFourcc::Argb8888, Ok(wl_shm::Format::Xrgb8888) => drm_fourcc::DrmFourcc::Xrgb8888, _ => { resource.post_error(wl_shm::Error::InvalidFormat, "Invalid format."); return; } }; let Some(bpp) = fourcc_bpp(format) else { resource.post_error(wl_shm::Error::InvalidFormat, "Invalid format."); return; }; if let Err(msg) = validate_buffer_parameters(offset, width, height, stride, bpp) { resource.post_error(wl_shm::Error::InvalidStride, msg); return; } let buffer_size = stride * height; if (offset + buffer_size) as usize > pool.read().unwrap().size { resource .post_error(wl_shm::Error::InvalidStride, "Size exceeds pool capacity."); return; } let format = PlaneMetadata { format, bpp, width: width as u32, height: height as u32, stride: stride as u32, offset: offset as u32, }; let res = state.buffers.try_insert_with_key(|k| { let wl_buffer = data_init.init(id, k); import_shm_buffer(state.vk.clone(), wl_buffer, pool, format) }); if res.is_err() { resource.post_error(wl_shm::Error::InvalidFd, "Import failed."); }; } wl_shm_pool::Request::Resize { size } => { let shm_pool = state.shm_pools.get_mut(*data).expect("pool has no entry"); let mut pool = shm_pool.pool.write().unwrap(); if size <= pool.size as i32 { resource.post_error(wl_shm::Error::InvalidStride, "Invalid size provided."); return; } match pool.resize(size as usize) { Ok(_) => (), Err(err) 
=> { error!(?err, "failed to remap shm"); resource.post_error(wl_shm::Error::InvalidFd, "mmap operation failed."); } } } wl_shm_pool::Request::Destroy => (), _ => unreachable!(), } } fn destroyed( state: &mut Self, _client: wayland_server::backend::ClientId, _resource: &wl_shm_pool::WlShmPool, data: &ShmPoolKey, ) { // Buffers continue to be valid after their backing pool is destroyed. state.shm_pools.remove(*data); } } ================================================ FILE: mm-server/src/session/compositor/dispatch/wp_fractional_scale.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use wayland_protocols::wp::fractional_scale::v1::server::{ wp_fractional_scale_manager_v1, wp_fractional_scale_v1, }; use wayland_server::Resource as _; use crate::session::compositor::{surface::SurfaceKey, Compositor}; impl wayland_server::GlobalDispatch for Compositor { fn bind( _state: &mut Self, _handle: &wayland_server::DisplayHandle, _client: &wayland_server::Client, resource: wayland_server::New, _global_data: &(), data_init: &mut wayland_server::DataInit<'_, Self>, ) { data_init.init(resource, ()); } } impl wayland_server::Dispatch for Compositor { fn request( state: &mut Self, _client: &wayland_server::Client, resource: &wp_fractional_scale_manager_v1::WpFractionalScaleManagerV1, request: wp_fractional_scale_manager_v1::Request, _data: &(), _dhandle: &wayland_server::DisplayHandle, data_init: &mut wayland_server::DataInit<'_, Self>, ) { match request { wp_fractional_scale_manager_v1::Request::GetFractionalScale { id, surface } => { if let Some(surface_key) = surface.data::() { let wp_fractional_scale = data_init.init(id, *surface_key); let surface = state .surfaces .get_mut(*surface_key) .expect("surface has no entry"); if surface.wp_fractional_scale.is_some() { resource.post_error( wp_fractional_scale_manager_v1::Error::FractionalScaleExists, "wp_fractional_scale object already exists for surface.", ) } 
surface.wp_fractional_scale = Some(wp_fractional_scale); } } wp_fractional_scale_manager_v1::Request::Destroy => (), _ => unreachable!(), } } } impl wayland_server::Dispatch for Compositor { fn request( _state: &mut Self, _client: &wayland_server::Client, _resource: &wp_fractional_scale_v1::WpFractionalScaleV1, _request: wp_fractional_scale_v1::Request, _data: &SurfaceKey, _dhandle: &wayland_server::DisplayHandle, _data_init: &mut wayland_server::DataInit<'_, Self>, ) { } } ================================================ FILE: mm-server/src/session/compositor/dispatch/wp_linux_dmabuf.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use std::{ os::fd::OwnedFd, sync::{Arc, RwLock}, }; use drm_fourcc::DrmFourcc; use tracing::error; use wayland_protocols::wp::linux_dmabuf::zv1::server::{ zwp_linux_buffer_params_v1, zwp_linux_dmabuf_feedback_v1, zwp_linux_dmabuf_v1, }; use wayland_server::{protocol::wl_buffer, Resource as _, WEnum}; use super::make_u64; use crate::session::compositor::{ buffers::{fourcc_bpp, import_dmabuf_buffer, validate_buffer_parameters, PlaneMetadata}, Compositor, }; impl wayland_server::GlobalDispatch for Compositor { fn bind( _state: &mut Self, _handle: &wayland_server::DisplayHandle, _client: &wayland_server::Client, resource: wayland_server::New, _global_data: &(), data_init: &mut wayland_server::DataInit<'_, Self>, ) { data_init.init(resource, ()); } } impl wayland_server::Dispatch for Compositor { fn request( state: &mut Self, _client: &wayland_server::Client, _resource: &zwp_linux_dmabuf_v1::ZwpLinuxDmabufV1, request: zwp_linux_dmabuf_v1::Request, _data: &(), _dhandle: &wayland_server::DisplayHandle, data_init: &mut wayland_server::DataInit<'_, Self>, ) { match request { zwp_linux_dmabuf_v1::Request::CreateParams { params_id } => { data_init.init(params_id, Arc::new(RwLock::new(Params::Empty))); } zwp_linux_dmabuf_v1::Request::GetDefaultFeedback { id } => { let feedback 
= data_init.init(id, ()); state.emit_dmabuf_feedback(&feedback); } zwp_linux_dmabuf_v1::Request::GetSurfaceFeedback { id, .. } => { let feedback = data_init.init(id, ()); state.emit_dmabuf_feedback(&feedback); } zwp_linux_dmabuf_v1::Request::Destroy => (), _ => (), } } } #[derive(Debug)] enum Params { Empty, Config { fd: OwnedFd, offset: u32, stride: u32, modifier: u64, }, Done, } impl wayland_server::Dispatch< zwp_linux_buffer_params_v1::ZwpLinuxBufferParamsV1, Arc>, > for Compositor { fn request( state: &mut Self, client: &wayland_server::Client, resource: &zwp_linux_buffer_params_v1::ZwpLinuxBufferParamsV1, request: zwp_linux_buffer_params_v1::Request, data: &Arc>, dh: &wayland_server::DisplayHandle, data_init: &mut wayland_server::DataInit<'_, Self>, ) { match request { zwp_linux_buffer_params_v1::Request::Add { fd, plane_idx, offset, stride, modifier_hi, modifier_lo, } => { if plane_idx > 0 { resource.post_error( zwp_linux_buffer_params_v1::Error::PlaneIdx, "Multiplane images are not supported.", ); return; } let mut params = data.write().unwrap(); if matches!(*params, Params::Config { .. } | Params::Done) { resource.post_error( zwp_linux_buffer_params_v1::Error::PlaneSet, "Plane 0 already configured.", ); return; } let modifier = make_u64(modifier_hi, modifier_lo); if resource.version() >= 4 && !state.cached_dmabuf_feedback.contains(modifier) { resource.post_error( zwp_linux_buffer_params_v1::Error::InvalidFormat, "Unsupported format.", ); } *params = Params::Config { fd, offset, stride, modifier, }; } zwp_linux_buffer_params_v1::Request::Create { width, height, format, flags, } => { let mut params = data.write().unwrap(); let format = match validate_create(¶ms, width, height, format, flags) { Ok(f) => f, Err((e, s)) => { resource.post_error(e, s); return; } }; let Params::Config { fd, modifier, .. 
} = std::mem::replace(&mut *params, Params::Done) else { unreachable!(); }; let res = state.buffers.try_insert_with_key(|k| { let wl_buffer = client.create_resource::(dh, 1, k)?; import_dmabuf_buffer(state.vk.clone(), wl_buffer, format, modifier.into(), fd) }); if res.is_err() { resource.failed(); }; } zwp_linux_buffer_params_v1::Request::CreateImmed { buffer_id, width, height, format, flags, } => { let mut params = data.write().unwrap(); let format = match validate_create(¶ms, width, height, format, flags) { Ok(f) => f, Err((e, s)) => { resource.post_error(e, s); return; } }; let Params::Config { fd, modifier, .. } = std::mem::replace(&mut *params, Params::Done) else { unreachable!(); }; let res = state.buffers.try_insert_with_key(|k| { let wl_buffer = data_init.init(buffer_id, k); import_dmabuf_buffer(state.vk.clone(), wl_buffer, format, modifier.into(), fd) }); if let Err(err) = res { error!(?err, "failed to import dmabuf"); resource.post_error( zwp_linux_buffer_params_v1::Error::InvalidWlBuffer, "Import failed.", ); }; } zwp_linux_buffer_params_v1::Request::Destroy => (), _ => (), } } } fn validate_create( params: &Params, width: i32, height: i32, format: u32, flags: WEnum, ) -> Result { if !flags .into_result() .map(|f| f.is_empty()) .unwrap_or_default() { return Err(( zwp_linux_buffer_params_v1::Error::InvalidFormat, "Invalid flags.".to_string(), )); } match *params { Params::Empty => { return Err(( zwp_linux_buffer_params_v1::Error::Incomplete, "Plane 0 not configured.".to_string(), )) } Params::Done => { return Err(( zwp_linux_buffer_params_v1::Error::AlreadyUsed, "Params already consumed.".to_string(), )) } _ => (), } let format = match DrmFourcc::try_from(format) { Ok(format) => format, Err(_) => { return Err(( zwp_linux_buffer_params_v1::Error::InvalidFormat, "Unknown format.".to_string(), )) } }; let Some(bpp) = fourcc_bpp(format) else { return Err(( zwp_linux_buffer_params_v1::Error::InvalidFormat, "Unsupported format.".to_string(), )); }; let 
Params::Config { offset, stride, .. } = params else { unreachable!() }; if let Err(s) = validate_buffer_parameters(*offset as i32, width, height, *stride as i32, bpp) { return Err((zwp_linux_buffer_params_v1::Error::InvalidDimensions, s)); } Ok(PlaneMetadata { format, bpp, width: width as u32, height: height as u32, stride: *stride, offset: *offset, }) } impl wayland_server::Dispatch for Compositor { fn request( _state: &mut Self, _client: &wayland_server::Client, _resource: &zwp_linux_dmabuf_feedback_v1::ZwpLinuxDmabufFeedbackV1, _request: zwp_linux_dmabuf_feedback_v1::Request, _data: &(), _dhandle: &wayland_server::DisplayHandle, _data_init: &mut wayland_server::DataInit<'_, Self>, ) { } } ================================================ FILE: mm-server/src/session/compositor/dispatch/wp_linux_drm_syncobj.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use tracing::error; use wayland_protocols::wp::linux_drm_syncobj::v1::server::{ wp_linux_drm_syncobj_manager_v1, wp_linux_drm_syncobj_surface_v1, wp_linux_drm_syncobj_timeline_v1, }; use wayland_server::Resource as _; use crate::session::compositor::{ buffers::{SyncobjTimeline, SyncobjTimelineKey}, surface::SurfaceKey, Compositor, }; impl wayland_server::GlobalDispatch for Compositor { fn bind( _state: &mut Self, _handle: &wayland_server::DisplayHandle, _client: &wayland_server::Client, resource: wayland_server::New, _global_data: &(), data_init: &mut wayland_server::DataInit<'_, Self>, ) { data_init.init(resource, ()); } } impl wayland_server::Dispatch for Compositor { fn request( state: &mut Self, _client: &wayland_server::Client, resource: &wp_linux_drm_syncobj_manager_v1::WpLinuxDrmSyncobjManagerV1, request: wp_linux_drm_syncobj_manager_v1::Request, _data: &(), _dhandle: &wayland_server::DisplayHandle, data_init: &mut wayland_server::DataInit<'_, Self>, ) { match request { wp_linux_drm_syncobj_manager_v1::Request::GetSurface { id, surface 
} => { if let Some(surface_key) = surface.data::() { let wp_syncobj_surface = data_init.init(id, *surface_key); let surface = state .surfaces .get_mut(*surface_key) .expect("surface has no entry"); if surface.wp_syncobj_surface.is_some() { resource.post_error( wp_linux_drm_syncobj_manager_v1::Error::SurfaceExists, "A syncobj surface already exists for that wl_surface.", ); return; } surface.wp_syncobj_surface = Some(wp_syncobj_surface); } } wp_linux_drm_syncobj_manager_v1::Request::ImportTimeline { id, fd } => { if let Err(err) = state.imported_syncobj_timelines.try_insert_with_key(|k| { SyncobjTimeline::import(state.vk.clone(), data_init.init(id, k), fd) }) { error!("failed to import syncobj timeline: {err:#}"); resource.post_error( wp_linux_drm_syncobj_manager_v1::Error::InvalidTimeline, "Failed to import timeline.", ); } } wp_linux_drm_syncobj_manager_v1::Request::Destroy => (), _ => unreachable!(), } } } impl wayland_server::Dispatch< wp_linux_drm_syncobj_surface_v1::WpLinuxDrmSyncobjSurfaceV1, SurfaceKey, > for Compositor { fn request( state: &mut Self, _client: &wayland_server::Client, _resource: &wp_linux_drm_syncobj_surface_v1::WpLinuxDrmSyncobjSurfaceV1, request: wp_linux_drm_syncobj_surface_v1::Request, surface_key: &SurfaceKey, _dhandle: &wayland_server::DisplayHandle, _data_init: &mut wayland_server::DataInit<'_, Self>, ) { match request { wp_linux_drm_syncobj_surface_v1::Request::SetAcquirePoint { timeline, point_hi, point_lo, } => { let timeline = timeline .data::() .and_then(|key| state.imported_syncobj_timelines.get(*key)) .expect("timeline has no entry"); let surface = state .surfaces .get_mut(*surface_key) .expect("surface has no entry"); surface.pending_acquire_point = Some(timeline.new_timeline_point(super::make_u64(point_hi, point_lo))) } wp_linux_drm_syncobj_surface_v1::Request::SetReleasePoint { timeline, point_hi, point_lo, } => { let timeline = timeline .data::() .and_then(|key| state.imported_syncobj_timelines.get(*key)) .expect("timeline 
has no entry"); let surface = state .surfaces .get_mut(*surface_key) .expect("surface has no entry"); surface.pending_release_point = Some(timeline.new_timeline_point(super::make_u64(point_hi, point_lo))) } wp_linux_drm_syncobj_surface_v1::Request::Destroy => (), _ => unreachable!(), } } fn destroyed( state: &mut Self, _client: wayland_server::backend::ClientId, _resource: &wp_linux_drm_syncobj_surface_v1::WpLinuxDrmSyncobjSurfaceV1, surface_key: &SurfaceKey, ) { if let Some(surface) = state.surfaces.get_mut(*surface_key) { surface.wp_syncobj_surface = None; surface.pending_acquire_point = None; surface.pending_release_point = None; } } } impl wayland_server::Dispatch< wp_linux_drm_syncobj_timeline_v1::WpLinuxDrmSyncobjTimelineV1, SyncobjTimelineKey, > for Compositor { fn request( _state: &mut Self, _client: &wayland_server::Client, _resource: &wp_linux_drm_syncobj_timeline_v1::WpLinuxDrmSyncobjTimelineV1, _request: wp_linux_drm_syncobj_timeline_v1::Request, _data: &SyncobjTimelineKey, _dhandle: &wayland_server::DisplayHandle, _data_init: &mut wayland_server::DataInit<'_, Self>, ) { } } ================================================ FILE: mm-server/src/session/compositor/dispatch/wp_pointer_constraints.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use wayland_protocols::wp::pointer_constraints::zv1::server::{ zwp_confined_pointer_v1, zwp_locked_pointer_v1, zwp_pointer_constraints_v1, }; use wayland_server::Resource as _; use crate::session::compositor::Compositor; impl wayland_server::GlobalDispatch for Compositor { fn bind( _state: &mut Self, _handle: &wayland_server::DisplayHandle, _client: &wayland_server::Client, resource: wayland_server::New, _global_data: &(), data_init: &mut wayland_server::DataInit<'_, Self>, ) { data_init.init(resource, ()); } } impl wayland_server::Dispatch for Compositor { fn request( state: &mut Self, _client: &wayland_server::Client, resource: 
&zwp_pointer_constraints_v1::ZwpPointerConstraintsV1, request: zwp_pointer_constraints_v1::Request, _data: &(), _dhandle: &wayland_server::DisplayHandle, data_init: &mut wayland_server::DataInit<'_, Self>, ) { match request { zwp_pointer_constraints_v1::Request::LockPointer { id, surface, pointer, lifetime, .. } => { if state.default_seat.has_lock(&surface) { resource.post_error( zwp_pointer_constraints_v1::Error::AlreadyConstrained, "There already exists a pointer constraint for that surface on this seat.", ); return; } let wp_locked_pointer = data_init.init(id, ()); let oneshot = lifetime.into_result().ok() == Some(zwp_pointer_constraints_v1::Lifetime::Oneshot); state .default_seat .create_lock(pointer, surface, wp_locked_pointer, oneshot); } zwp_pointer_constraints_v1::Request::ConfinePointer { id, .. } => { // We don't support confined pointers. data_init.init(id, ()); } zwp_pointer_constraints_v1::Request::Destroy => (), _ => (), } } } impl wayland_server::Dispatch for Compositor { fn request( _state: &mut Self, _client: &wayland_server::Client, _resource: &zwp_locked_pointer_v1::ZwpLockedPointerV1, _request: zwp_locked_pointer_v1::Request, _data: &(), _dhandle: &wayland_server::DisplayHandle, _data_init: &mut wayland_server::DataInit<'_, Self>, ) { } fn destroyed( state: &mut Self, _client: wayland_server::backend::ClientId, resource: &zwp_locked_pointer_v1::ZwpLockedPointerV1, _data: &(), ) { state.default_seat.destroy_lock(resource); } } impl wayland_server::Dispatch for Compositor { fn request( _state: &mut Self, _client: &wayland_server::Client, _resource: &zwp_confined_pointer_v1::ZwpConfinedPointerV1, _request: zwp_confined_pointer_v1::Request, _data: &(), _dhandle: &wayland_server::DisplayHandle, _data_init: &mut wayland_server::DataInit<'_, Self>, ) { } } ================================================ FILE: mm-server/src/session/compositor/dispatch/wp_presentation.rs ================================================ // Copyright 2024 Colin Marc // // 
SPDX-License-Identifier: BUSL-1.1

use wayland_protocols::wp::presentation_time::server::{wp_presentation, wp_presentation_feedback};
use wayland_server::Resource as _;

use crate::session::compositor::{surface::SurfaceKey, Compositor};

// Global binding for wp_presentation. On bind, we immediately advertise the
// clock that all presentation timestamps are expressed in.
impl wayland_server::GlobalDispatch for Compositor {
    fn bind(
        _state: &mut Self,
        _handle: &wayland_server::DisplayHandle,
        _client: &wayland_server::Client,
        resource: wayland_server::New,
        _global_data: &(),
        data_init: &mut wayland_server::DataInit<'_, Self>,
    ) {
        let wp_presentation = data_init.init(resource, ());

        // Tell the client which clock to compare presentation timestamps
        // against. CLOCK_MONOTONIC matches what we use internally.
        wp_presentation.clock_id(libc::CLOCK_MONOTONIC as u32)
    }
}

// Request handling for the wp_presentation global itself.
impl wayland_server::Dispatch for Compositor {
    fn request(
        state: &mut Self,
        _client: &wayland_server::Client,
        _resource: &wp_presentation::WpPresentation,
        request: wp_presentation::Request,
        _data: &(),
        _dhandle: &wayland_server::DisplayHandle,
        data_init: &mut wayland_server::DataInit<'_, Self>,
    ) {
        match request {
            wp_presentation::Request::Feedback {
                surface,
                callback: id,
            } => {
                // The wl_surface's userdata is its key into our surface table.
                if let Some(surface_key) = surface.data::() {
                    let wp_presentation_feedback = data_init.init(id, *surface_key);

                    // for wl_output in state.output_proxies.iter().filter(|wl_output|
                    // wl_output.id().same_client_as(surface.id())) {
                    //     wp_presentation_feedback.sync_output()
                    // }

                    let surface = state
                        .surfaces
                        .get_mut(*surface_key)
                        .expect("surface has no entry");

                    // Stash the feedback object on the surface. NOTE(review):
                    // presumably it is fired elsewhere once the surface is
                    // actually presented — confirm against the surface code.
                    surface.pending_feedback = Some(wp_presentation_feedback);
                }
            }
            wp_presentation::Request::Destroy => (),
            _ => (),
        }
    }
}

// wp_presentation_feedback objects carry no handled client requests, so the
// handler body is empty.
impl wayland_server::Dispatch for Compositor {
    fn request(
        _state: &mut Self,
        _client: &wayland_server::Client,
        _resource: &wp_presentation_feedback::WpPresentationFeedback,
        _request: wp_presentation_feedback::Request,
        _data: &SurfaceKey,
        _dhandle: &wayland_server::DisplayHandle,
        _data_init: &mut wayland_server::DataInit<'_, Self>,
    ) {
    }
}

================================================ FILE: mm-server/src/session/compositor/dispatch/wp_relative_pointer.rs ================================================ // Copyright
2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1

use wayland_protocols::wp::relative_pointer::zv1::server::{
    zwp_relative_pointer_manager_v1, zwp_relative_pointer_v1,
};

use crate::session::compositor::Compositor;

// Global binding for the relative-pointer manager. No per-global state is
// needed, so the userdata is ().
impl wayland_server::GlobalDispatch for Compositor {
    fn bind(
        _state: &mut Self,
        _handle: &wayland_server::DisplayHandle,
        _client: &wayland_server::Client,
        resource: wayland_server::New,
        _global_data: &(),
        data_init: &mut wayland_server::DataInit<'_, Self>,
    ) {
        data_init.init(resource, ());
    }
}

// Manager requests: the only meaningful one is GetRelativePointer, which
// registers a relative-pointer object against a wl_pointer on the default
// seat.
impl wayland_server::Dispatch for Compositor {
    fn request(
        state: &mut Self,
        _client: &wayland_server::Client,
        _resource: &zwp_relative_pointer_manager_v1::ZwpRelativePointerManagerV1,
        request: zwp_relative_pointer_manager_v1::Request,
        _data: &(),
        _dhandle: &wayland_server::DisplayHandle,
        data_init: &mut wayland_server::DataInit<'_, Self>,
    ) {
        match request {
            zwp_relative_pointer_manager_v1::Request::GetRelativePointer { id, pointer } => {
                let wp_relative_pointer = data_init.init(id, ());

                // The seat tracks which wl_pointer this relative pointer
                // belongs to, so relative motion can be routed correctly.
                state
                    .default_seat
                    .get_relative_pointer(wp_relative_pointer, pointer);
            }
            zwp_relative_pointer_manager_v1::Request::Destroy => (),
            _ => (),
        }
    }
}

// Relative-pointer objects send only server->client events; we just need to
// unregister them from the seat when the client destroys them.
impl wayland_server::Dispatch for Compositor {
    fn request(
        _state: &mut Self,
        _client: &wayland_server::Client,
        _resource: &zwp_relative_pointer_v1::ZwpRelativePointerV1,
        _request: zwp_relative_pointer_v1::Request,
        _data: &(),
        _dhandle: &wayland_server::DisplayHandle,
        _data_init: &mut wayland_server::DataInit<'_, Self>,
    ) {
    }

    fn destroyed(
        state: &mut Self,
        _client: wayland_server::backend::ClientId,
        resource: &zwp_relative_pointer_v1::ZwpRelativePointerV1,
        _data: &(),
    ) {
        state.default_seat.destroy_relative_pointer(resource);
    }
}

================================================ FILE: mm-server/src/session/compositor/dispatch/wp_text_input.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use wayland_protocols::wp::text_input::zv3::server::{
zwp_text_input_manager_v3, zwp_text_input_v3, }; use crate::session::compositor::Compositor; impl wayland_server::GlobalDispatch for Compositor { fn bind( _state: &mut Self, _handle: &wayland_server::DisplayHandle, _client: &wayland_server::Client, resource: wayland_server::New, _global_data: &(), data_init: &mut wayland_server::DataInit<'_, Self>, ) { data_init.init(resource, ()); } } impl wayland_server::Dispatch for Compositor { fn request( state: &mut Self, _client: &wayland_server::Client, _resource: &zwp_text_input_manager_v3::ZwpTextInputManagerV3, request: zwp_text_input_manager_v3::Request, _data: &(), _dhandle: &wayland_server::DisplayHandle, data_init: &mut wayland_server::DataInit<'_, Self>, ) { match request { zwp_text_input_manager_v3::Request::GetTextInput { id, .. } => { let wp_text_input = data_init.init(id, ()); state.default_seat.get_text_input(wp_text_input); } zwp_text_input_manager_v3::Request::Destroy => (), _ => (), } } } impl wayland_server::Dispatch for Compositor { fn request( _state: &mut Self, _client: &wayland_server::Client, _resource: &zwp_text_input_v3::ZwpTextInputV3, _request: zwp_text_input_v3::Request, _data: &(), _dhandle: &wayland_server::DisplayHandle, _data_init: &mut wayland_server::DataInit<'_, Self>, ) { } fn destroyed( state: &mut Self, _client: wayland_server::backend::ClientId, resource: &zwp_text_input_v3::ZwpTextInputV3, _data: &(), ) { state.default_seat.destroy_text_input(resource); } } ================================================ FILE: mm-server/src/session/compositor/dispatch/xdg_shell.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use wayland_protocols::xdg::shell::server::{ xdg_popup, xdg_positioner, xdg_surface, xdg_toplevel, xdg_wm_base, }; use wayland_server::Resource as _; use crate::session::compositor::{ surface::{SurfaceKey, SurfaceRole}, Compositor, }; impl wayland_server::GlobalDispatch for Compositor { fn bind( _state: &mut 
Self, _handle: &wayland_server::DisplayHandle, _client: &wayland_server::Client, resource: wayland_server::New, _global_data: &(), data_init: &mut wayland_server::DataInit<'_, Self>, ) { data_init.init(resource, ()); } } impl wayland_server::Dispatch for Compositor { fn request( state: &mut Self, _client: &wayland_server::Client, resource: &xdg_wm_base::XdgWmBase, request: xdg_wm_base::Request, _data: &(), _dhandle: &wayland_server::DisplayHandle, data_init: &mut wayland_server::DataInit<'_, Self>, ) { match request { xdg_wm_base::Request::CreatePositioner { id } => { // Not used yet. data_init.init(id, ()); } xdg_wm_base::Request::GetXdgSurface { id, surface } => { let surface_id = surface .data::() .expect("surface has no userdata"); let surface = state .surfaces .get(*surface_id) .expect("surface has no entry"); if surface.content.is_some() { resource.post_error( xdg_surface::Error::AlreadyConstructed, "The surface already has a buffer attached.", ); } data_init.init(id, *surface_id); } xdg_wm_base::Request::Pong { .. } => (), xdg_wm_base::Request::Destroy => (), _ => unreachable!(), } } } impl wayland_server::Dispatch for Compositor { fn request( state: &mut Self, _client: &wayland_server::Client, resource: &xdg_surface::XdgSurface, request: xdg_surface::Request, data: &SurfaceKey, _dhandle: &wayland_server::DisplayHandle, data_init: &mut wayland_server::DataInit<'_, Self>, ) { match request { xdg_surface::Request::GetToplevel { id } => { let xdg_toplevel = data_init.init(id, *data); if !state.set_surface_role( *data, SurfaceRole::XdgToplevel { xdg_surface: resource.clone(), xdg_toplevel, }, ) { resource.post_error(xdg_wm_base::Error::Role, "Surface already has a role."); } } xdg_surface::Request::GetPopup { id, .. 
} => { data_init.init(id, ()); } xdg_surface::Request::AckConfigure { serial } => { let surface = state.surfaces.get_mut(*data).expect("surface has no entry"); match surface.pending_configure { Some(s) if serial == s => { surface.pending_configure = None; } Some(s) if serial < s => (), _ => resource.post_error(xdg_surface::Error::InvalidSerial, "Invalid serial."), } } xdg_surface::Request::SetWindowGeometry { .. } => (), xdg_surface::Request::Destroy => (), _ => unreachable!(), } } fn destroyed( state: &mut Self, _client: wayland_server::backend::ClientId, resource: &xdg_surface::XdgSurface, data: &SurfaceKey, ) { // Check that there isn't a surface role created from this object. match state .surfaces .get(*data) .and_then(|s| s.role.current.as_ref()) { Some(SurfaceRole::XdgToplevel { xdg_surface, .. }) if xdg_surface == resource => { resource.post_error( xdg_surface::Error::DefunctRoleObject, "The role created from this object must be destroyed first.", ); } _ => (), } } } impl wayland_server::Dispatch for Compositor { fn request( _state: &mut Self, _client: &wayland_server::Client, _resource: &xdg_positioner::XdgPositioner, _request: xdg_positioner::Request, _data: &(), _dhandle: &wayland_server::DisplayHandle, _data_init: &mut wayland_server::DataInit<'_, Self>, ) { // TODO we don't support popups at present. } } impl wayland_server::Dispatch for Compositor { fn request( _state: &mut Self, _client: &wayland_server::Client, resource: &xdg_popup::XdgPopup, request: xdg_popup::Request, _data: &(), _dhandle: &wayland_server::DisplayHandle, _data_init: &mut wayland_server::DataInit<'_, Self>, ) { match request { xdg_popup::Request::Grab { .. } => { // Immediately dismiss the popup, because we don't support popups. // resource.post_error(xdg_popup::Error::InvalidGrab, "Popups are not // supported."); resource.popup_done(); } xdg_popup::Request::Reposition { .. 
} => (), xdg_popup::Request::Destroy => (), _ => unreachable!(), } // TODO we don't support popups at present. } } impl wayland_server::Dispatch for Compositor { fn request( state: &mut Self, _client: &wayland_server::Client, _resource: &xdg_toplevel::XdgToplevel, request: xdg_toplevel::Request, data: &SurfaceKey, _dhandle: &wayland_server::DisplayHandle, _data_init: &mut wayland_server::DataInit<'_, Self>, ) { match request { xdg_toplevel::Request::SetParent { .. } => (), xdg_toplevel::Request::SetTitle { title } => { state .surfaces .get_mut(*data) .expect("surface has no entry") .title = Some(title); } xdg_toplevel::Request::SetAppId { app_id } => { state .surfaces .get_mut(*data) .expect("surface has no entry") .app_id = Some(app_id); } xdg_toplevel::Request::ShowWindowMenu { .. } => (), xdg_toplevel::Request::Move { .. } => (), xdg_toplevel::Request::Resize { .. } => (), xdg_toplevel::Request::SetMaxSize { .. } => (), xdg_toplevel::Request::SetMinSize { .. } => (), xdg_toplevel::Request::SetMaximized => (), xdg_toplevel::Request::UnsetMaximized => (), xdg_toplevel::Request::SetFullscreen { .. } => (), xdg_toplevel::Request::UnsetFullscreen => (), xdg_toplevel::Request::SetMinimized => (), xdg_toplevel::Request::Destroy => (), _ => unreachable!(), } } fn destroyed( state: &mut Self, _client: wayland_server::backend::ClientId, resource: &xdg_toplevel::XdgToplevel, data: &SurfaceKey, ) { let surface = state.surfaces.get_mut(*data); match surface.as_ref().and_then(|s| s.role.current.as_ref()) { Some(SurfaceRole::XdgToplevel { xdg_toplevel, .. 
}) if xdg_toplevel == resource => { surface.unwrap().role.current = None; state.unmap_surface(*data); } _ => (), } } } ================================================ FILE: mm-server/src/session/compositor/dispatch/xwayland_shell.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use tracing::trace; use wayland_protocols::xwayland::shell::v1::server::{xwayland_shell_v1, xwayland_surface_v1}; use wayland_server::Resource as _; use crate::session::compositor::{ surface::{SurfaceKey, SurfaceRole}, ClientState, Compositor, }; impl wayland_server::GlobalDispatch for Compositor { fn bind( _state: &mut Self, _handle: &wayland_server::DisplayHandle, _client: &wayland_server::Client, resource: wayland_server::New, _global_data: &(), data_init: &mut wayland_server::DataInit<'_, Self>, ) { data_init.init(resource, ()); } fn can_view(client: wayland_server::Client, _global_data: &()) -> bool { client .get_data::() .map(|data| data.xwayland) .unwrap_or_default() } } impl wayland_server::Dispatch for Compositor { fn request( _state: &mut Self, _client: &wayland_server::Client, _resource: &xwayland_shell_v1::XwaylandShellV1, request: xwayland_shell_v1::Request, _data: &(), _dhandle: &wayland_server::DisplayHandle, data_init: &mut wayland_server::DataInit<'_, Self>, ) { match request { xwayland_shell_v1::Request::GetXwaylandSurface { id, surface } => { let surface_id = surface .data::() .expect("surface has no userdata"); data_init.init(id, *surface_id); } xwayland_shell_v1::Request::Destroy => (), _ => unreachable!(), } } } impl wayland_server::Dispatch for Compositor { fn request( state: &mut Self, _client: &wayland_server::Client, resource: &xwayland_surface_v1::XwaylandSurfaceV1, request: xwayland_surface_v1::Request, data: &SurfaceKey, _dhandle: &wayland_server::DisplayHandle, _data_init: &mut wayland_server::DataInit<'_, Self>, ) { match request { xwayland_surface_v1::Request::SetSerial { serial_lo, 
serial_hi,
            } => {
                let serial = super::make_u64(serial_hi, serial_lo);
                trace!(serial, "associating xwindow with surface");

                if !state.set_surface_role(*data, SurfaceRole::XWayland { serial }) {
                    resource.post_error(
                        xwayland_shell_v1::Error::Role,
                        "Surface already has a role.",
                    );
                }
            }
            xwayland_surface_v1::Request::Destroy => {}
            _ => unreachable!(),
        }
    }
}

================================================ FILE: mm-server/src/session/compositor/dispatch.rs ================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

mod wl_buffer;
mod wl_compositor;
mod wl_data_device_manager;
mod wl_drm;
mod wl_output;
mod wl_seat;
mod wl_shm;
mod wp_fractional_scale;
mod wp_linux_dmabuf;
mod wp_linux_drm_syncobj;
mod wp_pointer_constraints;
mod wp_presentation;
mod wp_relative_pointer;
mod wp_text_input;
mod xdg_shell;
mod xwayland_shell;

// Packs a hi/lo u32 pair (the wire format several wayland protocols use for
// 64-bit values) into a single u64.
fn make_u64(hi: u32, lo: u32) -> u64 {
    ((hi as u64) << 32) | lo as u64
}

================================================ FILE: mm-server/src/session/compositor/oneshot_render.rs ================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

use bytes::Bytes;
use drm_fourcc::DrmFourcc;
use image::ImageEncoder as _;
use tracing::instrument;

use crate::session::compositor::buffers::{fourcc_bpp, PlaneMetadata};
use crate::vulkan::VkHostBuffer;

// Encodes the contents of a CPU-visible shm buffer as a PNG.
//
// `buffer.access` is assumed to point at least `stride * height` mapped,
// readable bytes — the caller must guarantee this (TODO confirm at call
// sites). Panics if the fourcc is not a supported 4-bytes-per-pixel format.
// Returns the encoded PNG as `Bytes`, or an error from the PNG encoder.
#[instrument(skip_all)]
pub fn shm_to_png(buffer: &VkHostBuffer, format: PlaneMetadata) -> anyhow::Result {
    // Needs to be updated if we start supporting float shm buffers.
    match fourcc_bpp(format.format) {
        Some(4) => (),
        _ => panic!("shm texture has unexpected format"),
    }

    // The mapped memory is only read here, so borrow it immutably; building
    // a unique `&mut [u8]` over shared mapped memory is aliasing-UB-prone.
    let src = unsafe {
        std::slice::from_raw_parts(
            buffer.access as *const u8,
            (format.stride * format.height) as usize,
        )
    };

    // Copy out of the mapped region before doing the (slow) conversion.
    let buf = src.to_vec();

    let stride = format.stride as usize;
    let width = format.width as usize;
    let height = format.height as usize;
    let format = format.format;

    // For png, we need rgba8 with no padding. Walk the copy one stride at a
    // time so any per-row padding (stride > width * 4) is skipped instead of
    // being misread as pixel data.
    let mut out = Vec::with_capacity(width * height * 4);
    match format {
        DrmFourcc::Argb8888 | DrmFourcc::Xrgb8888 => {
            for row in buf.chunks_exact(stride) {
                // Packed [AX]RGB8888 lays out bytes as B, G, R, A/X; swizzle
                // to R, G, B, A for the encoder. NOTE(review): for Xrgb8888
                // the undefined X byte is passed through as alpha, matching
                // the previous behavior — consider forcing 0xff instead.
                for px in row[..width * 4].chunks_exact(4) {
                    let out_px = [px[2], px[1], px[0], px[3]];
                    out.extend_from_slice(&out_px);
                }
            }
        }
        _ => unreachable!(),
    }

    let mut png = std::io::Cursor::new(Vec::new());
    image::codecs::png::PngEncoder::new(&mut png).write_image(
        &out,
        width as u32,
        height as u32,
        image::ExtendedColorType::Rgba8,
    )?;

    Ok(png.into_inner().into())
}

================================================ FILE: mm-server/src/session/compositor/output.rs ================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

use wayland_server::{protocol::wl_output, Resource as _};

use crate::session::compositor::{Compositor, DisplayParams};

impl Compositor {
    pub fn emit_output_params(&mut self) {
        let params = self.display_params;
        for proxy in &self.output_proxies {
            configure_output(proxy, params);
        }
    }
}

pub fn configure_output(output: &wl_output::WlOutput, params: DisplayParams) {
    let version = output.version();
    if version >= 4 {
        output.name("MM".to_string());
        output.description("Magic Mirror Virtual Display".to_string());
    }

    output.geometry(
        0,
        0,
        params.width as i32,
        params.height as i32,
        wl_output::Subpixel::None,
        "Magic Mirror".to_string(),
        "Virtual Display".to_string(),
        wl_output::Transform::Normal,
    );

    output.mode(
        wl_output::Mode::Current | wl_output::Mode::Preferred,
        params.width as i32,
        params.height as i32,
params.framerate as i32 * 1000, ); if version >= 2 { // In the case of fractional scale, we always send the next integer // (and then scale down for clients that don't support fractional scale). let scale: f64 = params.ui_scale.into(); output.scale(scale.ceil() as i32); output.done(); } } ================================================ FILE: mm-server/src/session/compositor/protocols/wayland-drm.xml ================================================ Copyright © 2008-2011 Kristian Høgsberg Copyright © 2010-2011 Intel Corporation Permission to use, copy, modify, distribute, and sell this software and its documentation for any purpose is hereby granted without fee, provided that\n the above copyright notice appear in all copies and that both that copyright notice and this permission notice appear in supporting documentation, and that the name of the copyright holders not be used in advertising or publicity pertaining to distribution of the software without specific, written prior permission. The copyright holders make no representations about the suitability of this software for any purpose. It is provided "as is" without express or implied warranty. THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. Bitmask of capabilities. 
================================================ FILE: mm-server/src/session/compositor/protocols/wl_drm.rs ================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]

use wayland_server;
use wayland_server::protocol::*;

// Server-side bindings for the legacy wl_drm protocol, generated at compile
// time from the bundled XML by wayland-scanner.
pub mod __interfaces {
    use wayland_server::backend as wayland_backend;
    use wayland_server::protocol::__interfaces::*;

    wayland_scanner::generate_interfaces!("src/session/compositor/protocols/wayland-drm.xml");
}

use self::__interfaces::*;

wayland_scanner::generate_server_code!("src/session/compositor/protocols/wayland-drm.xml");

pub use wl_drm::*;

================================================ FILE: mm-server/src/session/compositor/protocols.rs ================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

pub mod wl_drm;

================================================ FILE: mm-server/src/session/compositor/sealed.rs ================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

use std::{
    ffi::CStr,
    fs::File,
    io::{Seek as _, SeekFrom, Write as _},
    os::fd::{AsFd, AsRawFd, BorrowedFd},
};

use rustix::fs::{fcntl_add_seals, memfd_create, MemfdFlags, SealFlags};

// An anonymous in-memory file (memfd) whose contents are written once and
// then sealed against any further writing or resizing, making it safe to
// share the fd with untrusting peers (e.g. as a wl_keyboard keymap).
pub struct SealedFile {
    file: File,
    // Length of the contents in bytes, recorded at creation time.
    size: usize,
}

impl SealedFile {
    // Creates a sealed memfd named `name` containing `contents`, with the
    // cursor rewound to the start.
    pub fn new(name: impl AsRef, contents: &[u8]) -> anyhow::Result {
        // ALLOW_SEALING is required for the fcntl_add_seals call below.
        let fd = memfd_create(
            name.as_ref(),
            MemfdFlags::CLOEXEC | MemfdFlags::ALLOW_SEALING,
        )?;

        let mut file: File = fd.into();
        file.write_all(contents)?;
        file.flush()?;
        file.seek(SeekFrom::Start(0))?;

        // Forbid writing, shrinking and growing, and (via SEAL) forbid
        // adding any further seals.
        fcntl_add_seals(
            &file,
            SealFlags::SEAL | SealFlags::WRITE | SealFlags::SHRINK | SealFlags::GROW,
        )?;

        Ok(Self {
            file,
            size: contents.len(),
        })
    }

    // Returns the length of the sealed contents in bytes.
    pub fn size(&self) -> usize {
        self.size
    }
}

impl AsRawFd for SealedFile {
    fn as_raw_fd(&self) -> std::os::unix::prelude::RawFd {
        self.file.as_raw_fd()
    }
}

impl AsFd for SealedFile { fn
as_fd(&self) -> BorrowedFd<'_> { self.file.as_fd() } } ================================================ FILE: mm-server/src/session/compositor/seat.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use bytes::Bytes; use cstr::cstr; use hashbrown::{HashMap, HashSet}; use tracing::{debug, warn}; use wayland_protocols::wp::{ pointer_constraints::zv1::server::zwp_locked_pointer_v1, relative_pointer::zv1::server::zwp_relative_pointer_v1, text_input::zv3::server::zwp_text_input_v3, }; use wayland_server::{ protocol::{wl_keyboard, wl_pointer, wl_surface}, Resource as _, }; use super::{surface, ControlMessage, SessionEvent}; use crate::session::compositor::{ buffers::BufferBacking, oneshot_render::shm_to_png, sealed::SealedFile, serial::Serial, surface::{surface_vector_to_buffer, SurfaceKey, SurfaceRole}, Compositor, }; use crate::session::EPOCH; #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum KeyState { Pressed, Released, Repeat, } #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum ButtonState { Pressed, Released, } impl From for wl_pointer::ButtonState { fn from(value: ButtonState) -> Self { match value { ButtonState::Pressed => wl_pointer::ButtonState::Pressed, ButtonState::Released => wl_pointer::ButtonState::Released, } } } #[derive(Debug)] struct Pointer { client_id: wayland_server::backend::ClientId, pending_frame: bool, } #[derive(Debug)] struct PointerLock { wl_pointer: wl_pointer::WlPointer, wp_locked_pointer: zwp_locked_pointer_v1::ZwpLockedPointerV1, oneshot: bool, defunct: bool, } #[derive(Default, Debug, PartialEq, Eq)] pub enum Cursor { #[default] Unset, Hidden, Surface { surface: SurfaceKey, needs_render: bool, // Contains the hotspot in physical coords. rendered: Option<(Bytes, glam::UVec2)>, // In surface coords, supplied by the client. 
hotspot: glam::UVec2, }, } pub struct Seat { pointers: HashMap, relative_pointers: HashMap, pointer_focus: Option<(wl_surface::WlSurface, glam::DVec2)>, pointer_coords: Option, // Global coords. keyboards: HashSet, text_inputs: HashSet, keyboard_focus: Option, keymap: SealedFile, inactive_pointer_locks: HashMap, pointer_lock: Option<(wl_surface::WlSurface, PointerLock)>, cursor: Cursor, } impl Default for Seat { fn default() -> Self { let keymap = SealedFile::new( cstr!("mm-keymap"), include_bytes!(concat!(env!("OUT_DIR"), "/keymaps/iso_us.txt")), ) .expect("failed to create keymap sealed fd"); Self { pointers: HashMap::default(), relative_pointers: HashMap::default(), pointer_focus: None, pointer_coords: None, keyboards: HashSet::default(), text_inputs: HashSet::default(), keyboard_focus: None, keymap, inactive_pointer_locks: HashMap::default(), pointer_lock: None, cursor: Cursor::default(), } } } impl Seat { pub fn get_pointer(&mut self, wl_pointer: wl_pointer::WlPointer) { let client_id = wl_pointer.client().expect("pointer has no client").id(); self.pointers.insert( wl_pointer, Pointer { client_id, pending_frame: false, }, ); } pub fn get_relative_pointer( &mut self, wp_relative_pointer: zwp_relative_pointer_v1::ZwpRelativePointerV1, wl_pointer: wl_pointer::WlPointer, ) { self.relative_pointers .insert(wp_relative_pointer, wl_pointer); } pub fn get_keyboard(&mut self, wl_keyboard: wl_keyboard::WlKeyboard) { use std::os::fd::AsFd as _; wl_keyboard.keymap( wl_keyboard::KeymapFormat::XkbV1, self.keymap.as_fd(), self.keymap.size() as u32, ); // We disable client-side key repeat handling, and instead // simulate it. 
if wl_keyboard.version() >= 4 { wl_keyboard.repeat_info(0, i32::MAX); } self.keyboards.insert(wl_keyboard); } pub fn get_text_input(&mut self, wp_text_input: zwp_text_input_v3::ZwpTextInputV3) { self.text_inputs.insert(wp_text_input); } pub fn destroy_pointer(&mut self, wl_pointer: &wl_pointer::WlPointer) { self.pointers.remove(wl_pointer); self.inactive_pointer_locks .retain(|_, lock| &lock.wl_pointer != wl_pointer); match &mut self.pointer_lock { Some(( _, PointerLock { wl_pointer: p, defunct, .. }, )) if p == wl_pointer => { *defunct = true; } _ => (), } } pub fn destroy_relative_pointer( &mut self, wp_relative_pointer: &zwp_relative_pointer_v1::ZwpRelativePointerV1, ) { self.relative_pointers.remove(wp_relative_pointer); } pub fn destroy_keyboard(&mut self, wl_keyboard: &wl_keyboard::WlKeyboard) { self.keyboards.remove(wl_keyboard); } pub fn destroy_text_input(&mut self, wp_text_input: &zwp_text_input_v3::ZwpTextInputV3) { self.text_inputs.remove(wp_text_input); } pub fn lift_pointer(&mut self, serial: &Serial) { self.pointer_coords = None; if let Some((surf, _)) = self.pointer_focus.take() { if let Some(client) = surf.client() { for (wl_pointer, p) in self .pointers .iter_mut() .filter(|(_, p)| p.client_id == client.id()) { p.pending_frame = true; wl_pointer.leave(serial.next(), &surf); } } } } // Moves the pointer to a location. pub fn update_pointer( &mut self, serial: &Serial, focus: wl_surface::WlSurface, surface_coords: impl Into, global_coords: impl Into, ) { if self.pointer_lock.is_some() { return; } self.pointer_coords = Some(global_coords.into()); let new_coords = surface_coords.into(); match self.pointer_focus.as_mut() { Some((surf, coords)) if surf == &focus => { // Round before checking for location equality. 
if coords.round().as_ivec2() != new_coords.round().as_ivec2() { for (wl_pointer, p) in self .pointers .iter_mut() .filter(|(p, _)| p.is_alive() && p.id().same_client_as(&surf.id())) { p.pending_frame = true; wl_pointer.motion( EPOCH.elapsed().as_millis() as u32, new_coords.x, new_coords.y, ); } } return; } _ => (), } if let Some((surf, _)) = self.pointer_focus.take() { for (wl_pointer, p) in self .pointers .iter_mut() .filter(|(p, _)| p.is_alive() && p.id().same_client_as(&surf.id())) { p.pending_frame = true; wl_pointer.leave(serial.next(), &surf); } } for (wl_pointer, p) in self .pointers .iter_mut() .filter(|(p, _)| p.is_alive() && p.id().same_client_as(&focus.id())) { p.pending_frame = true; wl_pointer.enter(serial.next(), &focus, new_coords.x, new_coords.y); } self.pointer_focus = Some((focus, new_coords)); } pub fn relative_pointer_motion(&mut self, surface_vector: impl Into) { if self.pointer_lock.is_none() { return; } let Some((focus, _)) = self.pointer_focus.as_ref() else { return; }; let vector = surface_vector.into(); let now = EPOCH.elapsed().as_micros() as u64; let utime_hi = (now >> 32) as u32; let utime_lo = (now & 0xffffffff) as u32; for (wp_relative_pointer, wl_pointer) in self .relative_pointers .iter() .filter(|(p, _)| p.id().same_client_as(&focus.id())) { wp_relative_pointer .relative_motion(utime_hi, utime_lo, vector.x, vector.y, vector.x, vector.y); if let Some(p) = self.pointers.get_mut(wl_pointer) { p.pending_frame = true; } } } pub fn pointer_axis(&mut self, surface_vector: impl Into) { let vector = surface_vector.into(); let now = EPOCH.elapsed().as_millis() as u32; for (wl_pointer, p) in self.focused_pointers() { if vector.x != 0.0 { wl_pointer.axis(now, wl_pointer::Axis::HorizontalScroll, vector.x); p.pending_frame = true; } if vector.y != 0.0 { wl_pointer.axis(now, wl_pointer::Axis::VerticalScroll, vector.y); p.pending_frame = true; } } } pub fn pointer_axis_discrete(&mut self, vector: impl Into) { let vector = vector.into(); for 
(wl_pointer, p) in self.focused_pointers() { if vector.x != 0.0 { send_axis_discrete(wl_pointer, wl_pointer::Axis::HorizontalScroll, vector.x); p.pending_frame = true; } if vector.y != 0.0 { send_axis_discrete(wl_pointer, wl_pointer::Axis::VerticalScroll, vector.y); p.pending_frame = true; } } } pub fn pointer_input( &mut self, serial: &Serial, surface: wl_surface::WlSurface, surface_coords: impl Into, global_coords: impl Into, button_code: u32, state: ButtonState, ) { let coords = surface_coords.into(); self.update_pointer(serial, surface.clone(), coords, global_coords); for (wl_pointer, p) in self.focused_pointers() { p.pending_frame = true; wl_pointer.button( serial.next(), EPOCH.elapsed().as_millis() as u32, button_code, state.into(), ); } } pub fn pointer_frame(&mut self) { for (wl_pointer, p) in self.pointers.iter_mut() { if p.pending_frame { if wl_pointer.version() >= 5 { wl_pointer.frame(); } p.pending_frame = false; } } } fn focused_pointers(&mut self) -> impl Iterator { let client_id = self .pointer_focus .as_ref() .and_then(|(focus, _)| focus.client()) .map(|c| c.id()); self.pointers .iter_mut() .filter(move |(p, _)| p.is_alive() && p.client().map(|c| c.id()) == client_id) } pub fn set_keyboard_focus(&mut self, serial: &Serial, surface: Option) { if self.keyboard_focus == surface { return; } if let Some(old_surf) = self.keyboard_focus.take() { for wl_keyboard in self .keyboards .iter() .filter(|k| k.id().same_client_as(&old_surf.id())) { wl_keyboard.leave(serial.next(), &old_surf); } for wp_text_input in self .text_inputs .iter() .filter(|ti| ti.id().same_client_as(&old_surf.id())) { wp_text_input.leave(&old_surf); } } if let Some(new_surf) = surface.as_ref() { for wl_keyboard in self .keyboards .iter() .filter(|k| k.id().same_client_as(&new_surf.id())) { wl_keyboard.enter(serial.next(), new_surf, Vec::new()); // TODO we're responsible for sending the list of depressed // modifiers. For our use case, this isn't very important. 
wl_keyboard.modifiers(serial.next(), 0, 0, 0, 0); } for wp_text_input in self .text_inputs .iter() .filter(|ti| ti.id().same_client_as(&new_surf.id())) { wp_text_input.enter(new_surf); } } self.keyboard_focus = surface; } pub fn keyboard_input(&mut self, serial: &Serial, scancode: u32, state: KeyState) { let state = match state { KeyState::Pressed => wl_keyboard::KeyState::Pressed, KeyState::Released => wl_keyboard::KeyState::Released, KeyState::Repeat => unreachable!(), }; for wl_keyboard in self.focused_keyboards() { wl_keyboard.key( serial.next(), EPOCH.elapsed().as_millis() as u32, scancode, state, ); } } pub fn focused_keyboards(&self) -> impl Iterator { let client_id = self .keyboard_focus .as_ref() .and_then(|focus| focus.client()) .map(|c| c.id()); self.keyboards .iter() .filter(move |k| k.is_alive() && k.client().map(|c| c.id()) == client_id) } pub fn has_text_input(&mut self) -> bool { self.focused_text_inputs().count() > 0 } pub fn text_input_char(&mut self, serial: &Serial, ch: char) { if let Some(focus) = self.keyboard_focus.as_ref() { for wp_text_input in self .text_inputs .iter() .filter(|ti| ti.id().same_client_as(&focus.id())) { wp_text_input.commit_string(Some(ch.into())); wp_text_input.done(serial.next()) } } } fn focused_text_inputs(&mut self) -> impl Iterator { let client_id = self .keyboard_focus .as_ref() .and_then(|focus| focus.client()) .map(|c| c.id()); self.text_inputs .iter() .filter(move |ti| ti.is_alive() && ti.client().map(|c| c.id()) == client_id) } pub fn pointer_focus(&self) -> Option { self.pointer_focus.as_ref().map(|(surf, _)| surf).cloned() } #[allow(dead_code)] pub fn keyboard_focus(&self) -> Option { self.keyboard_focus.clone() } pub fn pointer_coords(&self) -> Option { self.pointer_coords } pub fn pointer_locked(&self) -> Option { if self.pointer_lock.is_some() { Some(self.pointer_coords.unwrap_or_default()) } else { None } } pub fn has_lock(&self, wl_surface: &wl_surface::WlSurface) -> bool { if self .pointer_lock .as_ref() 
.is_some_and(|(surf, lock)| surf == wl_surface && !lock.defunct)
{
    return true;
}

// Check for inactive locks that aren't already destroyed.
self.inactive_pointer_locks
    .get(wl_surface)
    .is_some_and(|lock| !lock.defunct)
}

/// Registers a new pointer lock for `wl_surface`. The lock starts out
/// inactive; it is activated later by `update_pointer_lock` when the
/// surface gains pointer focus.
///
/// Panics if an inactive lock already exists for the surface — the
/// dispatch layer is expected to reject duplicate constraints first.
pub fn create_lock(
    &mut self,
    wl_pointer: wl_pointer::WlPointer,
    wl_surface: wl_surface::WlSurface,
    wp_locked_pointer: zwp_locked_pointer_v1::ZwpLockedPointerV1,
    oneshot: bool,
) {
    if self
        .inactive_pointer_locks
        .insert(
            wl_surface,
            PointerLock {
                wp_locked_pointer,
                wl_pointer,
                oneshot,
                defunct: false,
            },
        )
        .is_some()
    {
        panic!("constraint already exists for surface");
    }
}

/// Handles destruction of a zwp_locked_pointer_v1 by the client.
/// Inactive locks are removed immediately; if the destroyed lock is
/// currently active it is only flagged `defunct` here and actually
/// cleared in `update_pointer_lock`.
pub fn destroy_lock(&mut self, wp_locked_pointer: &zwp_locked_pointer_v1::ZwpLockedPointerV1) {
    self.inactive_pointer_locks
        .retain(|_, lock| &lock.wp_locked_pointer != wp_locked_pointer);

    match &mut self.pointer_lock {
        Some((
            _,
            PointerLock {
                wp_locked_pointer: lock,
                defunct,
                ..
            },
        )) if lock == wp_locked_pointer => {
            // Cleared in update_pointer_lock.
            *defunct = true;
        }
        _ => (),
    }
}
}

impl Compositor {
/// Routes a control-plane input event to the default seat. Pointer
/// events carry session-space coordinates; they are resolved to the
/// surface under the cursor (and surface-local coordinates) before
/// being forwarded.
pub fn handle_input_event(&mut self, ev: ControlMessage) {
    match ev {
        ControlMessage::KeyboardInput {
            key_code,
            char,
            state,
        } => {
            // Attempt to send the char via text-input, then fall back to
            // sending the keypress.
            match char {
                Some(c) if self.default_seat.has_text_input() => {
                    // Text-input only sees press/repeat; releases are
                    // meaningless for committed characters.
                    if matches!(state, KeyState::Pressed | KeyState::Repeat) {
                        self.default_seat.text_input_char(&self.serial, c);
                    }
                }
                _ => {
                    let mut state = state;

                    // Simulate a press and release on repeat.
if state == KeyState::Repeat { self.default_seat.keyboard_input( &self.serial, key_code, KeyState::Released, ); state = KeyState::Pressed } self.default_seat .keyboard_input(&self.serial, key_code, state); } } } ControlMessage::PointerInput { x, y, button_code, state, } => { if let Some((id, surface_coords)) = self.surface_under((x, y)) { let wl_surface = self.surfaces[id].wl_surface.clone(); self.default_seat.pointer_input( &self.serial, wl_surface, surface_coords, (x, y), button_code, state, ); } else { self.default_seat.lift_pointer(&self.serial); } } ControlMessage::PointerMotion(x, y) => { if let Some((id, surface_coords)) = self.surface_under((x, y)) { let wl_surface = self.surfaces[id].wl_surface.clone(); self.default_seat.update_pointer( &self.serial, wl_surface, surface_coords, (x, y), ); } else { self.default_seat.lift_pointer(&self.serial); } } ControlMessage::RelativePointerMotion(x, y) => { let scale = self .default_seat .pointer_focus() .and_then(|wl_surface| wl_surface.data().copied()) .and_then(|id| self.surfaces.get(id)) .map(|surf| surf.effective_scale()) .unwrap_or_default(); let vector = surface::buffer_vector_to_surface((x, y), scale); self.default_seat.relative_pointer_motion(vector); } ControlMessage::PointerAxis(x, y) => { let scale = self .default_seat .pointer_focus() .and_then(|wl_surface| wl_surface.data().copied()) .and_then(|id| self.surfaces.get(id)) .map(|surf| surf.effective_scale()) .unwrap_or_default(); // Note that the protocol and wayland use inverted vectors. let vector = surface::buffer_vector_to_surface((-x, -y), scale); self.default_seat.pointer_axis(vector); } ControlMessage::PointerAxisDiscrete(x, y) => { self.default_seat.pointer_axis_discrete((-x, -y)); } ControlMessage::PointerEntered => { // Nothing to do - we update focus when the pointer moves. 
}
ControlMessage::PointerLeft => {
    self.default_seat.lift_pointer(&self.serial);
}
_ => unreachable!(),
}
}

/// Reconciles the active pointer lock with the current pointer focus.
///
/// If the focused surface already holds a live, non-defunct lock, this
/// is a no-op. Otherwise the current lock (if any) is released — and,
/// unless it was defunct or oneshot, parked back in
/// `inactive_pointer_locks` so it can re-activate later — and any
/// inactive lock belonging to the newly focused surface is activated.
/// Lock transitions are reported to the session via
/// `SessionEvent::PointerLocked` / `SessionEvent::PointerReleased`.
pub fn update_pointer_lock(&mut self) {
    let seat = &mut self.default_seat;
    let focus = seat.pointer_focus();

    if let Some((wl_surface, lock)) = &seat.pointer_lock {
        if !lock.defunct
            && lock.wp_locked_pointer.is_alive()
            && Some(wl_surface) == focus.as_ref()
        {
            // Same surface, active lock, nothing to do.
            return;
        }
    }

    let prev_lock = if let Some((surf, lock)) = seat.pointer_lock.take() {
        lock.wp_locked_pointer.unlocked();
        let lock_clone = lock.wp_locked_pointer.clone();

        // Re-park reusable locks so they re-engage when the surface
        // regains focus.
        if !lock.defunct && !lock.oneshot && lock.wp_locked_pointer.is_alive() {
            seat.inactive_pointer_locks.insert(surf, lock);
        }

        Some(lock_clone)
    } else {
        None
    };

    if let Some((wl_surface, lock)) = focus
        .as_ref()
        .and_then(|s| seat.inactive_pointer_locks.remove_entry(s))
    {
        lock.wp_locked_pointer.locked();
        seat.pointer_lock = Some((wl_surface, lock));

        let (x, y) = seat.pointer_coords().unwrap_or_default().into();
        debug!(surface = ?focus, x, y, "activating pointer lock");

        self.session_handle
            .dispatch(SessionEvent::PointerLocked(x, y));
    } else if let Some(wp_locked_pointer) = prev_lock {
        // NOTE(review): unlocked() was already sent when the lock was
        // taken above, so this appears to emit the event a second time
        // — confirm whether the duplicate send is intended.
        wp_locked_pointer.unlocked();

        debug!("pointer lock released");
        self.session_handle.dispatch(SessionEvent::PointerReleased);
    }
}

/// Applies a cursor set by a client pointer. Ignored unless the
/// requesting client currently holds pointer focus. A surface-backed
/// cursor claims the Cursor role for its surface (rejected if the
/// surface already has a different role).
pub fn set_cursor(&mut self, wl_pointer: &wl_pointer::WlPointer, cursor: Cursor) {
    if !self
        .default_seat
        .pointer_focus
        .as_ref()
        .is_some_and(|(wl_surface, _)| wl_surface.id().same_client_as(&wl_pointer.id()))
    {
        return;
    }

    match cursor {
        Cursor::Unset => unreachable!(),
        Cursor::Surface {
            surface: id,
            ..
} => { let Some(surface) = self.surfaces.get_mut(id) else { return; }; if surface.role.current.is_some() && surface.role.current != Some(SurfaceRole::Cursor) { debug!( ?surface, "ignoring cursor role for surface with preexisting role" ); return; } surface.role.current = Some(SurfaceRole::Cursor); } _ => (), } let old_cursor = std::mem::replace(&mut self.default_seat.cursor, cursor); if let Cursor::Surface { surface: id, .. } = old_cursor { if let Some(surface) = self.surfaces.get_mut(id) { surface.role.current = None; self.unmap_surface(id); } } self.dispatch_cursor(); } pub fn dispatch_cursor(&mut self) { match &mut self.default_seat.cursor { Cursor::Unset => (), Cursor::Surface { needs_render, rendered: Some((img, hotspot)), .. } if !*needs_render => { self.session_handle.dispatch(SessionEvent::CursorUpdate { image: Some(img.clone()), icon: None, hotspot_x: hotspot.x, hotspot_y: hotspot.y, }); } Cursor::Surface { .. } => { // The cursor will be dispatched after it's rendered during the // next frame. } Cursor::Hidden => self.session_handle.dispatch(SessionEvent::CursorUpdate { image: None, icon: None, hotspot_x: 0, hotspot_y: 0, }), } } pub fn render_cursor(&mut self) -> anyhow::Result<()> { let Cursor::Surface { surface, hotspot, needs_render, rendered, } = &mut self.default_seat.cursor else { return Ok(()); }; if !*needs_render { return Ok(()); } let surface = &mut self.surfaces[*surface]; let buffer = surface.content.as_ref().map(|c| &self.buffers[c.buffer]); let image = match buffer.map(|b| &b.backing) { None => return Ok(()), // No content yet, try again later. Some(BufferBacking::Dmabuf { .. }) => { warn!("ignoring dmabuf cursor texture"); // TODO: for now, we set the cursor to the default. *needs_render = false; self.session_handle.dispatch(SessionEvent::CursorUpdate { image: None, icon: Some(cursor_icon::CursorIcon::Default), hotspot_x: 0, hotspot_y: 0, }); return Ok(()); } Some(BufferBacking::Shm { format, staging_buffer, .. 
}) => { debug!("rendering cursor to png"); shm_to_png(staging_buffer, *format)? } }; let scale = surface.effective_scale(); let hotspot = surface_vector_to_buffer(*hotspot, scale).as_uvec2(); self.session_handle.dispatch(SessionEvent::CursorUpdate { image: Some(image.clone()), icon: None, hotspot_x: hotspot.x, hotspot_y: hotspot.y, }); *rendered = Some((image, hotspot)); *needs_render = false; if let Some(cb) = surface.frame_callback.current.take() { cb.done(EPOCH.elapsed().as_millis() as u32); } Ok(()) } } fn send_axis_discrete(pointer: &wl_pointer::WlPointer, axis: wl_pointer::Axis, value: f64) { let version = pointer.version(); if (5..8).contains(&version) { pointer.axis_discrete(axis, value.trunc() as i32); } else if version >= 8 { pointer.axis_value120(axis, (value * 120.0).round() as i32); } } ================================================ FILE: mm-server/src/session/compositor/serial.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use std::sync::atomic::{AtomicU32, Ordering}; pub struct Serial(AtomicU32); const START: u32 = 1000; impl Serial { pub fn new() -> Self { Self(AtomicU32::new(START)) } pub fn next(&self) -> u32 { // Wrap around, but skip zero. let _ = self .0 .compare_exchange(0, START, Ordering::AcqRel, Ordering::SeqCst); self.0.fetch_add(1, Ordering::AcqRel) } } ================================================ FILE: mm-server/src/session/compositor/shm.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use std::{ os::fd::{AsFd, OwnedFd}, sync::{Arc, RwLock}, }; use anyhow::bail; use rustix::mm::{mmap, munmap, MapFlags, ProtFlags}; use wayland_server::protocol::{wl_shm, wl_shm_pool}; // TODO: malicious or broken clients can cause us to crash with SIGBUS. We // should handle that with a exception handler. slotmap::new_key_type! 
{ pub struct ShmPoolKey; } pub struct ShmPool { pub _wl_shm: wl_shm::WlShm, pub _wl_shm_pool: wl_shm_pool::WlShmPool, pub pool: Arc>, } #[derive(Debug)] pub struct Pool { fd: OwnedFd, ptr: *mut u8, pub size: usize, } impl Pool { pub fn new(fd: OwnedFd, size: usize) -> anyhow::Result { let ptr = unsafe { map(&fd, size)? }; Ok(Pool { fd, size, ptr }) } pub fn data(&self, offset: usize, len: usize) -> &[u8] { assert!(offset + len <= self.size); unsafe { std::slice::from_raw_parts(self.ptr.add(offset), len) } } pub fn resize(&mut self, new_size: usize) -> anyhow::Result<()> { if self.ptr.is_null() { bail!("mmap defunct"); } self.unmap(); let ptr = unsafe { map(&self.fd, new_size)? }; self.ptr = ptr; self.size = new_size; Ok(()) } fn unmap(&mut self) { assert!(!self.ptr.is_null()); unsafe { munmap(self.ptr as *mut _, self.size).expect("munmap failed") } self.ptr = std::ptr::null_mut(); self.size = 0; } } unsafe impl Send for Pool {} unsafe impl Sync for Pool {} unsafe fn map(fd: impl AsFd, size: usize) -> anyhow::Result<*mut u8> { if size == 0 { bail!("zero-sized mmap"); } let ptr = mmap( std::ptr::null_mut(), size, ProtFlags::READ | ProtFlags::WRITE, MapFlags::SHARED, fd, 0, )?; Ok(ptr as *mut u8) } impl Drop for Pool { fn drop(&mut self) { if !self.ptr.is_null() { self.unmap(); } } } ================================================ FILE: mm-server/src/session/compositor/stack.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use tracing::{debug, trace}; use wayland_server::Resource as _; use crate::session::compositor::{ buffers::BufferKey, surface::{self, SurfaceKey, SurfaceRole}, Compositor, }; impl Compositor { /// Displays the surface, if it has not yet been displayed. 
/// Displays the surface by pushing it onto the surface stack, if it is
/// not already there. The surface must have been configured first, and
/// is announced to the matching client's wl_output proxies on entry.
/// A buffer/configure size mismatch is logged but tolerated.
pub fn map_surface(&mut self, id: SurfaceKey, buffer_id: BufferKey) {
    if self.surface_stack.contains(&id) {
        return;
    }

    let surface = &self.surfaces[id];
    let config = surface.configuration.expect("mapping unconfigured surface");

    let buffer = &self.buffers[buffer_id];
    let buffer_size = buffer.dimensions();

    trace!(?surface, ?buffer_size, "mapping surface");

    if buffer_size != config.size {
        debug!(
            expected = ?(config.size.x, config.size.y),
            actual = ?(buffer_size.x, buffer_size.y),
            "unexpected buffer dimensions"
        );
    }

    // Send wl_surface.enter for each output proxy owned by this
    // surface's client.
    for wl_output in self
        .output_proxies
        .iter()
        .filter(|wl_output| wl_output.id().same_client_as(&surface.wl_surface.id()))
    {
        surface.wl_surface.enter(wl_output);
    }

    trace!(?surface, "surface mapped");
    self.surface_stack.push(id);
}

/// Removes any configuration and attached buffer from a surface. This
/// happens if a nil buffer is committed or the role object is destroyed
/// by the client.
pub fn unmap_surface(&mut self, id: SurfaceKey) {
    let surface = &mut self.surfaces[id];
    trace!(?surface, "surface unmapped");

    // Drop content and all configure state; the surface must be
    // reconfigured before it can be mapped again.
    surface.content = None;
    surface.pending_configure = None;
    surface.configuration = None;
    surface.sent_configuration = None;

    self.surface_stack.retain(|v| *v != id);
}

/// Raises an X11 window to the top.
pub fn raise_x11_surface(&mut self, serial: u64) {
    // Resolve the xwayland serial to its position in the stack, if the
    // window is currently mapped.
    let stack_position = self
        .xwayland_surface_lookup
        .get(&serial)
        .and_then(|surface_id| self.surface_stack.iter().rposition(|id| surface_id == id));

    if let Some(pos) = stack_position {
        self.raise_surface_at(pos);
    }
}

/// Moves the surface at `position` to the top of the stack.
fn raise_surface_at(&mut self, position: usize) {
    let id = self.surface_stack.remove(position);

    // Guard the trace-level Debug formatting behind an enabled check.
    if tracing::event_enabled!(tracing::Level::TRACE) {
        trace!(surf = ?&self.surfaces[id], "raising surface");
    }

    self.surface_stack.push(id);
}

/// Updates focus and surface configurations based on any changes made to
/// the stack order, mapping and unmapping of surfaces, etc.
pub fn update_focus_and_visibility(&mut self, active: bool) -> anyhow::Result<()> { let top_surface = if active { self.surface_stack.last().cloned() } else { None }; if top_surface == self.active_surface { return Ok(()); } // Mark the old active surface as occluded. if let Some(conf) = self .active_surface .take() .and_then(|id| self.surfaces.get_mut(id)) .and_then(|surf| surf.configuration.as_mut()) { conf.visibility = surface::Visibility::Occluded; } if let Some(focus) = top_surface { let surf = &mut self.surfaces[focus]; trace!(active, focus = ?surf, "setting focus"); let conf = surf .configuration .as_mut() .expect("mapped surface with no configuration"); let is_fullscreen = conf.fullscreen; conf.visibility = surface::Visibility::Active; self.active_surface = Some(focus); self.default_seat .set_keyboard_focus(&self.serial, Some(surf.wl_surface.clone())); // Xwayland maintains its own focus. if let Some(SurfaceRole::XWayland { serial }) = &surf.role.current { let xwm = self.xwm.as_mut().unwrap(); let id = xwm.xwindow_for_serial(*serial).map(|xwin| xwin.id); xwm.set_focus(id)?; } else if let Some(xwm) = &mut self.xwm { // The xwayland window is occluded by a wayland window. xwm.set_focus(None)?; } trace!(?surf, depth = self.surface_stack.len(), "focus changed"); // The surface under the cursor could be different from the top one. if let Some(coords) = self.default_seat.pointer_coords() { if let Some((pointer_focus, surface_coords)) = self.surface_under(coords) { let wl_surface = self.surfaces[pointer_focus].wl_surface.clone(); self.default_seat.update_pointer( &self.serial, wl_surface, surface_coords, coords, ); } } // If the top window isn't covering the entire output, make sure we // uncover the windows below. 
if !is_fullscreen { for surface_id in self.surface_stack.iter().rev().skip(1) { let conf = self.surfaces[*surface_id] .configuration .as_mut() .expect("mapped surface with no configuration"); conf.visibility = surface::Visibility::Visible; if conf.fullscreen { break; } } } } else { self.default_seat.set_keyboard_focus(&self.serial, None); self.default_seat.lift_pointer(&self.serial); if let Some(xwm) = &mut self.xwm { xwm.set_focus(None)?; } } Ok(()) } pub fn surface_under( &mut self, coords: impl Into, ) -> Option<(SurfaceKey, glam::DVec2)> { let coords = coords.into(); for id in self.surface_stack.iter().rev() { let surf = &self.surfaces[*id]; if let Some(surface_coords) = surf.surface_coords(coords.round().as_uvec2()) { return Some((*id, surface_coords)); } } None } /// Returns true if all visible surfaces have settled (with no configure /// pending) and have content. pub fn surfaces_ready(&self) -> bool { if self.surface_stack.is_empty() { return false; } // Iterate backwards to find the first fullscreen window. let first_visible_idx = self.surface_stack.iter().rposition(|id| { self.surfaces[*id] .configuration .is_some_and(|conf| conf.fullscreen) }); for id in &self.surface_stack[first_visible_idx.unwrap_or_default()..] { let surf = &self.surfaces[*id]; if surf.content.is_none() || surf.pending_configure.is_some() { debug!( ?surf, content_is_some = surf.content.is_some(), pending_configure = ?surf.pending_configure, "surface not ready!" 
); return false; } } true } } ================================================ FILE: mm-server/src/session/compositor/surface.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use std::time; use tracing::{debug, trace, warn}; use wayland_protocols::{ wp::{ fractional_scale::v1::server::wp_fractional_scale_v1, linux_drm_syncobj::v1::server::wp_linux_drm_syncobj_surface_v1, presentation_time::server::wp_presentation_feedback, }, xdg::shell::server::{xdg_surface, xdg_toplevel}, }; use wayland_server::{ protocol::{wl_callback, wl_surface}, Resource as _, }; use super::buffers::SyncobjTimelinePoint; use crate::{ pixel_scale::PixelScale, session::compositor::{ buffers::{BufferBacking, BufferKey}, xwayland, Compositor, DisplayParams, }, vulkan::VkTimelinePoint, }; slotmap::new_key_type! { pub struct SurfaceKey; } #[derive(Clone)] pub struct Surface { pub wl_surface: wl_surface::WlSurface, pub wp_fractional_scale: Option, pub pending_buffer: Option, pub pending_feedback: Option, pub frame_callback: DoubleBuffered, pub buffer_scale: DoubleBuffered, pub content: Option, pub wp_syncobj_surface: Option, pub pending_acquire_point: Option, pub pending_release_point: Option, pub role: DoubleBuffered, pub sent_configuration: Option, pub configuration: Option, pub pending_configure: Option, pub title: Option, pub app_id: Option, } impl Surface { pub fn new(wl_surface: wl_surface::WlSurface) -> Self { Self { wl_surface, wp_fractional_scale: None, pending_buffer: None, pending_feedback: None, frame_callback: DoubleBuffered::default(), buffer_scale: DoubleBuffered::default(), content: None, wp_syncobj_surface: None, pending_acquire_point: None, pending_release_point: None, role: DoubleBuffered::default(), sent_configuration: None, configuration: None, pending_configure: None, title: None, app_id: None, } } pub fn reconfigure(&mut self, params: DisplayParams, xwin: Option<&xwayland::XWindow>) { // Keep current 
visibility, or start new windows visible. let visibility = self .configuration .map_or(Visibility::Visible, |c| c.visibility); let conf = match self.role.current { None | Some(SurfaceRole::Cursor) => None, Some(SurfaceRole::XdgToplevel { .. }) => Some(SurfaceConfiguration { topleft: glam::UVec2::ZERO, size: (params.width, params.height).into(), scale: params.ui_scale, visibility, fullscreen: true, }), Some(SurfaceRole::XWayland { .. }) => { match xwin { None => None, Some(xwayland::XWindow { x, y, width, height, override_redirect, .. }) if *override_redirect => Some(SurfaceConfiguration { topleft: (*x, *y).into(), size: (*width, *height).into(), scale: PixelScale::ONE, visibility, fullscreen: false, }), Some(_) => { Some(SurfaceConfiguration { topleft: glam::UVec2::ZERO, size: (params.width, params.height).into(), scale: PixelScale::ONE, // XWayland always uses scale one. visibility, fullscreen: true, }) } } } }; self.configuration = conf; } /// Takes a point in the physical configuration space, and returns /// wayland-specific logical surface coordinates. 
pub fn surface_coords(&self, coords: impl Into) -> Option { let conf = self.configuration?; let buffer_size = self .content .as_ref() .map(|content| content.dimensions.as_dvec2())?; let coords = coords.into(); let topleft = conf.topleft.as_dvec2(); let bottomright = topleft + conf.size.as_dvec2(); if conf.fullscreen || (coords.x >= topleft.x && coords.y >= topleft.y && coords.x < bottomright.x && coords.y < bottomright.y) { let offset_coords = coords - conf.topleft.as_dvec2(); let buffer_coords = offset_coords * (buffer_size / conf.size.as_dvec2()); Some(buffer_vector_to_surface( buffer_coords, self.effective_scale(), )) } else { None } } pub fn effective_scale(&self) -> PixelScale { self.buffer_scale.current.unwrap_or_default() } } impl std::fmt::Debug for Surface { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let name = self .title .as_ref() .or(self.app_id.as_ref()) .map(|s| s.as_str()) .unwrap_or("Untitled"); let (role, id, extra) = match &self.role.current { None => ("wl_surface", self.wl_surface.id().protocol_id() as u64, ""), Some(SurfaceRole::Cursor) => ( "wl_surface", self.wl_surface.id().protocol_id() as u64, " [CURSOR]", ), Some(SurfaceRole::XdgToplevel { xdg_toplevel, .. 
}) => { ("xdg_toplevel", xdg_toplevel.id().protocol_id() as u64, "") } Some(SurfaceRole::XWayland { serial }) => ("xwayland", *serial, ""), }; write!(f, "<{:?} {}@{}{}>", name, role, id, extra)?; Ok(()) } } #[derive(Debug, Clone, Eq, PartialEq)] pub struct DoubleBuffered { pub pending: Option, pub current: Option, } impl Default for DoubleBuffered { fn default() -> Self { Self { pending: None, current: None, } } } #[derive(Debug)] pub enum CommitResult { NoChange, Added(T), Replaced(T, T), } impl DoubleBuffered { pub fn promote(&mut self) -> CommitResult { if self.pending.is_none() || self.pending == self.current { self.pending = None; return CommitResult::NoChange; } match (self.pending.take(), self.current.take()) { (Some(v), None) => { self.current = Some(v.clone()); CommitResult::Added(v) } (Some(new), Some(old)) if new != old => { self.current = Some(new.clone()); CommitResult::Replaced(old, new) } _ => unreachable!(), } } } #[derive(Debug, Clone, Eq, PartialEq)] pub enum SurfaceRole { XdgToplevel { xdg_surface: xdg_surface::XdgSurface, xdg_toplevel: xdg_toplevel::XdgToplevel, }, XWayland { serial: u64, }, Cursor, } #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum Visibility { Occluded, Visible, Active, } /// The configuration to be sent to the surface. #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub struct SurfaceConfiguration { // x, y, width, and height are in the "physical" coordinate space. x and y // are not relevant to xdg_shell surfaces. pub topleft: glam::UVec2, pub size: glam::UVec2, pub scale: PixelScale, pub fullscreen: bool, pub visibility: Visibility, } impl SurfaceConfiguration {} #[derive(Debug, Clone, Eq, PartialEq)] pub enum PendingBuffer { Attach(BufferKey), Detach, } #[derive(Clone)] pub struct ContentUpdate { pub buffer: BufferKey, /// Whether the client is waiting on a buffer.release(). pub needs_release: bool, /// Used for explicit sync. 
pub explicit_sync: Option<(SyncobjTimelinePoint, SyncobjTimelinePoint)>, /// If the content update is in use, this timeline point indicates when it /// will be free. pub tp_done: Option, /// The real dimensions of the buffer. This is how surface coordinates are /// determined in wayland. pub dimensions: glam::UVec2, pub wp_presentation_feedback: Option, } pub struct PendingPresentationFeedback( pub wp_presentation_feedback::WpPresentationFeedback, pub VkTimelinePoint, ); pub struct CommitError(pub xdg_surface::Error, pub String); impl Compositor { /// Handles wl_surface.commit. pub fn surface_commit(&mut self, id: SurfaceKey) -> Result<(), CommitError> { let display_params = self.display_params; let surface = &mut self.surfaces[id]; // Buffer swap happens first. We handle it a bit differently because // buffers can be removed, not just overwritten. let mut feedback = surface.pending_feedback.take(); match surface.pending_buffer.take() { Some(PendingBuffer::Detach) => { self.unmap_surface(id); return Ok(()); } Some(PendingBuffer::Attach(buffer_id)) => { // Creates a content update. let buffer = &mut self.buffers[buffer_id]; // If we haven't yet sent a configure, it's an error to // manipulate a buffer. if (matches!(surface.role.current, Some(SurfaceRole::XdgToplevel { .. })) && surface.sent_configuration.is_none()) || surface.role.pending.is_some() { return Err(CommitError( xdg_surface::Error::UnconfiguredBuffer, "The surface must be configured prior to attaching a buffer.".to_string(), )); } // If we're waiting on an ack_configure, poke the client again. if surface.pending_configure.is_some() { debug!(pending_configure = ?surface.pending_configure, "pending configure, resending frame callback"); if let Some(fb) = feedback.take() { fb.discarded(); } if let Some(cb) = surface.frame_callback.pending.take() { cb.done(self.serial.next()); } } // In the case of shm buffer, we do a copy and immediately release it. 
let mut needs_release = true; if let BufferBacking::Shm { staging_buffer, format, pool, dirty, .. } = &mut buffer.backing { // A large shm buffer is probably a sign that something has gone wrong. if format.width > 500 && format.height > 500 && surface.content.is_none() { warn!( "client appears to be using software rendering; performance may be \ degraded" ); } let len = (format.stride * format.height) as usize; let pool = pool.read().unwrap(); let contents = pool.data(format.offset as usize, len); staging_buffer.copy_from_slice(contents); *dirty = true; needs_release = false; buffer.wl_buffer.release(); } // Check for explicit sync. let explicit_sync = surface .wp_syncobj_surface .as_ref() .and_then(|wp_syncobj_surface| { let Some(acquire_point) = surface.pending_acquire_point.take() else { wp_syncobj_surface.post_error( wp_linux_drm_syncobj_surface_v1::Error::NoAcquirePoint, "No acquire point set.", ); return None; }; let Some(release_point) = surface.pending_release_point.take() else { wp_syncobj_surface.post_error( wp_linux_drm_syncobj_surface_v1::Error::NoReleasePoint, "No release point set.", ); return None; }; Some((acquire_point, release_point)) }); if needs_release && explicit_sync.is_some() { // No need for release events if explicit sync is used. needs_release = false; } let old_content = surface.content.replace(ContentUpdate { buffer: buffer_id, needs_release, explicit_sync, tp_done: None, dimensions: buffer.dimensions(), wp_presentation_feedback: feedback, }); if let Some(old_content) = old_content { // Enqueue the buffer for release. self.in_flight_buffers.push(old_content); } } None => (), } // Configure surfaces which have a newly applied role. 
// Configure surfaces which have a newly applied role. A role may only
// be set once per surface; a replacement is a protocol violation.
match surface.role.promote() {
    CommitResult::Replaced(_, _) => panic!("surface already has a role"),
    CommitResult::Added(role) => {
        let xwin = if let SurfaceRole::XWayland { serial } = role {
            // Index the surface by its xwayland serial so X11 events
            // can find it later.
            self.xwayland_surface_lookup.insert(serial, id);
            self.xwm.as_ref().unwrap().xwindow_for_serial(serial)
        } else {
            None
        };

        surface.reconfigure(display_params, xwin);
    }
    _ => (),
}

// Promote the rest of the double-buffered state.
surface.buffer_scale.promote();
surface.frame_callback.promote();

trace!(?surface, "surface commit");

// Map the surface, if we've fulfilled all requirements.
let is_mappable = match surface.role.current {
    None | Some(SurfaceRole::Cursor) => false,
    Some(SurfaceRole::XdgToplevel { .. }) => {
        // Toplevels need acked configuration and attached content.
        surface.pending_configure.is_none() && surface.content.is_some()
    }
    Some(SurfaceRole::XWayland { serial }) => {
        if surface.content.is_none() {
            false
        } else if let Some(xwin) = self.xwm.as_mut().unwrap().xwindow_for_serial(serial) {
            // Copy over title and app_id.
            surface.title = xwin.title.clone();
            surface.app_id = xwin.app_id.clone();
            xwin.mapped
        } else {
            false
        }
    }
};

if is_mappable {
    if let Some(ContentUpdate { buffer, .. }) = surface.content {
        self.map_surface(id, buffer);
    }
}

Ok(())
}

/// Cleans up for a surface destroyed by the client.
pub fn surface_destroyed(&mut self, id: SurfaceKey) {
    self.unmap_surface(id);
    let surf = self.surfaces.remove(id);

    // Drop the xwayland serial index entry, if this was an X11 surface.
    if let Some(SurfaceRole::XWayland { serial }) = surf.and_then(|s| s.role.current) {
        self.xwayland_surface_lookup.remove(&serial);
    }
}

/// Sets a pending role for the surface. Returns false if the surface
/// already has a role or no longer exists.
pub fn set_surface_role(&mut self, id: SurfaceKey, role: SurfaceRole) -> bool {
    match self.surfaces.get_mut(id) {
        Some(ref mut surf) if surf.role.current.is_none() => {
            surf.role.pending = Some(role);
            true
        }
        _ => false,
    }
}

/// Checks if any surfaces have outdated configuration, and sends a
/// configure event.
pub fn configure_surfaces(&mut self) -> anyhow::Result<()> { for (_id, surface) in self.surfaces.iter_mut() { if surface.configuration.is_none() || surface.configuration == surface.sent_configuration { continue; } trace!(?surface, conf = ?surface.configuration, "configuring surface"); let conf = surface.configuration.unwrap(); match &surface.role.current { None => panic!("surface configured without role"), Some(SurfaceRole::XdgToplevel { xdg_surface, xdg_toplevel, }) => { if conf.scale.is_fractional() { warn!( scale = ?conf.scale, "fractional scale not supported, using next integer" ) } let scale = conf.scale.ceil(); if surface.wl_surface.version() >= 6 { let scale: f64 = scale.into(); surface.wl_surface.preferred_buffer_scale(scale as i32); } if let Some(wp_fractional_scale) = &surface.wp_fractional_scale { wp_fractional_scale.preferred_scale((f64::from(scale) * 120.0) as u32); } let mut states = match conf.visibility { Visibility::Occluded if xdg_toplevel.version() >= 6 => { vec![xdg_toplevel::State::Suspended] } Visibility::Occluded => vec![], Visibility::Visible => vec![], Visibility::Active => vec![xdg_toplevel::State::Activated], }; if conf.fullscreen { states.push(xdg_toplevel::State::Fullscreen); } let raw_states = states .into_iter() .flat_map(|st| { let v: u32 = st.into(); v.to_ne_bytes() }) .collect::>(); // Wayland wants the "logical" width and height to be // pre-scaling. That means if we want a 1200x600 buffer // at 2x ui scale, we need to configure it for 600x300. let scaled: glam::IVec2 = buffer_vector_to_surface(conf.size, scale).as_ivec2(); let serial = self.serial.next(); xdg_toplevel.configure(scaled.x, scaled.y, raw_states); xdg_surface.configure(serial); surface.sent_configuration = Some(conf); surface.pending_configure = Some(serial); } Some(SurfaceRole::XWayland { serial }) => { let xwm = self.xwm.as_mut().unwrap(); match xwm.xwindow_for_serial(*serial) { Some(xwayland::XWindow { id, override_redirect, .. 
}) if !override_redirect => { xwm.configure_window(*id, conf)?; } _ => (), } surface.sent_configuration = Some(conf); } Some(SurfaceRole::Cursor) => unreachable!(), } } Ok(()) } /// Sends complete presentation feedback. Note that since this is called as /// an idle operation, the timestamps are only accurate if the compositor /// thread is woken within a reasonable timeframe. pub fn send_presentation_feedback(&mut self) -> anyhow::Result<()> { let time = rustix::time::clock_gettime(rustix::time::ClockId::Monotonic); let tv_sec_hi = (time.tv_sec >> 32) as u32; let tv_sec_lo = (time.tv_sec & 0xFFFFFFFF) as u32; let tv_nsec = time.tv_nsec as u32; let framerate = self.display_params.framerate; let refresh = time::Duration::from_secs_f64(1.0 / framerate as f64).as_nanos() as u32; let mut still_pending = Vec::with_capacity(self.pending_presentation_feedback.len()); for PendingPresentationFeedback(fb, tp) in self.pending_presentation_feedback.drain(..) { if unsafe { !tp.poll()? } { still_pending.push(PendingPresentationFeedback(fb, tp)); continue; } for wl_output in self .output_proxies .iter() .filter(|wl_output| wl_output.id().same_client_as(&fb.id())) { fb.sync_output(wl_output); } fb.presented( tv_sec_hi, tv_sec_lo, tv_nsec, refresh, 0, // seq_hi 0, // seq_lo wp_presentation_feedback::Kind::empty(), ); } self.pending_presentation_feedback = still_pending; Ok(()) } } /// Converts a vector of pixels into surface-local or "logical" coordinates /// as wayland expects them. pub fn buffer_vector_to_surface(coords: impl Into, scale: PixelScale) -> glam::DVec2 { let scale: f64 = scale.into(); coords.into() / scale } /// Converts a surface-local vector (sometimes called "logical" coordinates) /// into pixels. 
pub fn surface_vector_to_buffer(coords: impl Into, scale: PixelScale) -> glam::DVec2 { let scale: f64 = scale.into(); coords.into() * scale } ================================================ FILE: mm-server/src/session/compositor/xwayland/xwm.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use std::{ collections::BTreeMap, os::fd::{AsFd as _, BorrowedFd}, }; use hashbrown::HashSet; use tracing::{debug, trace}; use x11rb::{ connection::Connection as _, cookie::VoidCookie, protocol::{ self, composite::ConnectionExt as _, xproto::{self, ConnectionExt as _}, }, rust_connection::{ConnectionError, DefaultStream, RustConnection as X11Connection}, wrapper::ConnectionExt as _, }; use crate::{ pixel_scale::PixelScale, session::compositor::{ surface::{self, SurfaceConfiguration}, Compositor, }, }; x11rb::atom_manager! { /// Atoms used by the XWM and X11Surface types pub Atoms: AtomsCookie { WL_SURFACE_SERIAL, UTF8_STRING, WM_HINTS, WM_PROTOCOLS, WM_TAKE_FOCUS, WM_CHANGE_STATE, _NET_WM_NAME, _NET_WM_MOVERESIZE, _NET_WM_STATE_MODAL, WM_S0, WM_STATE, _NET_WM_CM_S0, _NET_SUPPORTED, _NET_ACTIVE_WINDOW, _NET_CLIENT_LIST, _NET_CLIENT_LIST_STACKING, _NET_WM_STATE, _NET_WM_STATE_MAXIMIZED_VERT, _NET_WM_STATE_MAXIMIZED_HORZ, _NET_WM_STATE_HIDDEN, _NET_WM_STATE_FULLSCREEN, _NET_WM_STATE_FOCUSED, _NET_SUPPORTING_WM_CHECK, } } pub struct XWindow { pub id: u32, pub serial: Option, pub title: Option, pub app_id: Option, pub x: u32, pub y: u32, pub width: u32, pub height: u32, pub states: HashSet, pub protocols: HashSet, pub hint_input: bool, pub override_redirect: bool, pub mapped: bool, // Whether MapRequest/MapNotify has been recieved. 
} impl std::fmt::Debug for XWindow { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut title = self.title.as_deref().unwrap_or("Untitled"); if title.is_empty() { title = "Untitled"; } let serial = if let Some(s) = self.serial { format!(" serial={}", s) } else { "".to_string() }; let override_redirect = if self.override_redirect { " [OR]" } else { "" }; write!( f, "<{} \"{}\"{}{}>", self.id, title, serial, override_redirect )?; Ok(()) } } pub struct Xwm { conn: X11Connection, atoms: Atoms, wm_id: u32, screen: xproto::Screen, client_list: Vec, client_list_stacking: Vec, pub xwindows: BTreeMap, pub serials: BTreeMap, } impl Xwm { pub fn new(x11_socket: mio::net::UnixStream) -> anyhow::Result { let stream = DefaultStream::from_unix_stream(x11_socket.into())?.0; let conn = X11Connection::connect_to_stream(stream, 0)?; let atoms = Atoms::new(&conn)?.reply()?; let screen = conn.setup().roots[0].clone(); { let font = xproto::FontWrapper::open_font(&conn, "cursor".as_bytes())?; let cursor = xproto::CursorWrapper::create_glyph_cursor( &conn, font.font(), font.font(), 68, 69, 0, 0, 0, u16::MAX, u16::MAX, u16::MAX, )?; conn.change_window_attributes( screen.root, &xproto::ChangeWindowAttributesAux::default() .event_mask( xproto::EventMask::SUBSTRUCTURE_REDIRECT | xproto::EventMask::SUBSTRUCTURE_NOTIFY | xproto::EventMask::PROPERTY_CHANGE, // | xproto::EventMask::FOCUS_CHANGE, ) .cursor(cursor.cursor()), )?; } let wm_id = conn.generate_id()?; conn.create_window( screen.root_depth, wm_id, screen.root, 0, 0, 10, 10, 0, xproto::WindowClass::INPUT_OUTPUT, x11rb::COPY_FROM_PARENT, &Default::default(), )?; conn.set_selection_owner(wm_id, atoms.WM_S0, x11rb::CURRENT_TIME)?; conn.set_selection_owner(wm_id, atoms._NET_WM_CM_S0, x11rb::CURRENT_TIME)?; conn.composite_redirect_subwindows(screen.root, protocol::composite::Redirect::MANUAL)?; conn.change_property32( xproto::PropMode::REPLACE, screen.root, atoms._NET_SUPPORTED, xproto::AtomEnum::ATOM, &[ 
atoms._NET_WM_STATE, atoms._NET_WM_STATE_MAXIMIZED_HORZ, atoms._NET_WM_STATE_MAXIMIZED_VERT, atoms._NET_WM_STATE_HIDDEN, atoms._NET_WM_STATE_FULLSCREEN, atoms._NET_WM_STATE_MODAL, atoms._NET_WM_STATE_FOCUSED, atoms._NET_ACTIVE_WINDOW, atoms._NET_WM_MOVERESIZE, atoms._NET_CLIENT_LIST, atoms._NET_CLIENT_LIST_STACKING, ], )?; replace_window_list(&conn, screen.root, atoms._NET_ACTIVE_WINDOW, [0])?; replace_window_list(&conn, screen.root, atoms._NET_SUPPORTING_WM_CHECK, [wm_id])?; replace_window_list(&conn, wm_id, atoms._NET_SUPPORTING_WM_CHECK, [wm_id])?; conn.change_property8( xproto::PropMode::REPLACE, wm_id, atoms._NET_WM_NAME, atoms.UTF8_STRING, "Magic Mirror XWM".as_bytes(), )?; conn.flush()?; Ok(Self { conn, atoms, wm_id, screen, client_list: Vec::new(), client_list_stacking: Vec::new(), xwindows: BTreeMap::new(), serials: BTreeMap::new(), }) } pub fn display_fd(&self) -> BorrowedFd { self.conn.stream().as_fd() } pub fn xwindow_for_serial(&self, serial: u64) -> Option<&XWindow> { self.serials .get(&serial) .and_then(|id| self.xwindows.get(id)) } pub fn configure_window( &mut self, window: u32, conf: SurfaceConfiguration, ) -> anyhow::Result<()> { if let Some(xwin) = self.xwindows.get_mut(&window) { trace!(?xwin, ?conf, "configuring xwindow"); self.conn.configure_window( window, &xproto::ConfigureWindowAux::default() .x(conf.topleft.x as i32) .y(conf.topleft.y as i32) .width(conf.size.x) .height(conf.size.y) .border_width(0) .stack_mode(xproto::StackMode::ABOVE), )?; self.conn.send_event( false, window, xproto::EventMask::STRUCTURE_NOTIFY, xproto::ConfigureNotifyEvent { response_type: xproto::CONFIGURE_NOTIFY_EVENT, sequence: 0, event: window, window, above_sibling: x11rb::NONE, x: conf.topleft.x as i16, y: conf.topleft.y as i16, width: conf.size.x as u16, height: conf.size.y as u16, border_width: 0, override_redirect: false, }, )?; let old_states = xwin.states.clone(); match conf.visibility { surface::Visibility::Occluded => { 
xwin.states.insert(self.atoms._NET_WM_STATE_HIDDEN); xwin.states.remove(&self.atoms._NET_WM_STATE_FOCUSED); } surface::Visibility::Visible => { xwin.states.remove(&self.atoms._NET_WM_STATE_FOCUSED); xwin.states.remove(&self.atoms._NET_WM_STATE_HIDDEN); } surface::Visibility::Active => { xwin.states.remove(&self.atoms._NET_WM_STATE_HIDDEN); xwin.states.insert(self.atoms._NET_WM_STATE_FOCUSED); } } if conf.fullscreen { xwin.states.insert(self.atoms._NET_WM_STATE_FULLSCREEN); } else { xwin.states.remove(&self.atoms._NET_WM_STATE_FULLSCREEN); } if xwin.states != old_states { let values = xwin.states.iter().copied().collect::>(); if tracing::event_enabled!(tracing::Level::TRACE) { let names = values .iter() .copied() .map(|atom| get_atom_name(&self.conn, atom)) .collect::, _>>()?; trace!(?xwin, ?names, "setting states"); } self.conn.change_property32( xproto::PropMode::REPLACE, xwin.id, self.atoms._NET_WM_STATE, xproto::AtomEnum::ATOM, &values, )?; } self.conn.flush()?; } else { debug!(window, "skipping configure for dead window") } Ok(()) } pub fn set_focus(&self, window: Option) -> anyhow::Result<()> { let Some(xwin) = window.and_then(|id| self.xwindows.get(&id)) else { trace!("removing input focus"); self.conn.set_input_focus( xproto::InputFocus::NONE, x11rb::NONE, x11rb::CURRENT_TIME, )?; self.conn.flush()?; return Ok(()); }; replace_window_list( &self.conn, self.screen.root, self.atoms._NET_ACTIVE_WINDOW, [xwin.id], )?; // "Passive and Locally Active clients set the input field of WM_HINTS // to True, which indicates that they require window manager assistance // in acquiring the input focus." // TODO: for some reason this seems to cause problems, for example for // steam context menus, which flicker out immediately. 
if xwin.hint_input { trace!(?xwin, "setting input focus"); self.conn.set_input_focus( xproto::InputFocus::POINTER_ROOT, xwin.id, x11rb::CURRENT_TIME, )?; } // "Windows with the atom WM_TAKE_FOCUS in their WM_PROTOCOLS property // may receive a ClientMessage event from the window manager with // WM_TAKE_FOCUS..." if xwin.protocols.contains(&self.atoms.WM_TAKE_FOCUS) { trace!(?xwin, "sending TAKE_FOCUS"); let event = xproto::ClientMessageEvent::new( 32, xwin.id, self.atoms.WM_PROTOCOLS, [self.atoms.WM_TAKE_FOCUS, x11rb::CURRENT_TIME, 0, 0, 0], ); self.conn .send_event(false, xwin.id, xproto::EventMask::NO_EVENT, event)?; } self.conn.flush()?; Ok(()) } } impl Compositor { /// Adds xwayland as a client, returning a pollable FD for the xwm. pub fn insert_xwayland( &mut self, socket: mio::net::UnixStream, ) -> anyhow::Result> { debug!("starting xwm"); let xwm = Xwm::new(socket)?; Ok(self.xwm.insert(xwm).display_fd()) } pub fn dispatch_xwm(&mut self) -> anyhow::Result<()> { loop { match self.xwm.as_mut().unwrap().conn.poll_for_event()? { Some(ev) => handle_event(self, ev)?, None => return Ok(()), } } } pub fn delayed_map_xwin(&mut self, serial: u64) { let Some(xwin) = self.xwm.as_ref().unwrap().xwindow_for_serial(serial) else { return; }; let Some(surface_id) = self.xwayland_surface_lookup.get(&serial) else { return; }; let display_params = self.display_params; let surf = &mut self.surfaces[*surface_id]; surf.title = xwin.title.clone(); surf.app_id = xwin.app_id.clone(); surf.reconfigure(display_params, Some(xwin)); if let Some(surface::ContentUpdate { buffer, .. }) = surf.content { self.map_surface(*surface_id, buffer); } } } fn handle_event(state: &mut Compositor, ev: protocol::Event) -> anyhow::Result<()> { trace!(?ev, "x11 event"); let display_params = state.display_params; let xwm = state.xwm.as_mut().unwrap(); use protocol::Event::*; match ev { CreateNotify(msg) => { if msg.window == xwm.wm_id { return Ok(()); } // Track property changes (such as the window title). 
xwm.conn.change_window_attributes( msg.window, &xproto::ChangeWindowAttributesAux::new() .event_mask(xproto::EventMask::PROPERTY_CHANGE), )?; xwm.conn.flush()?; let title = fetch_string_property(&xwm.conn, msg.window, xwm.atoms._NET_WM_NAME)?; let app_id = fetch_class(&xwm.conn, msg.window)?; let hints = fetch_hints(&xwm.conn, msg.window)?; let protocols = fetch_protocols(&xwm.conn, xwm.atoms.WM_PROTOCOLS, msg.window)?; trace!(?hints, ?protocols, "fetched state"); let xwin = XWindow { id: msg.window, serial: None, title, app_id, x: msg.x as u32, y: msg.y as u32, width: msg.width as u32, height: msg.height as u32, states: HashSet::new(), protocols, hint_input: hints.and_then(|h| h.input).unwrap_or_default(), override_redirect: msg.override_redirect, mapped: false, }; trace!(?xwin, "xwindow created"); xwm.xwindows.insert(msg.window, xwin); xwm.conn.flush()?; } MapRequest(xproto::MapRequestEvent { window, .. }) => { if let Some(xwin) = xwm.xwindows.get_mut(&window) { // We already map the window on the X11 side; otherwise clients // just hang there. trace!(?xwin, "mapping xwindow"); xwm.conn.map_window(window)?; let property = [1, 0]; // NORMAL, NONE xwm.conn.change_property32( xproto::PropMode::REPLACE, window, xwm.atoms.WM_STATE, xwm.atoms.WM_STATE, &property, )?; xwm.conn.flush()?; xwin.mapped = true; } } MapNotify(xproto::MapNotifyEvent { window, .. }) => { if let Some(xwin) = xwm.xwindows.get_mut(&window) { trace!(?xwin, "map notify"); xwin.mapped = true; if xwin.override_redirect { // Do nothing. 
} else { xwm.client_list.push(window); xwm.client_list_stacking.push(window); xwm.conn.change_property32( xproto::PropMode::APPEND, xwm.screen.root, xwm.atoms._NET_CLIENT_LIST, xproto::AtomEnum::WINDOW, &[window], )?; xwm.conn.change_property32( xproto::PropMode::APPEND, xwm.screen.root, xwm.atoms._NET_CLIENT_LIST_STACKING, xproto::AtomEnum::WINDOW, &[window], )?; xwm.conn.flush()?; } if let Some(serial) = xwin.serial { state.raise_x11_surface(serial) } } else { trace!(window, "MapNotify for missing surface"); } } ConfigureRequest(msg) => { trace!( width = msg.width, height = msg.height, x = msg.x, y = msg.y, parent = msg.parent, sibling = msg.sibling, stack_mode = ?msg.stack_mode, mask = ?msg.value_mask, "configuration request" ); let serial = xwm .serials .iter() .find_map(|(k, v)| if *v == msg.window { Some(k) } else { None }); if let Some(surf) = serial .and_then(|serial| state.xwayland_surface_lookup.get(serial)) .and_then(|id| state.surfaces.get_mut(*id)) { if let Some(conf) = surf.configuration { xwm.configure_window(msg.window, conf)?; surf.sent_configuration = Some(conf); surf.pending_configure = None; } } else if let Some(xwin) = xwm.xwindows.get_mut(&msg.window) { trace!("sending synthetic configure"); // Create a synthetic configuration event based on what the // window requested. 
if msg.value_mask.contains(xproto::ConfigWindow::X) { xwin.x = msg.x as u32; } if msg.value_mask.contains(xproto::ConfigWindow::Y) { xwin.y = msg.y as u32; } if msg.value_mask.contains(xproto::ConfigWindow::WIDTH) { xwin.width = msg.width as u32; } if msg.value_mask.contains(xproto::ConfigWindow::HEIGHT) { xwin.height = msg.height as u32; } let conf = SurfaceConfiguration { topleft: (xwin.x, xwin.y).into(), size: (xwin.width, xwin.height).into(), scale: PixelScale::ONE, visibility: surface::Visibility::Visible, fullscreen: false, }; xwm.configure_window(msg.window, conf)?; } } ConfigureNotify(msg) => { if let Some(xwin) = xwm.xwindows.get_mut(&msg.window) { trace!( ?xwin, x = msg.x, y = msg.y, width = msg.width, height = msg.height, above = msg.above_sibling, or = msg.override_redirect, "configure notify" ); xwin.x = msg.x as u32; xwin.y = msg.y as u32; xwin.width = msg.width as u32; xwin.height = msg.height as u32; xwin.override_redirect = msg.override_redirect; if let Some(surf) = xwin .serial .and_then(|serial| state.xwayland_surface_lookup.get(&serial)) .and_then(|id| state.surfaces.get_mut(*id)) { surf.reconfigure(display_params, Some(xwin)); } } } UnmapNotify(msg) => { if let Some(xwin) = xwm.xwindows.get_mut(&msg.window) { trace!(?xwin, "unmap notify"); xwin.mapped = false; xwm.client_list.retain(|id| *id != xwin.id); xwm.client_list_stacking.retain(|id| *id != xwin.id); replace_window_list( &xwm.conn, xwm.screen.root, xwm.atoms._NET_CLIENT_LIST, &xwm.client_list, )?; replace_window_list( &xwm.conn, xwm.screen.root, xwm.atoms._NET_CLIENT_LIST_STACKING, &xwm.client_list_stacking, )?; } } DestroyNotify(msg) => { if let Some(xwin) = xwm.xwindows.remove(&msg.window) { xwm.client_list.retain(|id| *id != xwin.id); xwm.client_list_stacking.retain(|id| *id != xwin.id); xwm.serials.retain(|_, id| *id != xwin.id); replace_window_list( &xwm.conn, xwm.screen.root, xwm.atoms._NET_CLIENT_LIST, &xwm.client_list, )?; replace_window_list( &xwm.conn, xwm.screen.root, 
xwm.atoms._NET_CLIENT_LIST_STACKING, &xwm.client_list_stacking, )?; } } ClientMessage(msg) if msg.type_ == xwm.atoms.WL_SURFACE_SERIAL => { let [lo, hi, ..] = msg.data.as_data32(); let serial = ((hi as u64) << 32) | lo as u64; xwm.serials.insert(serial, msg.window); if let Some(xwin) = xwm.xwindows.get_mut(&msg.window) { xwin.serial = Some(serial); trace!(?xwin, "WL_SURFACE_SERIAL set"); // This sometimes happens after the surface is committed. if xwin.mapped { state.delayed_map_xwin(serial); } } } ClientMessage(msg) if msg.type_ == xwm.atoms._NET_WM_STATE => { let [action, a, b, ..] = msg.data.as_data32(); if let Some(xwin) = xwm.xwindows.get_mut(&msg.window) { let old_states = xwin.states.clone(); for value in [a, b] { const REMOVE: u32 = 0; const ADD: u32 = 1; const TOGGLE: u32 = 2; match (action, value) { (_, x11rb::NONE) => (), (REMOVE, v) => { xwin.states.remove(&v); } (ADD, v) => { xwin.states.insert(v); } (TOGGLE, v) => { if xwin.states.contains(&v) { xwin.states.remove(&v); } else { xwin.states.insert(v); } } _ => (), } } if xwin.states != old_states { let values = xwin.states.iter().copied().collect::>(); if tracing::event_enabled!(tracing::Level::TRACE) { let names = values .iter() .copied() .map(|atom| get_atom_name(&xwm.conn, atom)) .collect::, _>>()?; trace!(?xwin, ?names, "setting states"); } xwm.conn.change_property32( xproto::PropMode::REPLACE, xwin.id, xwm.atoms._NET_WM_STATE, xproto::AtomEnum::ATOM, &values, )?; } } } ClientMessage(msg) if msg.type_ == xwm.atoms._NET_ACTIVE_WINDOW => { if let Some(target) = xwm.xwindows.get(&msg.window) { trace!(?target, "_NET_ACTIVE_WINDOW request"); replace_window_list( &xwm.conn, xwm.screen.root, xwm.atoms._NET_ACTIVE_WINDOW, [target.id], )?; } } ClientMessage(msg) => { if tracing::event_enabled!(tracing::Level::TRACE) { let name = get_atom_name(&xwm.conn, msg.type_)?; trace!(window = ?msg.window, atom = name, "ignoring ClientMessage") } } PropertyNotify(msg) => { if 
tracing::event_enabled!(tracing::Level::TRACE) { let name = get_atom_name(&xwm.conn, msg.atom)?; trace!(xwin = msg.window, state = ?msg.state, atom = name, "property changed"); } if let Some(xwin) = xwm.xwindows.get_mut(&msg.window) { match msg.atom { v if v == xwm.atoms._NET_WM_NAME => { xwin.title = fetch_string_property(&xwm.conn, msg.window, v)?; trace!(?xwin, "title changed"); } v if v == u32::from(xproto::AtomEnum::WM_CLASS) => { xwin.app_id = fetch_class(&xwm.conn, msg.window)?; trace!(?xwin, class = xwin.app_id, "class changed"); } v if v == xwm.atoms.WM_HINTS => { let hints = fetch_hints(&xwm.conn, msg.window)?; trace!(?xwin, ?hints, "hints changed"); xwin.hint_input = hints.and_then(|h| h.input).unwrap_or_default(); } v if v == xwm.atoms.WM_PROTOCOLS => { let protocols = fetch_protocols(&xwm.conn, xwm.atoms.WM_PROTOCOLS, msg.window)?; trace!(?xwin, ?protocols, "protocols changed"); } _ => (), } } } _ => (), } Ok(()) } fn fetch_string_property( conn: &X11Connection, window: xproto::Window, atom: impl Into, ) -> Result, ConnectionError> { let atom = atom.into(); let reply = match conn .get_property(false, window, atom, xproto::AtomEnum::ANY, 0, 1024)? 
.reply_unchecked() { Ok(Some(reply)) => reply, Ok(None) | Err(ConnectionError::ParseError(_)) => return Ok(None), Err(err) => return Err(err), }; let Some(bytes) = reply.value8() else { return Ok(None); }; match String::from_utf8(bytes.collect()) { Ok(v) => Ok(Some(v)), Err(_) => { trace!(?atom, "invalid string property"); Ok(None) } } } fn fetch_class( conn: &X11Connection, window: xproto::Window, ) -> Result, ConnectionError> { let reply = match x11rb::properties::WmClass::get(conn, window)?.reply_unchecked() { Ok(Some(reply)) => reply, Ok(None) | Err(ConnectionError::ParseError(_)) => return Ok(None), Err(err) => return Err(err), }; match std::str::from_utf8(reply.class()) { Ok(v) => Ok(Some(v.to_owned())), Err(_) => { trace!("WM_CLASS property is invalid string"); Ok(None) } } } fn fetch_hints( conn: &X11Connection, window: xproto::Window, ) -> Result, ConnectionError> { match x11rb::properties::WmHints::get(conn, window)?.reply_unchecked() { Ok(Some(reply)) => Ok(Some(reply)), Ok(None) | Err(ConnectionError::ParseError(_)) => Ok(None), Err(err) => Err(err), } } fn fetch_protocols( conn: &X11Connection, atom: impl Into, window: xproto::Window, ) -> Result, ConnectionError> { let reply = match conn .get_property(false, window, atom, xproto::AtomEnum::ATOM, 0, 1024)? .reply_unchecked() { Ok(Some(reply)) => reply, Ok(None) | Err(ConnectionError::ParseError(_)) => return Ok(HashSet::default()), Err(err) => return Err(err), }; let Some(vals) = reply.value32() else { return Ok(HashSet::default()); }; Ok(vals.collect()) } fn replace_window_list( conn: &X11Connection, win: xproto::Window, a: impl Into, list: impl AsRef<[u32]>, ) -> Result, ConnectionError> { conn.change_property32( xproto::PropMode::REPLACE, win, a, xproto::AtomEnum::WINDOW, list.as_ref(), ) } fn get_atom_name( conn: &X11Connection, atom: impl Into, ) -> Result { if let Some(reply) = conn.get_atom_name(atom.into())?.reply_unchecked()? 
{
    // Atom exists: decode its name, falling back to "" on non-UTF-8 bytes.
    Ok(String::from_utf8(reply.name).unwrap_or("".to_string()))
} else {
    Ok("".to_string())
}
}

================================================ FILE: mm-server/src/session/compositor/xwayland.rs ================================================

// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

mod xwm;

use std::{
    io::{self, Read as _},
    os::fd::{AsFd, AsRawFd as _},
    path::{Path, PathBuf},
    sync::Arc,
};

use anyhow::{anyhow, bail, Context as _};
use pathsearch::find_executable_in_path;
use tracing::{debug, trace};
pub use xwm::*;

use crate::{
    config::HomeIsolationMode,
    container::{Container, ContainerHandle},
    session::compositor::ClientState,
};

/// A spawned XWayland instance, plus the plumbing the compositor uses to talk
/// to it: the X11 display socket, the readiness pipe, and the handle for the
/// containerized child process.
pub struct XWayland {
    pub display_socket: DisplaySocket,
    pub displayfd_recv: mio::unix::pipe::Receiver,
    pub child: ContainerHandle,
    extern_socket_dir: PathBuf,
    // Taken (Option::take) exactly once, when XWayland signals readiness; see
    // poll_ready below.
    xwm_socket: Option,
}

// Where the socket gets mounted inside the container.
// NOTE(review): "CONONICAL" is a typo for "CANONICAL"; the const is private to
// this module, so renaming it (here and in inner_path/prepare_socket) would be
// a safe follow-up.
const CONONICAL_DISPLAY_PATH: &str = "/tmp/.X11-unix";

/// An X11 display number (the "1" in ":1").
pub struct DisplaySocket(u32);

impl DisplaySocket {
    /// Finds a display number not already in use, by attempting to bind the
    /// corresponding abstract unix socket name.
    fn pick_unused() -> anyhow::Result {
        use rustix::net::*;

        // Because we're using a mount namespace, we don't need to worry about
        // system sockets in /tmp leaking into our container. However, because we
        // don't use a network namespace, it is possible for system abstract sockets
        // to be available. We can ensure that isn't the case by attempting to
        // bind the abstract socket.
        let mut display = 1;
        let sock = socket(AddressFamily::UNIX, SocketType::STREAM, None)?;
        loop {
            let dp = DisplaySocket(display);
            match rustix::net::bind(
                &sock,
                // By convention, the name is the same as the path.
                &SocketAddrUnix::new_abstract_name(dp.inner_path().as_os_str().as_encoded_bytes())?,
            ) {
                Ok(()) => return Ok(dp),
                // Discard the abstract socket.
Err(e) if e.kind() == io::ErrorKind::AddrInUse => display += 1,
                Err(e) => return Err(e.into()),
            }
        }
    }

    /// The display string X clients expect, e.g. ":1".
    pub fn display(&self) -> String {
        format!(":{}", self.0)
    }

    /// The socket path as seen from inside the container, e.g.
    /// "/tmp/.X11-unix/X1".
    pub fn inner_path(&self) -> PathBuf {
        Path::new(CONONICAL_DISPLAY_PATH).join(format!("X{}", self.0))
    }
}

impl XWayland {
    /// Launches XWayland in a container, wiring up a wayland socket pair, an
    /// XWM socket pair, a readiness pipe, and a listening X11 display socket.
    pub fn spawn(
        dh: &mut wayland_server::DisplayHandle,
        xdg_runtime_dir: impl AsRef,
        stdio: impl AsFd,
    ) -> anyhow::Result {
        let (xwm_xwayland, xwm_compositor) = mio::net::UnixStream::pair()?;
        let (wayland_xwayland, wayland_compositor) = mio::net::UnixStream::pair()?;

        // XWayland writes the display number and a newline to this pipe when
        // it's ready.
        let (displayfd_send, displayfd_recv) = mio::unix::pipe::new()?;

        let display_socket = DisplaySocket::pick_unused()?;

        // Put the socket in a folder, so we can bind-mount that to
        // /tmp/.X11-unix inside the (app) container.
        let extern_socket_path = xdg_runtime_dir
            .as_ref()
            .join(display_socket.inner_path().strip_prefix("/").unwrap());
        let extern_socket_dir = extern_socket_path.parent().unwrap();
        std::fs::create_dir_all(extern_socket_dir)?;
        let socket = mio::net::UnixListener::bind(&extern_socket_path)?;

        let exe = find_executable_in_path("Xwayland")
            .ok_or(anyhow!("Xwayland not in PATH"))?
            .as_os_str()
            .to_owned();

        // File descriptors are passed to XWayland by number on the command
        // line; pre_exec below clears CLOEXEC so they survive the exec.
        let args = vec![
            exe,
            "-verbose".into(),
            "-rootless".into(),
            "-terminate".into(),
            "-force-xrandr-emulation".into(),
            "-wm".into(),
            xwm_xwayland.as_raw_fd().to_string().into(),
            "-displayfd".into(),
            displayfd_send.as_raw_fd().to_string().into(),
            "-listenfd".into(),
            socket.as_raw_fd().to_string().into(),
        ];

        let mut container = Container::new(args, HomeIsolationMode::Tmpfs)?;
        container.set_env(
            "WAYLAND_SOCKET",
            format!("{}", wayland_xwayland.as_raw_fd()),
        );

        container.set_stdout(stdio.as_fd())?;
        container.set_stderr(stdio.as_fd())?;

        unsafe {
            container.pre_exec(move || {
                // unset the CLOEXEC flag from the sockets we need to pass
                // to xwayland.
unset_cloexec(&wayland_xwayland)?; unset_cloexec(&xwm_xwayland)?; unset_cloexec(&displayfd_send)?; unset_cloexec(&socket)?; Ok(()) }); } let child = container.spawn().context("failed to spawn XWayland")?; debug!(x11_socket = ?extern_socket_path, "spawned Xwayland instance"); // Insert the client into the display handle. The order is important // here; XWayland never starts up at all unless it can roundtrip with // wayland. let _client = dh.insert_client( wayland_compositor.into(), Arc::new(ClientState { xwayland: true }), )?; Ok(Self { display_socket, displayfd_recv, child, extern_socket_dir: extern_socket_dir.to_owned(), xwm_socket: Some(xwm_compositor), }) } pub fn poll_ready(&mut self) -> anyhow::Result> { if self.xwm_socket.is_none() { bail!("XWayland already marked as ready") } let mut buf = [0; 64]; match self.displayfd_recv.read(&mut buf) { Ok(len) => { if (buf[..len]).contains(&b'\n') { trace!("Xwayland ready"); return Ok(self.xwm_socket.take()); } else { // Not ready yet. } } Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => (), Err(err) => return Err(err).context("reading from xwayland pipe failed"), } Ok(None) } pub fn prepare_socket(&self, container: &mut Container) { container.bind_mount(&self.extern_socket_dir, Path::new(CONONICAL_DISPLAY_PATH)); container.set_env("DISPLAY", self.display_socket.display()); } } fn unset_cloexec(socket_fd: impl AsFd) -> Result<(), rustix::io::Errno> { rustix::io::fcntl_setfd(socket_fd, rustix::io::FdFlags::empty()) } ================================================ FILE: mm-server/src/session/compositor.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use std::{collections::BTreeMap, sync::Arc}; use protocols::*; use slotmap::SlotMap; use tracing::{debug, instrument, trace}; use wayland_protocols::{ wp::{ fractional_scale::v1::server::wp_fractional_scale_manager_v1, linux_dmabuf::zv1::server::zwp_linux_dmabuf_v1, 
linux_drm_syncobj::v1::server::wp_linux_drm_syncobj_manager_v1, pointer_constraints::zv1::server::zwp_pointer_constraints_v1, presentation_time::server::wp_presentation, relative_pointer::zv1::server::zwp_relative_pointer_manager_v1, text_input::zv3::server::zwp_text_input_manager_v3, }, xdg::shell::server::xdg_wm_base, xwayland::shell::v1::server::xwayland_shell_v1, }; use wayland_server::{ protocol::{self, wl_output, wl_shm}, Resource as _, }; use crate::{ session::{ control::*, video::{self, TextureSync}, SessionHandle, }, vulkan::VkContext, }; pub mod buffers; mod dispatch; mod oneshot_render; mod output; mod protocols; mod sealed; mod seat; mod serial; mod shm; mod stack; pub mod surface; pub mod xwayland; pub use seat::{ButtonState, KeyState}; use super::EPOCH; pub struct Compositor { serial: serial::Serial, surfaces: SlotMap, buffers: SlotMap, shm_pools: SlotMap, cached_dmabuf_feedback: buffers::CachedDmabufFeedback, imported_syncobj_timelines: SlotMap, in_flight_buffers: Vec, pending_presentation_feedback: Vec, surface_stack: Vec, active_surface: Option, output_proxies: Vec, // TODO: one seat per operator pub default_seat: seat::Seat, display_params: DisplayParams, session_handle: SessionHandle, xwm: Option, xwayland_surface_lookup: BTreeMap, // At the bottom for drop order. 
vk: Arc, } impl Compositor { pub fn new( vk: Arc, handle: SessionHandle, display_params: DisplayParams, ) -> anyhow::Result { let cached_dmabuf_feedback = buffers::CachedDmabufFeedback::new(vk.clone())?; Ok(Self { serial: serial::Serial::new(), surfaces: SlotMap::default(), buffers: SlotMap::default(), shm_pools: SlotMap::default(), cached_dmabuf_feedback, imported_syncobj_timelines: SlotMap::default(), in_flight_buffers: Vec::new(), pending_presentation_feedback: Vec::new(), surface_stack: Vec::new(), active_surface: None, output_proxies: Vec::new(), default_seat: seat::Seat::default(), display_params, session_handle: handle.clone(), xwm: None, xwayland_surface_lookup: BTreeMap::default(), vk, }) } pub fn update_display_params( &mut self, display_params: DisplayParams, active: bool, ) -> anyhow::Result<()> { let now = EPOCH.elapsed().as_millis() as u32; // Reconfigure all surfaces to be the right size. for surface in &self.surface_stack { let surf = &mut self.surfaces[*surface]; let xwin = surf.role.current.as_ref().and_then(|role| { if let surface::SurfaceRole::XWayland { serial } = role { self.xwm.as_ref().unwrap().xwindow_for_serial(*serial) } else { None } }); surf.reconfigure(display_params, xwin); if display_params.width != self.display_params.width || display_params.height != self.display_params.height || display_params.ui_scale != self.display_params.ui_scale { // Try to trick the surface into thinking it's moving to a // different monitor. This helps some games adjust to mode // changes. for wl_output in &self.output_proxies { if wl_output.client() == surf.wl_surface.client() { surf.wl_surface.leave(wl_output); surf.wl_surface.enter(wl_output); } } // Discharge any pending frame callbacks, since we won't // render the current content, and some clients get stuck // otherwise. 
if let Some(cb) = surf.frame_callback.current.take() { cb.done(now); } } } self.update_focus_and_visibility(active)?; self.display_params = display_params; self.emit_output_params(); Ok(()) } #[instrument(skip_all)] pub fn composite_frame( &mut self, video_pipeline: &mut video::EncodePipeline, ) -> anyhow::Result<()> { let now = EPOCH.elapsed().as_millis() as u32; let ready = unsafe { video_pipeline.begin()? }; if !ready { debug!("dropped frame because of backpressure"); return Ok(()); } // Iterate backwards to find the first fullscreen window. let first_visible_idx = self .surface_stack .iter() .rposition(|id| { self.surfaces[*id] .configuration .map_or(true, |conf| conf.fullscreen) }) .unwrap_or_default(); let num_surfaces = self.surface_stack.len() - first_visible_idx; let mut presentation_feedback = Vec::with_capacity(num_surfaces); for id in self.surface_stack[first_visible_idx..].iter() { let surface = &mut self.surfaces[*id]; let conf = surface .configuration .expect("mapped surface has no configuration"); let content = surface .content .as_mut() .expect("mapped surface has no content"); let buffer = &mut self.buffers[content.buffer]; let sync = match &mut buffer.backing { buffers::BufferBacking::Dmabuf { .. } => { if let Some((acquire, _)) = content.explicit_sync.as_ref() { Some(TextureSync::Explicit(acquire.clone())) } else { Some(TextureSync::ImplicitInterop) } } _ => None, }; unsafe { content.tp_done = video_pipeline.composite_surface(buffer, sync, conf)? }; if let Some(callback) = surface.frame_callback.current.take().as_mut() { callback.done(now); } if let Some(fb) = content.wp_presentation_feedback.take() { presentation_feedback.push(fb); } trace!(?surface, ?conf, "compositing surface"); } let tp_render = unsafe { video_pipeline.end_and_submit()? }; for fb in presentation_feedback.drain(..) 
{
    // Tie each feedback object to the render timeline point, so it is sent
    // once the GPU work completes (see send_presentation_feedback).
    self.pending_presentation_feedback
        .push(surface::PendingPresentationFeedback(fb, tp_render.clone()));
}

Ok(())
}

/// Once-per-loop housekeeping: updates focus/visibility, flushes pending
/// configures and pointer events, releases unused buffers, and sends
/// presentation feedback.
pub fn idle(&mut self, active: bool) -> anyhow::Result<()> {
    // Update the window stack, if it changed.
    self.update_focus_and_visibility(active)?;

    // Send any pending surface configures.
    self.configure_surfaces()?;

    // Check if the pointer is locked.
    self.update_pointer_lock();

    // Send pending pointer frames.
    self.default_seat.pointer_frame();

    // Release any unused buffers.
    self.release_buffers()?;

    // Send presentation feedback.
    self.send_presentation_feedback()?;

    Ok(())
}
}

/// Per-connection state attached when a wayland client connects; records
/// whether the client is our own XWayland instance.
#[derive(Debug, Default)]
pub struct ClientState {
    xwayland: bool,
}

impl wayland_server::backend::ClientData for ClientState {
    fn initialized(&self, _client_id: wayland_server::backend::ClientId) {}

    fn disconnected(
        &self,
        _client_id: wayland_server::backend::ClientId,
        _reason: wayland_server::backend::DisconnectReason,
    ) {
    }
}

/// Registers every wayland global the compositor advertises, each at its
/// maximum supported version.
// NOTE(review): the `::<…>` type parameters of these calls were lost in
// extraction; each call originally named one protocol global type.
pub fn create_globals(dh: &wayland_server::DisplayHandle) {
    create_global::(dh, 6);
    create_global::(dh, 4);
    create_global::(dh, 6);
    create_global::(dh, 1);
    create_global::(dh, 9);
    create_global::(dh, 3);
    create_global::(dh, 1);
    create_global::(dh, 1);
    create_global::(dh, 1);
    create_global::(dh, 1);
    create_global::(dh, 5);
    create_global::(dh, 1);
    create_global::(dh, 1);
    create_global::(dh, 1);
    create_global::(dh, 2);
}

// Helper: creates a single global at the given max version, discarding the
// returned global handle.
fn create_global(
    dh: &wayland_server::DisplayHandle,
    version: u32,
) where
    Compositor: wayland_server::GlobalDispatch,
{
    let _ = dh.create_global::(version, ());
}

================================================ FILE: mm-server/src/session/control.rs ================================================

// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

use crossbeam_channel::Sender;

use crate::{
    codec::{AudioCodec, VideoCodec},
    color::VideoProfile,
    pixel_scale::PixelScale,
    server::stream::StreamWriter,
    session::compositor::{self, ButtonState},
};

/// The virtual display configuration: resolution, refresh rate, and UI scale.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct DisplayParams {
    pub width:
u32,
    pub height: u32,
    pub framerate: u32,
    pub ui_scale: PixelScale,
}

/// Parameters for an encoded video stream requested by a client.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct VideoStreamParams {
    pub width: u32,
    pub height: u32,
    pub codec: VideoCodec,
    pub preset: u32,
    pub profile: VideoProfile,
}

/// Parameters for an encoded audio stream requested by a client.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct AudioStreamParams {
    pub sample_rate: u32,
    pub channels: u32,
    pub codec: AudioCodec,
}

/// Messages sent into a session to drive its lifecycle and deliver input.
pub enum ControlMessage {
    // Shut the session down.
    Stop,
    // Attach a client under `id`.
    // NOTE(review): `ready` is presumably signaled once the attachment is
    // live — confirm against the session/attachment handlers.
    Attach {
        id: u64,
        sender: Sender,
        video_params: VideoStreamParams,
        audio_params: AudioStreamParams,
        stream_writer: StreamWriter,
        ready: oneshot::Sender<()>,
    },
    // Detach the client with the given attachment id.
    Detach(u64),
    RefreshVideo,
    UpdateDisplayParams(DisplayParams),
    KeyboardInput {
        key_code: u32,
        state: compositor::KeyState,
        char: Option,
    },
    PointerEntered,
    PointerLeft,
    PointerMotion(f64, f64),
    RelativePointerMotion(f64, f64),
    PointerInput {
        x: f64,
        y: f64,
        button_code: u32,
        state: ButtonState,
    },
    PointerAxis(f64, f64),
    PointerAxisDiscrete(f64, f64),
    GamepadAvailable(u64),
    GamepadUnavailable(u64),
    GamepadAxis {
        id: u64,
        axis_code: u32,
        value: f64,
    },
    GamepadTrigger {
        id: u64,
        trigger_code: u32,
        value: f64,
    },
    GamepadInput {
        id: u64,
        button_code: u32,
        state: ButtonState,
    },
}

/// Events broadcast from a session to its attached clients.
#[derive(Debug, Clone)]
pub enum SessionEvent {
    DisplayParamsChanged {
        params: DisplayParams,
        reattach: bool,
    },
    VideoFrame {
        stream_seq: u64,
        seq: u64,
        frame: bytes::Bytes,
    },
    AudioFrame {
        _stream_seq: u64,
        seq: u64,
        frame: bytes::Bytes,
    },
    CursorUpdate {
        image: Option,
        icon: Option,
        hotspot_x: u32,
        hotspot_y: u32,
    },
    PointerLocked(f64, f64),
    PointerReleased,
    Shutdown,
}

================================================ FILE: mm-server/src/session/handle.rs ================================================

// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

use std::{collections::BTreeMap, sync::Arc};

use crossbeam_channel as crossbeam;
use parking_lot::Mutex;

use super::control::SessionEvent;
use crate::server::stream::StreamWriter;

// One attached client: its event channel and its network stream writer.
struct Client {
    events: crossbeam::Sender,
    writer: StreamWriter,
}

struct
Inner { attachments: BTreeMap, } #[derive(Clone)] pub struct SessionHandle(Arc>, Arc); impl SessionHandle { pub fn new(waker: Arc) -> Self { Self( Arc::new(Mutex::new(Inner { attachments: BTreeMap::new(), })), waker, ) } pub fn insert_client( &self, id: u64, events: crossbeam::Sender, writer: StreamWriter, ) { self.0 .lock() .attachments .insert(id, Client { events, writer }); } pub fn remove_client(&self, id: u64) { self.0.lock().attachments.remove(&id); } pub fn remove_all(&self) { self.0.lock().attachments.clear(); } pub fn dispatch(&self, event: SessionEvent) { let attachments = &self.0.lock().attachments; for (_, client) in attachments.iter() { let _ = client.events.send(event.clone()); } } pub fn dispatch_audio_frame(&self, pts: u64, frame: bytes::Bytes, stream_restart: bool) { let attachments = &mut self.0.lock().attachments; for (_, client) in attachments.iter_mut() { let (stream_seq, seq) = client .writer .write_audio_frame(pts, frame.clone(), stream_restart); let _ = client.events.send(SessionEvent::AudioFrame { _stream_seq: stream_seq, seq, frame: frame.clone(), }); } } pub fn dispatch_video_frame( &self, pts: u64, frame: bytes::Bytes, hierarchical_layer: u32, stream_restart: bool, ) { let attachments = &mut self.0.lock().attachments; for (_, client) in attachments.iter_mut() { let (stream_seq, seq) = client.writer.write_video_frame( pts, frame.clone(), hierarchical_layer, stream_restart, ); let _ = client.events.send(SessionEvent::VideoFrame { stream_seq, seq, frame: frame.clone(), }); } } pub fn wake(&self) -> std::io::Result<()> { self.1.wake() } pub fn kick_clients(&self) { let attachments = &mut self.0.lock().attachments; for (_, client) in std::mem::take(attachments) { let _ = client.events.send(SessionEvent::Shutdown); } } pub fn num_attachments(&self) -> usize { self.0.lock().attachments.len() } } ================================================ FILE: mm-server/src/session/input/udevfs.rs ================================================ // 
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

use std::{
    collections::BTreeMap,
    ffi::OsStr,
    path::{Path, PathBuf},
    sync::Arc,
    time,
};

use fuser as fuse;
use libc::EBADF;
use parking_lot::Mutex;
use tracing::{debug, trace};

use super::DeviceState;

const ENOENT: i32 = rustix::io::Errno::NOENT.raw_os_error();

// Canned udev database record served for every emulated input device (the
// contents of the "c13:N" files under /run/udev/data).
const UDEV_INPUT_DATA: &[u8] = r#"E:ID_INPUT=1
E:ID_INPUT_JOYSTICK=1
E:ID_BUS=usb
G:seat
G:uaccess
Q:seat
Q:uaccess
V:1
"#
.as_bytes();

// Zero TTL: the kernel must not cache our replies, since devices come and go.
const ZERO_TTL: time::Duration = time::Duration::ZERO;

/// A cached inode: the emulated absolute path, its attributes, and (for
/// device entries) the associated device ID.
// NOTE(review): generic type arguments in this file (e.g. on `Option` and
// `BTreeMap` fields) were lost in text extraction.
#[derive(Debug, Clone)]
struct Entry {
    path: PathBuf,
    attr: fuse::FileAttr,
    /// The associated device ID.
    dev: Option,
}

/// Lazily assigns inode numbers to emulated paths and remembers the mapping
/// so later operations can resolve an inode back to a path.
struct InodeCache {
    inodes: BTreeMap,
    next_inode: u64,
    ctime: time::SystemTime,
}

impl InodeCache {
    /// Returns the cached attributes for `p`, inserting a new entry (with a
    /// freshly assigned inode number written into `attr.ino`) if the path
    /// hasn't been seen yet.
    fn get_or_insert(
        &mut self,
        p: impl AsRef,
        mut attr: fuse::FileAttr,
        dev: Option,
    ) -> fuse::FileAttr {
        // Linear scan; the set of emulated paths is small.
        for entry in self.inodes.values() {
            if entry.path == p.as_ref() {
                return entry.attr;
            }
        }

        let ino = self.next_inode;
        self.next_inode += 1;

        attr.ino = ino;
        self.inodes.insert(
            ino,
            Entry {
                path: p.as_ref().to_owned(),
                attr,
                dev,
            },
        );

        attr
    }

    /// Maps an inode back to its path and device ID. The FUSE root inode is
    /// special-cased as "/".
    fn lookup_name(&self, inode: u64) -> Option<(PathBuf, Option)> {
        if inode == fuse::FUSE_ROOT_ID {
            return Some((Path::new("/").to_owned(), None));
        }

        self.inodes
            .get(&inode)
            .map(|entry| (entry.path.clone(), entry.dev))
    }

    /// Replies to a readdir with the cached entries matching `names`, in
    /// order, honoring the kernel-supplied `skip` offset.
    // NOTE(review): the generic parameter list (over `P`) was lost in text
    // extraction.
    fn reply_add_dirs(
        &self,
        mut reply: fuse::ReplyDirectory,
        names: impl IntoIterator,
        skip: usize,
    ) where
        P: AsRef,
    {
        let mut offset = 1_i64;
        for name in names.into_iter().skip(skip) {
            for (ino, entry) in &self.inodes {
                if entry.path == name.as_ref() {
                    if reply.add(
                        *ino,
                        offset,
                        entry.attr.kind,
                        entry.path.file_name().unwrap().to_str().unwrap(),
                    ) {
                        // Reply buffer is full; send what we have.
                        return reply.ok();
                    };
                    offset += 1;
                }
            }
        }

        reply.ok()
    }

    /// Caches and returns attributes for a directory entry at `p`.
    fn cache_dir(&mut self, p: impl AsRef, dev: Option) -> fuse::FileAttr {
        let attr = fuse::FileAttr {
            ino: 0,
            size: 0,
            blocks: 0,
            atime: self.ctime,
            mtime: self.ctime,
            ctime: self.ctime,
            crtime: time::SystemTime::UNIX_EPOCH,
            kind: fuse::FileType::Directory,
            perm: 0o777,
            nlink: 1,
            uid: 0,
            gid: 0,
            rdev: 0,
            blksize: 512,
            flags: 0,
        };

        self.get_or_insert(p, attr, dev)
    }

    /// Caches and returns attributes for a regular file at `p` with the given
    /// content length.
    // NOTE(review): unlike cache_dir/cache_symlink, file timestamps here are
    // UNIX_EPOCH rather than self.ctime — confirm whether that's intentional.
    fn cache_file(&mut self, p: impl AsRef, dev: Option, len: usize) -> fuse::FileAttr {
        let attr = fuse::FileAttr {
            ino: 0,
            size: len as u64,
            blocks: 0,
            atime: time::UNIX_EPOCH,
            mtime: time::UNIX_EPOCH,
            ctime: time::UNIX_EPOCH,
            crtime: time::UNIX_EPOCH,
            kind: fuse::FileType::RegularFile,
            perm: 0o777,
            nlink: 1,
            uid: 0,
            gid: 0,
            rdev: 0,
            blksize: 512,
            flags: 0,
        };

        self.get_or_insert(p, attr, dev)
    }

    /// Caches and returns attributes for a symlink at `p`.
    fn cache_symlink(&mut self, p: impl AsRef, dev: Option) -> fuse::FileAttr {
        let attr = fuse::FileAttr {
            ino: 0,
            size: 0,
            blocks: 0,
            atime: self.ctime,
            mtime: self.ctime,
            ctime: self.ctime,
            crtime: time::SystemTime::UNIX_EPOCH,
            kind: fuse::FileType::Symlink,
            perm: 0o777,
            nlink: 1,
            uid: 0,
            gid: 0,
            rdev: 0,
            blksize: 512,
            flags: 0,
        };

        self.get_or_insert(p, attr, dev)
    }
}

/// A FUSE filesystem designed to fool libudev. All incoming paths are intended
/// to be absolute. The following paths are emulated:
/// - /sys/devices/virtual/input: contains folders for each virtual input
///   device. Contains both a top-level folder, inputX, and an eventX folder
///   for the evdev node.
/// - /sys/class/input: contains symlinks to the above device entries.
/// - /sys/class/hidraw: empty, so that no hidraw devices can be found /// - /run/udev/control: an empty file that indicates udev is running /// - /run/udev/data: contains "c{major}:{minor}" files with metadata on each /// device. pub struct UdevFs { state: Arc>, tree: InodeCache, } impl UdevFs { pub fn new(state: Arc>) -> Self { Self { state, tree: InodeCache { inodes: Default::default(), next_inode: fuse::FUSE_ROOT_ID + 1, ctime: time::SystemTime::now(), }, } } } impl fuse::Filesystem for UdevFs { fn lookup( &mut self, _req: &fuse::Request<'_>, parent: u64, name: &std::ffi::OsStr, reply: fuse::ReplyEntry, ) { let Some(name) = name.to_str() else { debug!(?name, "invalid lookup name"); return reply.error(ENOENT); }; let inodes = &mut self.tree; let Some((parent_path, dev)) = inodes.lookup_name(parent) else { debug!(?parent, ?name, "lookup failed"); return reply.error(ENOENT); }; trace!(?parent_path, ?name, dev, "lookup"); match (parent_path.to_str().unwrap(), name, dev) { ("/", "sys", _) => reply.entry(&ZERO_TTL, &inodes.cache_dir("/sys", None), 0), ("/sys", "class", _) => { reply.entry(&ZERO_TTL, &inodes.cache_dir("/sys/class", None), 0) } ("/sys/class", "input", _) => { reply.entry(&ZERO_TTL, &inodes.cache_dir("/sys/class/input", None), 0) } ("/sys/class/input", name, _) => { let Some(dev) = self .state .lock() .device_by_eventname(name) .map(|dev| dev.id) else { debug!(name, "device not found in /sys/class/input"); return reply.error(ENOENT); }; reply.entry( &ZERO_TTL, &inodes.cache_symlink(parent_path.join(name), Some(dev)), 0, ); } ("/sys/class", "hidraw", _) => { reply.entry(&ZERO_TTL, &inodes.cache_dir("/sys/class/hidraw", None), 0) } ("/sys", "devices", _) => { reply.entry(&ZERO_TTL, &inodes.cache_dir("/sys/devices", None), 0) } ("/sys/devices", "virtual", _) => reply.entry( &ZERO_TTL, &inodes.cache_dir("/sys/devices/virtual", None), 0, ), ("/sys/devices/virtual", "input", _) => reply.entry( &ZERO_TTL, &inodes.cache_dir("/sys/devices/virtual/input", None), 
0, ), ("/sys/devices/virtual/input", name, _) => { let Some(dev) = self.state.lock().device_by_devname(name).map(|dev| dev.id) else { debug!(name, "device not found in /sys/devices/virtual/input"); return reply.error(ENOENT); }; reply.entry( &ZERO_TTL, &inodes.cache_dir(parent_path.join(name), Some(dev)), 0, ); } (p, "uevent", Some(dev)) if p.starts_with("/sys/devices/virtual/input") => { let guard = self.state.lock(); let Some(dev) = guard.device_by_id(dev) else { debug!(?p, dev, "device not found in /sys/devices/virtual/input"); return reply.error(ENOENT); }; // Inside the device directory, there are two levels of subdirectories. let path = parent_path .strip_prefix(Path::new("/sys/devices/virtual/input")) .unwrap(); if path.as_os_str().is_empty() { unreachable!() // Handled by the case above this one. } // Distinguish between the inputX uevent and the eventX uevent. let content = if path.to_str().unwrap() == dev.devname { make_input_uevent(dev) } else if path .file_name() .unwrap() .to_str() .unwrap() .starts_with("event") { make_evdev_uevent(dev) } else { debug!(?parent_path, "unrecognized uevent path"); return reply.error(ENOENT); }; reply.entry( &ZERO_TTL, &self .tree .cache_file(parent_path.join("uevent"), Some(dev.id), content.len()), 0, ); } (p, "subsystem", Some(dev)) if p.starts_with("/sys/devices/virtual/input") => { reply.entry( &ZERO_TTL, &self .tree .cache_symlink(parent_path.join("subsystem"), Some(dev)), 0, ); } (p, name, Some(dev)) if p.starts_with("/sys/devices/virtual/input") && name.starts_with("event") => { // This is /sys/devices/virtual/input/inputX/eventX. 
reply.entry( &ZERO_TTL, &inodes.cache_dir(parent_path.join(name), Some(dev)), 0, ); } ("/", "run", _) => reply.entry(&ZERO_TTL, &inodes.cache_dir("/run", None), 0), ("/run", "udev", _) => reply.entry(&ZERO_TTL, &inodes.cache_dir("/run/udev", None), 0), ("/run/udev", "control", _) => reply.entry( &ZERO_TTL, &inodes.cache_file("/run/udev/control", None, 0), 0, ), ("/run/udev", "data", _) => { reply.entry(&ZERO_TTL, &inodes.cache_dir("/run/udev/data", None), 0) } ("/run/udev", "udev.conf.d", _) => reply.error(ENOENT), ("/run/udev/data", name, _) => { let guard = self.state.lock(); for dev in &guard.devices { if name == format!("c13:{}", dev.counter) { return reply.entry( &ZERO_TTL, &inodes.cache_file( parent_path.join(name), Some(dev.id), UDEV_INPUT_DATA.len(), ), 0, ); } } debug!(?name, "no device found in /run/udev/data"); reply.error(ENOENT); } (parent_name, name, dev) => { debug!(parent_name, name, dev, "udevfs lookup failed"); reply.error(ENOENT); } } } fn getattr( &mut self, _req: &fuse::Request<'_>, ino: u64, _fh: Option, reply: fuse::ReplyAttr, ) { let Some(entry) = self.tree.inodes.get(&ino) else { debug!(ino, "lookup failed"); return reply.error(ENOENT); }; reply.attr(&ZERO_TTL, &entry.attr); } fn readlink(&mut self, _req: &fuse::Request<'_>, ino: u64, reply: fuse::ReplyData) { let Some(entry) = self.tree.inodes.get(&ino) else { debug!(ino, "lookup failed"); return reply.error(ENOENT); }; trace!(path = ?entry.path, "readlink"); if let Some(name) = matches_prefix_with_name(&entry.path, "/sys/class/input") { let guard = self.state.lock(); let Some(dev) = guard.device_by_eventname(name) else { debug!(eventname = ?name, "device not found in /sys/devices/virtual/input"); return reply.error(ENOENT); }; let dst = Path::new("/sys/devices/virtual/input") .join(&dev.devname) .join(name); reply.data(dst.as_os_str().as_encoded_bytes()); } else if entry.path.starts_with("/sys/devices") && entry.path.file_name() == Some(Path::new("subsystem").as_os_str()) { 
reply.data(b"/sys/class/input"); } else { debug!(path = ?entry.path, dev = ?entry.dev, "readlink failed"); reply.error(ENOENT); } } fn read( &mut self, _req: &fuse::Request<'_>, ino: u64, _fh: u64, _offset: i64, _size: u32, _flags: i32, _lock_owner: Option, reply: fuse::ReplyData, ) { let Some(entry) = self.tree.inodes.get(&ino) else { debug!(ino, "lookup failed"); return reply.error(EBADF); }; trace!(path = ?entry.path, "read"); if entry.path.starts_with("/run/udev/data") { reply.data(UDEV_INPUT_DATA); } else if entry.dev.is_some() && entry.path.starts_with("/sys/devices") && entry.path.file_name() == Some(Path::new("uevent").as_os_str()) { let guard = self.state.lock(); let Some(dev) = guard.device_by_id(entry.dev.unwrap()) else { debug!(dev = ?entry.dev, "device lookup failed"); return reply.error(EBADF); }; let mut parent_path = entry.path.clone(); parent_path.pop(); if parent_path.file_name() == Some(&dev.eventname) { reply.data(&make_evdev_uevent(dev)) } else if parent_path.file_name() == Some(&dev.devname) { reply.data(&make_input_uevent(dev)) } else { debug!(?entry.path, "bad uevent path"); reply.error(EBADF); } } else { debug!(path = ?entry.path, dev = entry.dev, "read failed"); reply.error(EBADF); } } fn readdir( &mut self, _req: &fuse::Request<'_>, ino: u64, _fh: u64, skip: i64, mut reply: fuse::ReplyDirectory, ) { let inodes = &mut self.tree; let Some(Entry { path, dev, .. }) = inodes.inodes.get(&ino).cloned() else { debug!(ino, "lookup failed"); return reply.error(EBADF); }; trace!(?path, ?dev, "readdir"); let skip = skip as usize; match (path.to_str().unwrap(), dev) { ("/", _) => inodes.reply_add_dirs(reply, ["sys", "run"], skip), ("/sys", _) => inodes.reply_add_dirs(reply, ["class", "devices"], skip), ("/sys/class", _) => inodes.reply_add_dirs(reply, ["input", "hidraw"], skip), ("/sys/class/input", _) => { let guard = self.state.lock(); trace!("udev is enumerating devices in /sys/class/input"); for (idx, DeviceState { id, eventname, .. 
}) in guard.devices.iter().skip(skip).enumerate() { let attr = inodes.cache_symlink(path.join(eventname), Some(*id)); if reply.add( attr.ino, (idx as i64) + 1, fuse::FileType::Symlink, eventname, ) { break; } } reply.ok(); } ("/sys/class/hidraw", _) => { reply.ok() // Empty. } ("/sys/devices", _) => inodes.reply_add_dirs(reply, ["virtual"], skip), ("/sys/devices/virtual", _) => inodes.reply_add_dirs(reply, ["input"], skip), ("/sys/devices/virtual/input", _) => { let guard = self.state.lock(); trace!("udev is enumerating devices in /sys/devices/virtual/input"); for (idx, DeviceState { id, devname, .. }) in guard.devices.iter().skip(skip).enumerate() { let attr = inodes.cache_dir(path.join(devname), Some(*id)); if reply.add( attr.ino, (idx as i64) + 1, fuse::FileType::Directory, devname, ) { break; } } reply.ok(); } (_p, Some(_)) if matches_prefix_with_name(&path, "/sys/devices/virtual/input").is_some() => { // Note: this seems not to happen. // inodes.reply_add_dirs(reply, ["subsystem", "capabilities", // "uevent"], skip) } ("/run", _) => inodes.reply_add_dirs(reply, ["udev"], skip), ("/run/udev", _) => inodes.reply_add_dirs(reply, ["control", "data"], skip), ("/run/udev/data", _) => { // Note: this seems not to happen. 
_ => {
                debug!(?path, ?dev, "readdir failed");
                reply.error(ENOENT);
            }
        }
    }

    /// Allows all access checks unconditionally.
    fn access(&mut self, _req: &fuse::Request<'_>, _ino: u64, _mask: i32, reply: fuse::ReplyEmpty) {
        reply.ok()
    }

    /// No per-handle state is kept, so release is a no-op.
    fn release(
        &mut self,
        _req: &fuse::Request<'_>,
        _ino: u64,
        _fh: u64,
        _flags: i32,
        _lock_owner: Option,
        _flush: bool,
        reply: fuse::ReplyEmpty,
    ) {
        reply.ok()
    }
}

/// Builds the uevent file contents for an inputX directory.
// NOTE(review): the values are hard-coded for the emulated controller and do
// not use `_dev` at all — see the TODO below.
fn make_input_uevent(_dev: &DeviceState) -> Vec {
    // TODO hack
    br#"PRODUCT=3/45e/2ea/408
NAME="Magic Mirror Emulated Controller"
EV=20000b
KEY=7fdb000000000000 0 0 0 0
ABS=3003f
UNIQ="d0:bc:c1:db:1d:2f"
"#
    .to_vec()
}

/// Builds the uevent file contents for an eventX node. Major 13 matches the
/// "c13:N" device records served under /run/udev/data; the minor is the
/// device's counter.
fn make_evdev_uevent(dev: &DeviceState) -> Vec {
    format!(
        "MAJOR=13\nMINOR={}\nDEVNAME=input/{}\n",
        dev.counter,
        dev.eventname.to_str().unwrap()
    )
    .as_bytes()
    .to_vec()
}

/// If `p` is `prefix` followed by at least one path component, returns the
/// first component after the prefix (e.g. the inputX/eventX name).
fn matches_prefix_with_name(p: &Path, prefix: impl AsRef) -> Option<&OsStr> {
    match p.strip_prefix(prefix).ok()?.components().next() {
        Some(std::path::Component::Normal(devname)) => Some(devname),
        _ => None,
    }
}

================================================ FILE: mm-server/src/session/input.rs ================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

use std::{
    ffi::{OsStr, OsString},
    path::Path,
    sync::Arc,
};

use fuser as fuse;
use parking_lot::Mutex;
use southpaw::{
    sys::{EV_ABS, EV_KEY},
    AbsAxis, AbsInfo, InputEvent, KeyCode,
};
use tracing::{debug, error};

use crate::container::Container;

mod udevfs;
use udevfs::*;

use super::compositor::ButtonState;

/// A simulated gamepad layout.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
pub enum GamepadLayout {
    #[default]
    GenericDualStick,
}

/// Manages input devices (mostly gamepads) in a container using a variety of
/// well-intentioned but horrible hacks.
pub struct InputDeviceManager { southpaw: southpaw::DeviceTree, state: Arc>, } struct DeviceState { id: u64, counter: u16, devname: OsString, // inputX eventname: OsString, // eventX } #[derive(Default)] struct InputManagerState { counter: u16, devices: Vec, } impl InputManagerState { fn device_by_id(&self, id: u64) -> Option<&DeviceState> { self.devices.iter().find(|dev| dev.id == id) } fn device_by_devname(&self, name: impl AsRef) -> Option<&DeviceState> { self.devices.iter().find(|dev| dev.devname == name.as_ref()) } fn device_by_eventname(&self, name: impl AsRef) -> Option<&DeviceState> { self.devices .iter() .find(|dev| dev.eventname == name.as_ref()) } } /// A handle for a plugged gamepad. pub struct GamepadHandle { device: southpaw::Device, ev_buffer: Vec, pub permanent: bool, } impl GamepadHandle { pub(crate) fn axis(&mut self, axis_code: u32, value: f64) { let value = value.clamp(-1.0, 1.0) * 128.0 + 128.0; self.ev_buffer.push(InputEvent::new( EV_ABS, axis_code as u16, value.floor() as i32, )); } pub(crate) fn trigger(&mut self, trigger_code: u32, value: f64) { let value = value.clamp(0.0, 1.0) * 256.0; self.ev_buffer.push(InputEvent::new( EV_ABS, trigger_code as u16, value.floor() as i32, )) } pub(crate) fn input(&mut self, button_code: u32, state: ButtonState) { let value = match state { super::compositor::ButtonState::Pressed => 1, super::compositor::ButtonState::Released => 0, }; // The DualSense sends D-pad buttons as ABS_HAT0{X,Y}. let key_code = southpaw::KeyCode::try_from(button_code as u16); if let Some((axis, direction)) = match key_code { Ok(KeyCode::BtnDpadUp) => Some((AbsAxis::HAT0Y, -1)), Ok(KeyCode::BtnDpadDown) => Some((AbsAxis::HAT0Y, 1)), Ok(KeyCode::BtnDpadLeft) => Some((AbsAxis::HAT0X, -1)), Ok(KeyCode::BtnDpadRight) => Some((AbsAxis::HAT0X, 1)), _ => None, } { // Simulate a press and release, each in a frame. 
self.ev_buffer .push(InputEvent::new(EV_ABS, axis, value * direction)); return; } self.ev_buffer .push(InputEvent::new(EV_KEY, button_code as u16, value)); } pub(crate) fn frame(&mut self) { if let Err(err) = self.device.publish_packet(&self.ev_buffer) { error!(?err, "failed to publish event packet to device"); } self.ev_buffer.clear(); } } impl InputDeviceManager { pub fn new(container: &mut Container) -> anyhow::Result { let state = Arc::new(Mutex::new(InputManagerState::default())); let udevfs_path = container.intern_run_path().join(".udevfs"); let southpaw_path = container.intern_run_path().join(".southpaw"); let udevfs = UdevFs::new(state.clone()); let udevfs_path_clone = udevfs_path.clone(); let southpaw = southpaw::DeviceTree::new(); let southpaw_clone = southpaw.clone(); let southpaw_path_clone = southpaw_path.clone(); container.setup_hook(move |c| { let mode = 0o755 | rustix::fs::FileType::Directory.as_raw_mode(); let device_fd = c.fuse_mount(udevfs_path_clone, "udevfs", mode)?; let mut session = fuse::Session::from_fd(udevfs, device_fd, fuse::SessionACL::Owner); std::thread::spawn(move || session.run()); let device_fd = c.fuse_mount(southpaw_path_clone, "southpaw", mode)?; southpaw_clone.wrap_fd(device_fd); // Headless servers won't have /sys/devices/virtual/input, and we // can't mkdir the mount point, because it's sysfs. if !Path::new("/sys/devices/virtual/input").exists() { c.fs_mount( "/sys/devices/virtual", "tmpfs", rustix::mount::MountAttrFlags::empty(), [(c"mode", c"0777")], )?; } Ok(()) }); container.internal_bind_mount( udevfs_path.join("sys/devices/virtual/input"), "/sys/devices/virtual/input", ); container.internal_bind_mount(udevfs_path.join("sys/class/input"), "/sys/class/input"); container.internal_bind_mount(udevfs_path.join("run/udev"), "/run/udev"); container.internal_bind_mount(southpaw_path, "/dev/input"); // Shadow /sys/class/hidraw. 
if Path::new("/sys/class/hidraw").exists() { container .internal_bind_mount(udevfs_path.join("sys/class/hidraw"), "/sys/class/hidraw"); } // Without this, udev refuses to accept our FUSE filesystem. container.set_env("SYSTEMD_DEVICE_VERIFY_SYSFS", "false"); Ok(Self { state, southpaw }) } pub fn plug_gamepad( &mut self, id: u64, _layout: GamepadLayout, permanent: bool, ) -> anyhow::Result { debug!(id, ?_layout, "gamepad plugged"); let mut guard = self.state.lock(); guard.counter += 1; let counter = guard.counter; let devname = OsStr::new(&format!("input{counter}")).to_owned(); let eventname = OsStr::new(&format!("event{counter}")).to_owned(); let xy_absinfo = AbsInfo { value: 128, minimum: 0, maximum: 255, ..Default::default() }; let trigger_absinfo = AbsInfo { value: 0, minimum: 0, maximum: 255, ..Default::default() }; let dpad_absinfo = AbsInfo { value: 0, minimum: -1, maximum: 1, ..Default::default() }; let device = southpaw::Device::builder() .name("Magic Mirror Emulated Controller") .id(southpaw::BusType::Usb, 1234, 4567, 111) .supported_key_codes([ KeyCode::BtnSouth, KeyCode::BtnNorth, KeyCode::BtnEast, KeyCode::BtnWest, KeyCode::BtnTl, KeyCode::BtnTr, KeyCode::BtnTl2, KeyCode::BtnTr2, KeyCode::BtnSelect, KeyCode::BtnStart, KeyCode::BtnMode, KeyCode::BtnThumbl, KeyCode::BtnThumbr, ]) .supported_absolute_axis(AbsAxis::X, xy_absinfo) .supported_absolute_axis(AbsAxis::Y, xy_absinfo) .supported_absolute_axis(AbsAxis::RX, xy_absinfo) .supported_absolute_axis(AbsAxis::RY, xy_absinfo) .supported_absolute_axis(AbsAxis::Z, trigger_absinfo) .supported_absolute_axis(AbsAxis::RZ, trigger_absinfo) .supported_absolute_axis(AbsAxis::HAT0X, dpad_absinfo) .supported_absolute_axis(AbsAxis::HAT0Y, dpad_absinfo) .add_to_tree(&mut self.southpaw, &eventname)?; guard.devices.push(DeviceState { id, counter, devname, eventname, }); Ok(GamepadHandle { device, ev_buffer: Vec::new(), permanent, }) } } #[cfg(test)] mod test { use std::{fs::File, io::Read as _}; use 
rustix::pipe::{pipe_with, PipeFlags};

    use super::{GamepadLayout, InputDeviceManager};
    use crate::{config::HomeIsolationMode, container::Container};

    /// Spawns `cmd` in a fresh container with two emulated gamepads plugged
    /// in (ids 1234 and 5678), and returns everything the child wrote to
    /// stdout.
    // NOTE(review): the generic parameter list (over `T`) and the `Result`
    // payload type were lost in text extraction.
    fn run_in_container_with_gamepads(cmd: impl AsRef<[T]>) -> anyhow::Result
    where
        T: AsRef,
    {
        let command = cmd
            .as_ref()
            .iter()
            .map(|s| s.as_ref().to_owned().into())
            .collect();

        let mut container = Container::new(command, HomeIsolationMode::Tmpfs)?;

        // Capture the child's stdout through a pipe.
        let (pipe_rx, pipe_tx) = pipe_with(PipeFlags::CLOEXEC)?;
        container.set_stdout(pipe_tx)?;

        container.set_env("SYSTEMD_LOG_LEVEL", "debug");

        let mut input_manager = InputDeviceManager::new(&mut container)?;
        let mut child = container.spawn()?;

        let _ = input_manager.plug_gamepad(1234, GamepadLayout::GenericDualStick, false)?;
        let _ = input_manager.plug_gamepad(5678, GamepadLayout::GenericDualStick, false)?;

        let _ = child.wait();

        let mut buf = String::new();
        File::from(pipe_rx).read_to_string(&mut buf)?;
        Ok(buf)
    }

    #[test_log::test]
    fn list_devices_subsystem() -> anyhow::Result<()> {
        // `udevadm trigger --dry-run --verbose` prints the sysfs path of each
        // matched device, one per line; this exercises the whole udevfs
        // enumeration path.
        let output = run_in_container_with_gamepads([
            "udevadm",
            "--debug",
            "trigger",
            "--dry-run",
            "--verbose",
            "--subsystem-match",
            "input",
        ])?;

        let mut expected = String::new();
        for path in [
            "/sys/devices/virtual/input/input1",
            "/sys/devices/virtual/input/input1/event1",
            "/sys/devices/virtual/input/input2",
            "/sys/devices/virtual/input/input2/event2",
        ] {
            expected.push_str(path);
            expected.push('\n');
        }

        pretty_assertions::assert_eq!(output, expected);
        Ok(())
    }
}

================================================ FILE: mm-server/src/session/reactor.rs ================================================
use std::{
    collections::BTreeMap,
    ffi::{OsStr, OsString},
    fs::File,
    io::{BufRead, BufReader},
    os::fd::AsRawFd,
    path::{Path, PathBuf},
    sync::Arc,
    time,
};

use anyhow::{bail, Context as _};
use crossbeam_channel as crossbeam;
use lazy_static::lazy_static;
use tracing::{debug, trace, trace_span};

use super::{
    audio,
    compositor::{self, xwayland, Compositor},
    control::{AudioStreamParams, ControlMessage, DisplayParams,
SessionEvent, VideoStreamParams}, input, video, GamepadLayout, SessionHandle, }; use crate::{ config::AppConfig, container::{Container, ContainerHandle}, pixel_scale::PixelScale, server::stream::StreamWriter, vulkan::VkContext, waking_sender::WakingSender, }; lazy_static! { pub static ref EPOCH: std::time::Instant = std::time::Instant::now(); } const READY_TIMEOUT: std::time::Duration = time::Duration::from_secs(30); const DISPLAY: mio::Token = mio::Token(0); const ACCEPT: mio::Token = mio::Token(1); const CHILD: mio::Token = mio::Token(2); const WAKER: mio::Token = mio::Token(3); const TIMER: mio::Token = mio::Token(4); const XDISPLAY: mio::Token = mio::Token(10); const XWAYLAND: mio::Token = mio::Token(11); const XWAYLAND_READY: mio::Token = mio::Token(12); pub struct Reactor { poll: mio::Poll, waker: Arc, compositor: Compositor, session_handle: SessionHandle, listening_socket: wayland_server::ListeningSocket, wayland_display: wayland_server::Display, app_config: AppConfig, child: ContainerHandle, child_debug_log: Option, display_params: DisplayParams, new_display_params: Option, audio_pipeline: audio::EncodePipeline, video_pipeline: Option, new_video_stream_params: Option, input_manager: input::InputDeviceManager, gamepads: BTreeMap, xwayland: Option, xwayland_debug_log: Option, pending_attachments: Vec, ready_once: Option>>, timer: mio_timerfd::TimerFd, sleeping: bool, shutting_down: bool, vk: Arc, } impl Reactor { pub fn run( vk: Arc, app_config: AppConfig, display_params: DisplayParams, permanent_gamepads: Vec<(u64, GamepadLayout)>, bug_report_dir: Option, ready_send: oneshot::Sender>, ) -> anyhow::Result<()> { let mut display = wayland_server::Display::new().context("failed to create display")?; let ui_scale = if app_config.force_1x_scale { PixelScale::ONE } else { display_params.ui_scale }; trace!( %ui_scale, width = display_params.width, height = display_params.height, "configuring virtual display" ); // Create wayland globals. 
let dh = display.handle(); compositor::create_globals(&dh); let mut container = Container::new( app_config.command.clone(), app_config.home_isolation_mode.clone(), ) .context("initializing container")?; for (k, v) in &app_config.env { container.set_env(k, v); } let poll = mio::Poll::new()?; let waker = Arc::new(mio::Waker::new(poll.registry(), WAKER)?); let handle = SessionHandle::new(waker.clone()); let display_fd = display.backend().poll_fd().as_raw_fd(); poll.registry().register( &mut mio::unix::SourceFd(&display_fd), DISPLAY, mio::Interest::READABLE, )?; // Bind the listening socket. let socket_name = gen_socket_name(); let socket_path = container.extern_run_path().join(&socket_name); let listening_socket = wayland_server::ListeningSocket::bind_absolute(socket_path.clone())?; trace!(?socket_path, "bound wayland socket"); let listener_fd = listening_socket.as_raw_fd(); poll.registry().register( &mut mio::unix::SourceFd(&listener_fd), ACCEPT, mio::Interest::READABLE, )?; // Set up the pulse audio server. let audio_pipeline = audio::EncodePipeline::new(handle.clone(), container.extern_run_path())?; // Set up compositor state. let compositor = compositor::Compositor::new( vk.clone(), handle.clone(), DisplayParams { ui_scale, // Overridden by force_1x_scale. ..display_params }, )?; // Set up input emulation (this is just for gamepads). let mut input_manager = input::InputDeviceManager::new(&mut container)?; let mut gamepads = BTreeMap::new(); for (pad_id, layout) in permanent_gamepads { let dev = input_manager.plug_gamepad(pad_id, layout, true)?; gamepads.insert(pad_id, dev); } // Spawn Xwayland, if we're using it. let (xwayland, xwayland_recv, xwayland_debug_log) = if app_config.xwayland { let mut xwayland_debug_log = if let Some(bug_report_dir) = bug_report_dir.as_ref() { let path = bug_report_dir.join("xwayland.log"); Some(std::fs::File::create(path).context("failed to create xwayland logfile")?) 
} else { None }; let (output_send, mut output_recv) = mio::unix::pipe::new()?; let mut xwayland = match xwayland::XWayland::spawn( &mut display.handle(), container.extern_run_path(), output_send, ) { Ok(xw) => xw, Err(e) => { // Make sure we save any errors. dump_child_output( &mut BufReader::new(&mut output_recv), &mut xwayland_debug_log, ); return Err(e).context("spawning Xwayland"); } }; // Xwayland writes to this pipe when it's ready. poll.registry().register( &mut xwayland.displayfd_recv, XWAYLAND_READY, mio::Interest::READABLE, )?; // Stderr/stdout of the xwayland process. poll.registry() .register(&mut output_recv, XWAYLAND, mio::Interest::READABLE)?; (Some(xwayland), Some(output_recv), xwayland_debug_log) } else { (None, None, None) }; // Spawn the client with a pipe as stdout/stderr. let (pipe_send, mut pipe_recv) = mio::unix::pipe::new()?; container.set_stdout(&pipe_send)?; container.set_stderr(&pipe_send)?; drop(pipe_send); // Set the wayland socket and X11 sockets. The wayland socket is a // relative path inside XDG_RUNTIME_DIR. The X11 socket is special // and has to be in a specific location for XCB to work on all systems. container.set_env("WAYLAND_DISPLAY", &socket_name); if let Some(xwayland) = &xwayland { xwayland.prepare_socket(&mut container); } // Shadow pipewire, just in case. container.set_env("PIPEWIRE_REMOTE", "(null)"); let child = match container.spawn() { Ok(ch) => ch, Err(e) => { // Make sure we pump the child stdio and catch any container-related // error. 
let mut debug_log = bug_report_dir .as_ref() .and_then(|dir| std::fs::File::create(dir.join("child.log")).ok()); let mut child_output = BufReader::new(&mut pipe_recv); dump_child_output(&mut child_output, &mut debug_log); return Err(e).context("starting application container"); } }; poll.registry().register( &mut mio::unix::SourceFd(&child.pidfd().as_raw_fd()), CHILD, mio::Interest::READABLE, )?; poll.registry() .register(&mut pipe_recv, CHILD, mio::Interest::READABLE)?; // Use `glxinfo` and `eglinfo` to generate more debugging help. if let Some(bug_report_dir) = bug_report_dir.as_ref() { let p = bug_report_dir.to_owned(); let wayland_socket = socket_name.clone(); let x11_socket = xwayland .as_ref() .map(|x| x.display_socket.inner_path().clone()); std::thread::spawn(move || { save_glxinfo_eglinfo( &p, &wayland_socket, x11_socket.as_ref().map(|p| p.as_os_str()), ); }); } // If bug report mode is enabled, save the stdout/stderr of the child to // a logfile. let child_debug_log = if let Some(bug_report_dir) = bug_report_dir.as_ref() { let path = bug_report_dir.join(format!("child-{}.log", child.pid().as_raw_nonzero())); Some(std::fs::File::create(path).context("failed to create child logfile")?) } else { None }; // Framerate timer (simulates vblank). 
let mut timer = mio_timerfd::TimerFd::new(mio_timerfd::ClockId::Monotonic)?; poll.registry() .register(&mut timer, TIMER, mio::Interest::READABLE)?; let mut reactor = Self { poll, waker, wayland_display: display, compositor, session_handle: handle, listening_socket, app_config, child, child_debug_log, display_params, new_display_params: None, audio_pipeline, video_pipeline: None, new_video_stream_params: None, input_manager, gamepads, pending_attachments: Vec::new(), xwayland, xwayland_debug_log, ready_once: Some(ready_send), timer, sleeping: false, shutting_down: false, vk, }; reactor.main_loop(pipe_recv, xwayland_recv) } fn main_loop( &mut self, mut child_pipe: mio::unix::pipe::Receiver, mut xwayland_pipe: Option, ) -> Result<(), anyhow::Error> { let mut events = mio::Events::with_capacity(64); let (control_send, control_recv) = crossbeam::unbounded(); let control_send = WakingSender::new(self.waker.clone(), control_send); let start = time::Instant::now(); let mut child_output = BufReader::new(&mut child_pipe); let mut xwayland_output = xwayland_pipe.as_mut().map(BufReader::new); loop { trace_span!("poll").in_scope(|| self.poll.poll(&mut events, None))?; for event in events.iter() { match event.token() { ACCEPT => { if let Some(client_stream) = self.listening_socket.accept()? { let _client = self.wayland_display.handle().insert_client( client_stream, Arc::new(compositor::ClientState::default()), )?; debug!("client app connected"); } } CHILD if event.is_read_closed() => { self.child.wait()?; self.session_handle.kick_clients(); if self.ready_once.is_some() { // The client exited immediately, which is an error. 
                            bail!("client exited without doing anything");
                        } else {
                            return Ok(());
                        }
                    }
                    // Readable (but not closed) CHILD token: the child wrote to
                    // its stdout/stderr pipe; drain it into the log.
                    CHILD if event.is_readable() => {
                        dump_child_output(&mut child_output, &mut self.child_debug_log)
                    }
                    // The waker fires when another thread queues a control
                    // message; drain the channel completely before returning
                    // to poll.
                    WAKER => loop {
                        match control_recv.try_recv() {
                            Ok(ControlMessage::Stop) => {
                                self.session_handle.kick_clients();
                                self.shutting_down = true;
                                trace!("shutting down");

                                // Usually, TERM doesn't work, because the
                                // process is PID 1 in the container.
                                self.child.signal(rustix::process::Signal::KILL)?;
                            }
                            Ok(msg) => self.handle_control_message(msg)?,
                            Err(crossbeam::TryRecvError::Empty) => break,
                            Err(crossbeam::TryRecvError::Disconnected) => {
                                panic!("control channel disconnected")
                            }
                        }
                    },
                    // Wayland protocol traffic from app clients.
                    DISPLAY => {
                        trace!("dispatching display");
                        self.wayland_display
                            .dispatch_clients(&mut self.compositor)
                            .context("failed to dispatch the wayland display")?;
                    }
                    // X11 window-management traffic from Xwayland.
                    XDISPLAY => {
                        trace!("dispatching xwm");
                        self.compositor
                            .dispatch_xwm()
                            .context("failed to dispatch the xwm")?;
                    }
                    // Xwayland writes to displayfd_recv once it's ready to
                    // accept connections; after that the pipe is no longer
                    // needed and we switch to dispatching the XWM socket.
                    XWAYLAND_READY => {
                        let xwayland = self.xwayland.as_mut().unwrap();
                        if let Some(socket) = xwayland.poll_ready()? {
                            self.poll
                                .registry()
                                .deregister(&mut xwayland.displayfd_recv)?;

                            // Setup the XWM connection to the Xwayland server.
                            let fd = self.compositor.insert_xwayland(socket)?;
                            self.poll.registry().register(
                                &mut mio::unix::SourceFd(&fd.as_raw_fd()),
                                XDISPLAY,
                                mio::Interest::READABLE,
                            )?;
                        }
                    }
                    // Xwayland's stdout/stderr pipe closed: reap the process.
                    XWAYLAND if event.is_read_closed() => {
                        self.xwayland.as_mut().unwrap().child.wait()?;
                    }
                    XWAYLAND if event.is_readable() => {
                        dump_child_output(
                            xwayland_output.as_mut().unwrap(),
                            &mut self.xwayland_debug_log,
                        );
                    }
                    // The vblank timer fired: apply any deferred display
                    // resize, then render a frame.
                    TIMER => {
                        self.timer.read()?;

                        // Check if we need to resize the virtual display.
                        if let Some(new_params) = self.new_display_params.take() {
                            self.update_display_params(new_params)?;

                            // Update the render timer to match the new framerate.
self.timer .set_timeout_interval(&time::Duration::from_secs_f64( 1.0 / self.display_params.framerate as f64, ))?; } self.frame()?; } _ => unreachable!(), } } if !self.shutting_down { self.idle()?; } // Check that we haven't timed out waiting for the client to start up. if self.ready_once.is_some() && self.compositor.surfaces_ready() { self.ready_once.take().unwrap().send(control_send.clone())?; } else if self.ready_once.is_some() && start.elapsed() > READY_TIMEOUT { self.child.signal(rustix::process::Signal::KILL)?; bail!("timed out waiting for client"); } // Sleep if we're not active. if !self.sleeping && !self.active() { self.sleeping = true; self.timer .set_timeout_interval(&time::Duration::from_secs(1))?; } else if self.sleeping && self.active() { self.sleeping = false; self.timer .set_timeout_interval(&time::Duration::from_secs_f64( 1.0 / self.display_params.framerate as f64, ))?; } } } fn idle(&mut self) -> anyhow::Result<()> { // Accept any waiting clients, but only if we're not mid-resize. if !self.pending_attachments.is_empty() && self.new_display_params.is_none() && self.compositor.surfaces_ready() { let pending_attachments = self.pending_attachments.drain(..).collect::>(); for attach_msg in pending_attachments { if let ControlMessage::Attach { id, sender, video_params, audio_params, stream_writer, ready, } = attach_msg { // Check if the caller is still waiting. if ready.send(()).is_ok() { self.attach(id, sender, video_params, audio_params, stream_writer)?; } } else { unreachable!() } } } // Perform compositor upkeep. self.compositor.idle(self.active())?; // Send pending controller SYN_REPORT events. for (_, dev) in self.gamepads.iter_mut() { dev.frame() } // Flush events to the app. 
self.wayland_display.flush_clients()?; Ok(()) } fn active(&self) -> bool { self.session_handle.num_attachments() > 0 || !self.pending_attachments.is_empty() } fn update_display_params(&mut self, params: DisplayParams) -> anyhow::Result<()> { let old = self.display_params; let old_ui_scale = self.display_params.ui_scale; let new_ui_scale = if self.app_config.force_1x_scale { PixelScale::ONE } else { params.ui_scale }; let size_changed = old.width != params.width || old.height != params.height; let scale_changed = old_ui_scale != new_ui_scale; let framerate_changed = old.framerate != params.framerate; if size_changed || scale_changed || framerate_changed { debug!( old_width = old.width, new_width = params.width, old_height = old.height, new_height = params.height, old_framerate = old.framerate, new_framerate = params.framerate, old_ui_scale = %old_ui_scale, new_ui_scale = %new_ui_scale, "resizing output", ); // If the size or framerate is different, force the client to reattach. // TODO: if we support multiple attachments, or attachments that // differ in resolution from the render res, we need to check for // that here. For now, it's safe to just kill the attachment streams. let force_reattach = size_changed || framerate_changed; self.compositor.update_display_params( DisplayParams { ui_scale: new_ui_scale, ..params }, // If we're forcing clients to reattach, the attachment is about // end, so configure the surfaces as inactive. !force_reattach, )?; self.session_handle .dispatch(SessionEvent::DisplayParamsChanged { params, reattach: force_reattach, }); if force_reattach { // Clear any pending attachments which don't match the new output. self.pending_attachments.retain(|pending| { let ControlMessage::Attach { video_params: VideoStreamParams { width, height, .. }, .. } = pending else { unreachable!() }; *width == params.width && *height == params.height }); // Clear any current attachments. 
self.session_handle.remove_all(); self.audio_pipeline.stop_stream(); self.video_pipeline = None; self.new_video_stream_params = None; } } else if params.ui_scale != old.ui_scale { // Synthesize a param change if we are forcing 1x scale. self.session_handle .dispatch(SessionEvent::DisplayParamsChanged { params, reattach: false, }); } self.display_params = DisplayParams { ui_scale: new_ui_scale, ..params }; Ok(()) } fn frame(&mut self) -> anyhow::Result<()> { #[cfg(feature = "tracy")] tracy_client::frame_mark(); if self.session_handle.num_attachments() == 0 { return Ok(()); } if !self.compositor.surfaces_ready() { return Ok(()); } if let Some(params) = self.new_video_stream_params.take() { self.video_pipeline = Some(video::EncodePipeline::new( self.vk.clone(), self.session_handle.clone(), self.display_params, params, )?); } let Some(video_pipeline) = &mut self.video_pipeline else { return Ok(()); }; // Composite visible surfaces. self.compositor.composite_frame(video_pipeline)?; // Render the cursor, if needed. self.compositor.render_cursor()?; Ok(()) } fn attach( &mut self, id: u64, sender: crossbeam::Sender, video_params: VideoStreamParams, audio_params: AudioStreamParams, stream_writer: StreamWriter, ) -> anyhow::Result<()> { if self.session_handle.num_attachments() > 0 { unimplemented!(); } self.session_handle.insert_client(id, sender, stream_writer); self.new_video_stream_params = Some(video_params); self.audio_pipeline.restart_stream(audio_params)?; self.compositor.update_focus_and_visibility(true)?; self.compositor.dispatch_cursor(); if let Some(coords) = self.compositor.default_seat.pointer_locked() { let (x, y) = coords.into(); self.session_handle .dispatch(SessionEvent::PointerLocked(x, y)); } Ok(()) } fn handle_control_message(&mut self, msg: ControlMessage) -> anyhow::Result<()> { if self.shutting_down { // We're about to shut down, so ignore all messages. return Ok(()); } // Attachments get handled asynchronously. 
if matches!(msg, ControlMessage::Attach { .. }) { self.pending_attachments.push(msg); return Ok(()); } match msg { ControlMessage::Detach(id) => { self.session_handle.remove_client(id); self.pending_attachments.retain(|msg| { let ControlMessage::Attach { id: pending_id, .. } = msg else { unreachable!(); }; *pending_id != id }); if !self.active() { self.audio_pipeline.stop_stream(); self.video_pipeline = None; self.compositor.update_focus_and_visibility(false)?; } } ControlMessage::RefreshVideo => { if let Some(video) = &mut self.video_pipeline { video.request_refresh(); } } ControlMessage::UpdateDisplayParams(params) => { // Updates once per render. self.new_display_params = Some(params); } ControlMessage::KeyboardInput { .. } | ControlMessage::PointerInput { .. } | ControlMessage::PointerMotion { .. } | ControlMessage::RelativePointerMotion { .. } | ControlMessage::PointerAxis(_, _) | ControlMessage::PointerAxisDiscrete(_, _) | ControlMessage::PointerEntered | ControlMessage::PointerLeft => self.compositor.handle_input_event(msg), ControlMessage::GamepadAvailable(id) => { use std::collections::btree_map::Entry; if let Entry::Vacant(e) = self.gamepads.entry(id) { e.insert(self.input_manager.plug_gamepad( id, input::GamepadLayout::GenericDualStick, false, )?); } } ControlMessage::GamepadUnavailable(id) => { use std::collections::btree_map::Entry; match self.gamepads.entry(id) { Entry::Occupied(v) if !v.get().permanent => { v.remove(); } _ => (), } } ControlMessage::GamepadAxis { id, axis_code, value, } => { if let Some(gamepad) = self.gamepads.get_mut(&id) { gamepad.axis(axis_code, value); } } ControlMessage::GamepadTrigger { id, trigger_code, value, } => { if let Some(gamepad) = self.gamepads.get_mut(&id) { gamepad.trigger(trigger_code, value); } } ControlMessage::GamepadInput { id, button_code, state, } => { if let Some(gamepad) = self.gamepads.get_mut(&id) { gamepad.input(button_code, state); } } // Handled above. 
ControlMessage::Stop | ControlMessage::Attach { .. } => unreachable!(), } Ok(()) } } fn gen_socket_name() -> OsString { use rand::Rng; let id: u64 = rand::thread_rng().gen(); format!("magic-mirror-{}", id).into() } fn dump_child_output(pipe: &mut impl BufRead, debug_log: &mut Option) { let mut buf = String::new(); loop { buf.clear(); match pipe.read_line(&mut buf) { Ok(1..) => { if let Some(debug_log) = debug_log { let _ = std::io::Write::write_all(debug_log, buf.as_bytes()); } let buf = buf.trim(); if !buf.is_empty() { trace!(target: "mmserver::session::child", "{}", buf); } } Ok(0) => break, Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => break, Err(e) => { debug!("child error: {:?}", e); break; } } } } fn save_glxinfo_eglinfo( bug_report_dir: impl AsRef, socket_name: &OsStr, x11_display: Option<&OsStr>, ) { use std::process::Command; if let Some(x11_display) = x11_display { match Command::new("glxinfo") .env_clear() .env("DISPLAY", x11_display) .output() { Ok(output) => { let _ = std::fs::write(bug_report_dir.as_ref().join("glxinfo.log"), output.stdout); } Err(e) => debug!("failed to run glxinfo: {:#}", e), } } match Command::new("eglinfo") .env_clear() .env("WAYLAND_DISPLAY", socket_name) .output() { Ok(output) => { let _ = std::fs::write(bug_report_dir.as_ref().join("eglinfo.log"), output.stdout); } Err(e) => debug!("failed to run eglinfo: {:#}", e), } } ================================================ FILE: mm-server/src/session/video/composite.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use std::sync::Arc; use anyhow::Context; use ash::vk; use cstr::cstr; use crate::{color::ColorSpace, vulkan::*}; pub const BLEND_FORMAT: vk::Format = vk::Format::R16G16B16A16_SFLOAT; // Also defined in composite.slang. 
/// Per-surface color space tag, passed to the compositing shader via push
/// constants. Discriminants must stay in sync with the matching enum in
/// composite.slang.
#[repr(u32)]
#[derive(Copy, Clone, Debug)]
enum SurfaceColorSpace {
    Srgb = 0,
    LinearExtSrgb = 1,
    Hdr10 = 2,
}

// NOTE(review): the generic argument appears to have been lost in extraction;
// presumably this is `From<ColorSpace>`, matching the `cs: ColorSpace`
// parameter below — confirm against the repository.
impl From for SurfaceColorSpace {
    fn from(cs: ColorSpace) -> Self {
        match cs {
            ColorSpace::Srgb => SurfaceColorSpace::Srgb,
            ColorSpace::LinearExtSrgb => SurfaceColorSpace::LinearExtSrgb,
            ColorSpace::Hdr10 => SurfaceColorSpace::Hdr10,
        }
    }
}

/// Push-constant block for one composited surface. Field order and types must
/// match the PushConstants struct in composite.slang.
#[derive(Copy, Clone, Debug)]
#[repr(C)]
#[allow(dead_code)]
struct SurfacePC {
    // Should be in texture coords: [0, 1].
    src_pos: glam::Vec2,
    src_size: glam::Vec2,
    // Should be in clip coords: [-1, 1].
    // TODO: suck it up and use a matrix transform (mat3) to support rotations.
    dst_pos: glam::Vec2,
    dst_size: glam::Vec2,
    color_space: SurfaceColorSpace,
}

/// Composites surfaces into a blend image.
pub struct CompositePipeline {
    descriptor_set_layout: vk::DescriptorSetLayout,
    pipeline_layout: vk::PipelineLayout,
    pipeline: vk::Pipeline,
    sampler: vk::Sampler,
    // NOTE(review): generic argument lost in extraction; presumably
    // `Arc<VkContext>` (re-exported from `crate::vulkan`) — confirm.
    vk: Arc,
}

impl CompositePipeline {
    /// Creates the graphics pipeline, layouts, and sampler used to composite
    /// surfaces into the blend image.
    pub fn new(vk: Arc) -> anyhow::Result {
        // A simple bilinear sampler, bound below as an immutable sampler in
        // the descriptor set layout.
        let sampler = {
            let create_info = vk::SamplerCreateInfo::default()
                .mag_filter(vk::Filter::LINEAR)
                .min_filter(vk::Filter::LINEAR)
                .address_mode_u(vk::SamplerAddressMode::REPEAT)
                .address_mode_v(vk::SamplerAddressMode::REPEAT)
                .address_mode_w(vk::SamplerAddressMode::REPEAT);

            unsafe { vk.device.create_sampler(&create_info, None)? }
        };

        // A single combined image sampler (the surface texture) at binding 0,
        // supplied via VK_KHR_push_descriptor rather than a descriptor pool.
        let descriptor_set_layout = {
            let samplers = [sampler];
            let binding = vk::DescriptorSetLayoutBinding::default()
                .binding(0)
                .descriptor_type(vk::DescriptorType::COMBINED_IMAGE_SAMPLER)
                .descriptor_count(1)
                .stage_flags(vk::ShaderStageFlags::FRAGMENT)
                .immutable_samplers(&samplers);

            let bindings = [binding];
            let create_info = vk::DescriptorSetLayoutCreateInfo::default()
                .bindings(&bindings)
                .flags(vk::DescriptorSetLayoutCreateFlags::PUSH_DESCRIPTOR_KHR);

            unsafe {
                vk.device.create_descriptor_set_layout(&create_info, None)?
} }; let pipeline_layout = { let ranges = [vk::PushConstantRange::default() .stage_flags(vk::ShaderStageFlags::VERTEX | vk::ShaderStageFlags::FRAGMENT) .offset(0) .size(std::mem::size_of::() as u32)]; let set_layouts = [descriptor_set_layout]; let create_info = vk::PipelineLayoutCreateInfo::default() .push_constant_ranges(&ranges) .set_layouts(&set_layouts); unsafe { vk.device.create_pipeline_layout(&create_info, None)? } }; let pipeline = { let vert_bytes = include_bytes!(concat!(env!("OUT_DIR"), "/shaders/composite_vert.spv")); let frag_bytes = include_bytes!(concat!(env!("OUT_DIR"), "/shaders/composite_frag.spv")); let vert_shader = load_shader(&vk.device, vert_bytes).context("loading vert.spv")?; let frag_shader = load_shader(&vk.device, frag_bytes).context("loading frag.spv")?; let vert_stage = vk::PipelineShaderStageCreateInfo::default() .stage(vk::ShaderStageFlags::VERTEX) .module(vert_shader) .name(cstr!("main")); let frag_stage = vk::PipelineShaderStageCreateInfo::default() .stage(vk::ShaderStageFlags::FRAGMENT) .module(frag_shader) .name(cstr!("main")); let vertex_input_state = vk::PipelineVertexInputStateCreateInfo::default(); let input_assembly_state = vk::PipelineInputAssemblyStateCreateInfo::default() .topology(vk::PrimitiveTopology::TRIANGLE_STRIP) .primitive_restart_enable(false); let dynamic_state = vk::PipelineDynamicStateCreateInfo::default() .dynamic_states(&[vk::DynamicState::VIEWPORT, vk::DynamicState::SCISSOR]); let viewport_state = vk::PipelineViewportStateCreateInfo::default() .viewport_count(1) .scissor_count(1); let rasterization_state = vk::PipelineRasterizationStateCreateInfo::default() .depth_clamp_enable(false) .rasterizer_discard_enable(false) .polygon_mode(vk::PolygonMode::FILL) .line_width(1.0) .cull_mode(vk::CullModeFlags::NONE) .front_face(vk::FrontFace::CLOCKWISE) .depth_bias_enable(false); let multisample_state = vk::PipelineMultisampleStateCreateInfo::default() .sample_shading_enable(false) 
.rasterization_samples(vk::SampleCountFlags::TYPE_1); let attachment = vk::PipelineColorBlendAttachmentState::default() .color_write_mask(vk::ColorComponentFlags::RGBA) .blend_enable(true) .src_color_blend_factor(vk::BlendFactor::SRC_ALPHA) .dst_color_blend_factor(vk::BlendFactor::ONE_MINUS_SRC_ALPHA) .color_blend_op(vk::BlendOp::ADD) .src_alpha_blend_factor(vk::BlendFactor::ONE) .dst_alpha_blend_factor(vk::BlendFactor::ZERO) .alpha_blend_op(vk::BlendOp::ADD); let attachments = [attachment]; let color_blend_state = vk::PipelineColorBlendStateCreateInfo::default() .logic_op_enable(false) .attachments(&attachments); let formats = [BLEND_FORMAT]; let mut pipeline_rendering = vk::PipelineRenderingCreateInfo::default().color_attachment_formats(&formats); let stages = [vert_stage, frag_stage]; let create_info = vk::GraphicsPipelineCreateInfo::default() .stages(&stages) .vertex_input_state(&vertex_input_state) .input_assembly_state(&input_assembly_state) .dynamic_state(&dynamic_state) .viewport_state(&viewport_state) .rasterization_state(&rasterization_state) .multisample_state(&multisample_state) .color_blend_state(&color_blend_state) .layout(pipeline_layout) .push_next(&mut pipeline_rendering); unsafe { let pipeline = match vk.device.create_graphics_pipelines( vk::PipelineCache::null(), &[create_info], None, ) { Ok(pipelines) => Ok(pipelines[0]), Err((_, e)) => Err(e), }?; vk.device.destroy_shader_module(vert_shader, None); vk.device.destroy_shader_module(frag_shader, None); pipeline } }; Ok(Self { descriptor_set_layout, pipeline_layout, pipeline, sampler, vk, }) } pub unsafe fn begin_compositing(&self, cb: vk::CommandBuffer, render_target: &VkImage) { let device = &self.vk.device; // Set the viewport and scissor. 
let rect = render_target.rect(); { let viewport = vk::Viewport::default() .x(0.0) .y(0.0) .width(render_target.width as f32) .height(render_target.height as f32) .min_depth(0.0) .max_depth(1.0); device.cmd_set_viewport(cb, 0, &[viewport]); device.cmd_set_scissor(cb, 0, &[rect]); } // Attach the render target. let clear_value = vk::ClearValue { color: vk::ClearColorValue { #[cfg(debug_assertions)] float32: [0.0, 0.3, 1.0, 1.0], // Blue for debug. #[cfg(not(debug_assertions))] float32: [0.0, 0.0, 0.0, 1.0], }, }; let color_attachment = vk::RenderingAttachmentInfo::default() .image_view(render_target.view) .image_layout(vk::ImageLayout::ATTACHMENT_OPTIMAL) .load_op(vk::AttachmentLoadOp::CLEAR) .store_op(vk::AttachmentStoreOp::STORE) .clear_value(clear_value); let color_attachments = [color_attachment]; let rendering_info = vk::RenderingInfo::default() .render_area(rect) .color_attachments(&color_attachments) .layer_count(1); device.cmd_begin_rendering(cb, &rendering_info); device.cmd_bind_pipeline(cb, vk::PipelineBindPoint::GRAPHICS, self.pipeline); } /// Draws the surface texture to the output. The texture should already /// be in the correct layout. pub unsafe fn composite_surface( &self, cb: vk::CommandBuffer, view: vk::ImageView, // In clip coordinates. // TODO: mat3 transform dst_pos: glam::Vec2, dst_size: glam::Vec2, ) -> anyhow::Result<()> { let device = &self.vk.device; let color_space = ColorSpace::Srgb; // TODO let pc = SurfacePC { src_pos: glam::Vec2::ZERO, src_size: glam::Vec2::ONE, dst_pos, dst_size, color_space: color_space.into(), }; // Push the texture. 
{ let image_info = vk::DescriptorImageInfo::default() .image_layout(vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL) .image_view(view); let image_infos = [image_info]; let write = vk::WriteDescriptorSet::default() .dst_set(vk::DescriptorSet::null()) .dst_binding(0) .dst_array_element(0) .descriptor_type(vk::DescriptorType::COMBINED_IMAGE_SAMPLER) .image_info(&image_infos); let writes = [write]; unsafe { self.vk.push_ds_api.cmd_push_descriptor_set( cb, vk::PipelineBindPoint::GRAPHICS, self.pipeline_layout, 0, &writes, ); } } device.cmd_push_constants( cb, self.pipeline_layout, vk::ShaderStageFlags::VERTEX | vk::ShaderStageFlags::FRAGMENT, 0, std::slice::from_raw_parts( &pc as *const _ as *const u8, std::mem::size_of::(), ), ); device.cmd_draw(cb, 4, 1, 0, 0); Ok(()) } pub unsafe fn end_compositing(&self, cb: vk::CommandBuffer) { self.vk.device.cmd_end_rendering(cb); } } impl Drop for CompositePipeline { fn drop(&mut self) { let device = &self.vk.device; unsafe { device .queue_wait_idle(self.vk.graphics_queue.queue) .unwrap(); device.destroy_pipeline(self.pipeline, None); device.destroy_descriptor_set_layout(self.descriptor_set_layout, None); device.destroy_pipeline_layout(self.pipeline_layout, None); device.destroy_sampler(self.sampler, None); } } } ================================================ FILE: mm-server/src/session/video/composite.slang ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 import color; const Sampler2D texture; struct VertOutput { float2 uv : TextureCoord; float4 position : SV_Position; }; // This must match the enum in composite.rs. 
enum InputTextureColorSpace { SRGB = 0, LINEAR_EXTENDED_SRGB = 1, HDR10 = 2, } struct PushConstants { float2 src_pos; float2 src_size; float2 dst_pos; float2 dst_size; InputTextureColorSpace color_space; }; [[vk::push_constant]] PushConstants pc; [shader("vertex")] VertOutput vert(uint vid: SV_VertexID) { float2 corner; switch (vid % 4) { case 0: corner = float2(0.0, 0.0); break; case 1: corner = float2(1.0, 0.0); break; case 2: corner = float2(0.0, 1.0); break; case 3: corner = float2(1.0, 1.0); break; } VertOutput output; output.position = float4(pc.dst_pos + pc.dst_size * corner, 0.0, 1.0); output.uv = pc.src_pos + pc.src_size * corner; return output; } float3 linearize(float3 color, InputTextureColorSpace color_space) { switch (color_space) { case InputTextureColorSpace::SRGB: return srgb_eotf(color); case InputTextureColorSpace::LINEAR_EXTENDED_SRGB: return color; case InputTextureColorSpace::HDR10: float3 linear = pq_eotf(color); // The resulting values have the range 0-1, where 1.0 corresponds 10,000 // nits. In order to effectively blend with SDR textures, we need to // scale based on our virtual display brightness, producing values where // 1.0 matches the maximum brightness that SDR content would produce. We // use the Rec. 2408 value of 203 nits for this. On this scale, a value // of 300 nits would result in a scaled value of about 1.47, and 1.0 // would result in about 49.26. Either value would be clipped unless we // use a floating-point blend format (which we do). // TODO: allow HDR metadata to override the scaling factor. This is called // "nominal diffuse white level" or NDWL. linear *= PQ_MAX_WHITE / SDR_REFERENCE_WHITE; return transform(linear, Primaries::BT2020, Primaries::BT709); default: return srgb_eotf(color); } } [shader("fragment")] float4 frag(float2 uv: TextureCoord) : SV_Target { float4 color = texture.Sample(uv); // Wayland specifies that textures have premultiplied alpha. 
If we just // import a dmabuf as as an _SRGB format, the colors are wrong, since vulkan // expects sRGB textures to have not-premultiplied alpha. // // Vulkan normally expects to do the sRGB -> linear conversion when sampling // in the shader. However, we're bypassing that operation here, by importing // the texture as UNORM (even though it's stored as sRGB) and then doing the // conversion manually. if (color.a == 0) return float4(0); else if (pc.color_space == InputTextureColorSpace::LINEAR_EXTENDED_SRGB) // We're already in the right space for blending. return color; color.rgb /= color.a; color.rgb = linearize(color.rgb, pc.color_space); color.rgb *= color.a; return color; } ================================================ FILE: mm-server/src/session/video/convert.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use std::sync::Arc; use ash::vk; use tracing::instrument; use crate::{ color::{ColorSpace, VideoProfile}, vulkan::*, }; // Also defined in convert.slang. #[repr(u32)] #[derive(Debug, Copy, Clone, PartialEq, Eq)] enum InputTextureColorSpace { Srgb = 0, LinearExtSrgb = 1, Hdr10 = 2, } impl From for InputTextureColorSpace { fn from(cs: ColorSpace) -> Self { match cs { ColorSpace::Srgb => InputTextureColorSpace::Srgb, ColorSpace::LinearExtSrgb => InputTextureColorSpace::LinearExtSrgb, ColorSpace::Hdr10 => InputTextureColorSpace::Hdr10, } } } // Also defined in convert.slang. 
/// Output encoding profile for the conversion shader. Discriminants must stay
/// in sync with the matching enum in convert.slang.
#[repr(u32)]
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum OutputProfile {
    Hd = 0,
    Hdr10 = 1,
}

// NOTE(review): the generic argument appears to have been lost in extraction;
// presumably this is `From<VideoProfile>`, matching the `profile:
// VideoProfile` parameter below — confirm against the repository.
impl From for OutputProfile {
    fn from(profile: VideoProfile) -> Self {
        match profile {
            VideoProfile::Hd => OutputProfile::Hd,
            VideoProfile::Hdr10 => OutputProfile::Hdr10,
        }
    }
}

/// Push-constant block for the conversion dispatch. Field order and types
/// must match the PushConstants struct in convert.slang.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
struct ConvertPushConstants {
    input_color_space: InputTextureColorSpace,
    output_profile: OutputProfile,
}

/// A compute pipeline that converts the RGBA blend image into YCbCr planes
/// for encoding.
pub struct ConvertPipeline {
    // If true, chroma is written as a single interleaved UV plane (two
    // storage-image bindings); otherwise as separate U and V planes (three).
    semiplanar: bool,
    descriptor_set_layout: vk::DescriptorSetLayout,
    sampler: vk::Sampler,
    pipeline_layout: vk::PipelineLayout,
    pipeline: vk::Pipeline,
    // NOTE(review): generic argument lost in extraction; presumably
    // `Arc<VkContext>` — confirm.
    vk: Arc,
}

impl ConvertPipeline {
    /// Creates the compute pipeline, loading either the semiplanar or the
    /// multiplanar variant of the conversion shader.
    #[instrument(level = "trace", name = "ConvertPipeline::new", skip_all)]
    pub fn new(vk: Arc, semiplanar: bool) -> anyhow::Result {
        // Both variants are built from convert.slang (see the SEMIPLANAR
        // define there).
        let shader = if semiplanar {
            load_shader(
                &vk.device,
                include_bytes!(concat!(env!("OUT_DIR"), "/shaders/convert_semiplanar.spv")),
            )?
        } else {
            load_shader(
                &vk.device,
                include_bytes!(concat!(env!("OUT_DIR"), "/shaders/convert_multiplanar.spv")),
            )?
        };

        // A bilinear sampler for reading the blend image.
        let sampler = {
            let create_info = vk::SamplerCreateInfo::default()
                .mag_filter(vk::Filter::LINEAR)
                .min_filter(vk::Filter::LINEAR)
                .address_mode_u(vk::SamplerAddressMode::REPEAT)
                .address_mode_v(vk::SamplerAddressMode::REPEAT)
                .address_mode_w(vk::SamplerAddressMode::REPEAT);

            unsafe {
                vk.device.create_sampler(&create_info, None)?
} }; let descriptor_set_layout = unsafe { let samplers = [sampler]; let mut bindings = vec![ vk::DescriptorSetLayoutBinding::default() .binding(0) .descriptor_type(vk::DescriptorType::COMBINED_IMAGE_SAMPLER) .descriptor_count(1) .stage_flags(vk::ShaderStageFlags::COMPUTE) .immutable_samplers(&samplers), vk::DescriptorSetLayoutBinding::default() .binding(1) .descriptor_type(vk::DescriptorType::STORAGE_IMAGE) .descriptor_count(1) .stage_flags(vk::ShaderStageFlags::COMPUTE), vk::DescriptorSetLayoutBinding::default() .binding(2) .descriptor_type(vk::DescriptorType::STORAGE_IMAGE) .descriptor_count(1) .stage_flags(vk::ShaderStageFlags::COMPUTE), ]; if !semiplanar { bindings.push( vk::DescriptorSetLayoutBinding::default() .binding(3) .descriptor_type(vk::DescriptorType::STORAGE_IMAGE) .descriptor_count(1) .stage_flags(vk::ShaderStageFlags::COMPUTE), ); } vk.device.create_descriptor_set_layout( &vk::DescriptorSetLayoutCreateInfo::default().bindings(&bindings), None, )? }; let pipeline_layout = { let ranges = [vk::PushConstantRange::default() .stage_flags(vk::ShaderStageFlags::COMPUTE) .offset(0) .size(std::mem::size_of::() as u32)]; let set_layouts = [descriptor_set_layout]; let create_info = vk::PipelineLayoutCreateInfo::default() .set_layouts(&set_layouts) .push_constant_ranges(&ranges); unsafe { vk.device.create_pipeline_layout(&create_info, None)? 
} }; let pipeline = unsafe { let entry_point = std::ffi::CString::new("main")?; let stage = vk::PipelineShaderStageCreateInfo::default() .stage(vk::ShaderStageFlags::COMPUTE) .module(shader) .name(&entry_point); let create_info = vk::ComputePipelineCreateInfo::default() .stage(stage) .layout(pipeline_layout); let pipeline = match vk.device.create_compute_pipelines( vk::PipelineCache::null(), &[create_info], None, ) { Ok(pipelines) => pipelines[0], Err((_, e)) => return Err(e.into()), }; vk.device.destroy_shader_module(shader, None); pipeline }; Ok(Self { semiplanar, descriptor_set_layout, sampler, pipeline_layout, pipeline, vk, }) } pub unsafe fn cmd_convert( &self, cb: vk::CommandBuffer, width: u32, height: u32, descriptor_set: vk::DescriptorSet, input_color_space: ColorSpace, video_profile: VideoProfile, ) { self.vk .device .cmd_bind_pipeline(cb, vk::PipelineBindPoint::COMPUTE, self.pipeline); self.vk.device.cmd_bind_descriptor_sets( cb, vk::PipelineBindPoint::COMPUTE, self.pipeline_layout, 0, &[descriptor_set], &[], ); let pc = ConvertPushConstants { input_color_space: input_color_space.into(), output_profile: video_profile.into(), }; self.vk.device.cmd_push_constants( cb, self.pipeline_layout, vk::ShaderStageFlags::COMPUTE, 0, std::slice::from_raw_parts( &pc as *const _ as *const u8, std::mem::size_of::(), ), ); // Each workgroup has 16x16 invocations, covering a 32x32 area. let group_count_x = (width + 31) / 32; let group_count_y = (height + 31) / 32; self.vk .device .cmd_dispatch(cb, group_count_x, group_count_y, 1); } pub fn ds_for_conversion( &self, blend_image: &VkImage, planes: &[vk::ImageView], ) -> anyhow::Result { let set_layouts = [self.descriptor_set_layout]; let allocate_info = vk::DescriptorSetAllocateInfo::default() .descriptor_pool(self.vk.descriptor_pool) .set_layouts(&set_layouts); let ds = unsafe { self.vk .device .allocate_descriptor_sets(&allocate_info)? 
.pop() .unwrap() }; let blend_image_infos = [vk::DescriptorImageInfo::default() .image_layout(vk::ImageLayout::GENERAL) .image_view(blend_image.view)]; let blend_write = vk::WriteDescriptorSet::default() .dst_set(ds) .dst_binding(0) .dst_array_element(0) .descriptor_type(vk::DescriptorType::COMBINED_IMAGE_SAMPLER) .image_info(&blend_image_infos); let y_image_infos = [vk::DescriptorImageInfo::default() .image_layout(vk::ImageLayout::GENERAL) .image_view(planes[0])]; let y_write = vk::WriteDescriptorSet::default() .dst_set(ds) .dst_binding(1) .dst_array_element(0) .descriptor_type(vk::DescriptorType::STORAGE_IMAGE) .image_info(&y_image_infos); if self.semiplanar { let uv_image_infos = [vk::DescriptorImageInfo::default() .image_layout(vk::ImageLayout::GENERAL) .image_view(planes[1])]; let uv_write = vk::WriteDescriptorSet::default() .dst_set(ds) .dst_binding(2) .dst_array_element(0) .descriptor_type(vk::DescriptorType::STORAGE_IMAGE) .image_info(&uv_image_infos); let writes = [blend_write, y_write, uv_write]; unsafe { self.vk.device.update_descriptor_sets(&writes, &[]); } } else { let u_image_infos = [vk::DescriptorImageInfo::default() .image_layout(vk::ImageLayout::GENERAL) .image_view(planes[1])]; let u_write = vk::WriteDescriptorSet::default() .dst_set(ds) .dst_binding(2) .dst_array_element(0) .descriptor_type(vk::DescriptorType::STORAGE_IMAGE) .image_info(&u_image_infos); let v_image_infos = [vk::DescriptorImageInfo::default() .image_layout(vk::ImageLayout::GENERAL) .image_view(planes[2])]; let v_write = vk::WriteDescriptorSet::default() .dst_set(ds) .dst_binding(3) .dst_array_element(0) .descriptor_type(vk::DescriptorType::STORAGE_IMAGE) .image_info(&v_image_infos); let writes = [blend_write, y_write, u_write, v_write]; unsafe { self.vk.device.update_descriptor_sets(&writes, &[]); } } Ok(ds) } } impl Drop for ConvertPipeline { fn drop(&mut self) { let device = &self.vk.device; unsafe { device .queue_wait_idle(self.vk.graphics_queue.queue) .unwrap(); 
device.destroy_sampler(self.sampler, None); device.destroy_pipeline(self.pipeline, None); device.destroy_pipeline_layout(self.pipeline_layout, None); device.destroy_descriptor_set_layout(self.descriptor_set_layout, None); } } } ================================================ FILE: mm-server/src/session/video/convert.slang ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 import color; const Sampler2D blend_image; const RWTexture2D luminance; #ifdef SEMIPLANAR const RWTexture2D chroma_uv; #else const RWTexture2D chroma_u; const RWTexture2D chroma_v; #endif // This must match the enum in convert.rs. enum InputTextureColorSpace { SRGB = 0, LINEAR_EXTENDED_SRGB = 1, HDR10 = 2, } /// This must match the enum in convert.rs. enum OutputProfile { HD = 0, HDR10 = 1, } struct PushConstants { InputTextureColorSpace input_color_space; OutputProfile output_profile; } [[vk::push_constant]] PushConstants pc; float3 to_bt709(float3 rgb, InputTextureColorSpace color_space) { float3 linear; switch (color_space) { case InputTextureColorSpace::SRGB: linear = srgb_eotf(rgb); break; case InputTextureColorSpace::HDR10: { // Treat 203 nits as 1.0, and clip everything above that. linear = pq_eotf(rgb); linear = clamp(linear * (PQ_MAX_WHITE / SDR_REFERENCE_WHITE), 0.0, 1.0); break; } case InputTextureColorSpace::LINEAR_EXTENDED_SRGB: linear = clamp(rgb, 0.0, 1.0); break; } return bt709_inverse_eotf(linear); } float3 to_bt2020_pq(float3 rgb, InputTextureColorSpace color_space) { float3 bt2020_linear; switch (color_space) { case InputTextureColorSpace::SRGB: bt2020_linear = transform(srgb_eotf(rgb), Primaries::BT709, Primaries::BT2020); break; case InputTextureColorSpace::LINEAR_EXTENDED_SRGB: bt2020_linear = transform(rgb, Primaries::BT709, Primaries::BT2020); break; case InputTextureColorSpace::HDR10: // Happy identity path. return rgb; } // Tone-map 1.0 to 203 nits, then delinearize. 
return clamp(pq_inverse_eotf(bt2020_linear * (SDR_REFERENCE_WHITE / PQ_MAX_WHITE)), 0.0, 1.0); } [shader("compute")] [numthreads(16, 16)] void main(uint2 self_id: SV_DispatchThreadID) { let coords = uint2(self_id.x * 2, self_id.y * 2); let chroma_coords = coords / 2; int j, k; float us[4]; float vs[4]; for (k = 0; k < 2; k += 1) { for (j = 0; j < 2; j += 1) { let texel_coords = coords + uint2(j, k); float4 texel = blend_image.Load(uint3(texel_coords, 0)); float3 yuv; switch (pc.output_profile) { case OutputProfile::HD: yuv = encode_ycbcr(to_bt709(texel.rgb, pc.input_color_space), YCbCrModel::BT709, false); break; case OutputProfile::HDR10: yuv = encode_ycbcr(to_bt2020_pq(texel.rgb, pc.input_color_space), YCbCrModel::BT2020, false); break; } luminance[texel_coords] = yuv.x; int i = k * 2 + j; us[i] = yuv.y; vs[i] = yuv.z; } } let u = lerp(lerp(us[0], us[1], 0.5), lerp(us[2], us[3], 0.5), 0.5); let v = lerp(lerp(vs[0], vs[1], 0.5), lerp(vs[2], vs[3], 0.5), 0.5); #ifdef SEMIPLANAR chroma_uv[chroma_coords] = float2(u, v); #else chroma_u[chroma_coords] = u; chroma_v[chroma_coords] = v; #endif } ================================================ FILE: mm-server/src/session/video.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use std::{mem::ManuallyDrop, sync::Arc}; use anyhow::anyhow; use ash::vk; mod composite; mod convert; use tracing::{instrument, trace, trace_span, warn}; use super::{ compositor::{self, buffers::SyncobjTimelinePoint}, DisplayParams, SessionHandle, VideoStreamParams, }; use crate::{ color::ColorSpace, encoder::{self}, session::EPOCH, vulkan::*, }; struct Sink(SessionHandle); impl encoder::Sink for Sink { fn write_frame( &mut self, ts: std::time::Instant, frame: bytes::Bytes, hierarchical_layer: u32, is_keyframe: bool, ) { let pts = (ts - *EPOCH).as_millis() as u64; self.0 .dispatch_video_frame(pts, frame, hierarchical_layer, is_keyframe); // Wake the compositor, so it can release 
buffers and send presentation // feedback. let _ = self.0.wake(); } } pub struct SwapFrame { convert_ds: vk::DescriptorSet, // Should be dropped first. draws: Vec<(vk::ImageView, glam::Vec2, glam::Vec2)>, texture_semas: Vec, // Reused each frame. texture_semas_used: usize, /// An RGBA image to composite to. blend_image: VkImage, /// A YUV image we copy to before passing on to the encoder. encode_image: VkImage, plane_views: Vec, staging_cb: vk::CommandBuffer, render_cb: vk::CommandBuffer, use_staging: bool, timeline: VkTimelineSemaphore, tp_staging_done: VkTimelinePoint, tp_render_done: VkTimelinePoint, tp_clear: VkTimelinePoint, // For tracing. staging_ts_pool: VkTimestampQueryPool, staging_span: Option, render_ts_pool: VkTimestampQueryPool, render_span: Option, } pub enum TextureSync { Explicit(SyncobjTimelinePoint), ImplicitInterop, } pub struct EncodePipeline { display_params: DisplayParams, streaming_params: VideoStreamParams, composite_pipeline: composite::CompositePipeline, convert_pipeline: convert::ConvertPipeline, encoder: ManuallyDrop, swap: [SwapFrame; 2], swap_idx: usize, vk: Arc, } impl EncodePipeline { #[instrument(level = "trace", skip_all)] pub fn new( vk: Arc, compositor_handle: SessionHandle, display_params: DisplayParams, streaming_params: VideoStreamParams, ) -> anyhow::Result { if streaming_params.width != display_params.width || streaming_params.height != display_params.height { trace!( ?streaming_params, ?display_params, "stream and display params differ" ); // Superres is not implemented yet. 
unimplemented!() } let sink = Sink(compositor_handle); let mut encoder = encoder::Encoder::new(vk.clone(), streaming_params, display_params.framerate, sink)?; let encode_format = encoder.input_format(); let composite_pipeline = composite::CompositePipeline::new(vk.clone())?; let convert_pipeline = convert::ConvertPipeline::new(vk.clone(), format_is_semiplanar(encode_format))?; let swap = [ new_swapframe(vk.clone(), encoder.create_input_image()?, &convert_pipeline)?, new_swapframe(vk.clone(), encoder.create_input_image()?, &convert_pipeline)?, ]; Ok(Self { display_params, streaming_params, composite_pipeline, convert_pipeline, encoder: ManuallyDrop::new(encoder), swap, swap_idx: 0, vk, }) } // pub fn encode_single_surface(&mut self, surface: wl_surface::WlSurface) { // todo!() // } #[instrument(level = "trace", skip_all)] pub unsafe fn begin(&mut self) -> anyhow::Result { let device = &self.vk.device; let frame = &mut self.swap[self.swap_idx]; let ready = frame.tp_clear.poll()?; // If the previous frame isn't ready, drop this one to let the app // catch up. if !ready { return Ok(false); } // Trace on on the GPU side. if let Some(ref ctx) = self.vk.graphics_queue.tracy_context { if let Some(span) = frame.staging_span.take() { let timestamps = frame.staging_ts_pool.fetch_results(device)?; span.upload_timestamp(timestamps[0], timestamps[1]); } if let Some(span) = frame.render_span.take() { let timestamps = frame.render_ts_pool.fetch_results(device)?; span.upload_timestamp(timestamps[0], timestamps[1]); } // We conditionally create the staging span, below. Rendering always happens. frame.render_span = Some(ctx.span(tracy_client::span_location!("render"))?); } frame.texture_semas_used = 0; frame.tp_staging_done += 10; frame.tp_render_done = &frame.tp_staging_done + 1; frame.tp_clear = &frame.tp_render_done + 1; frame.use_staging = false; begin_command_buffer(device, frame.staging_cb)?; begin_command_buffer(device, frame.render_cb)?; // Record the start timestamp. 
frame.render_ts_pool.cmd_reset(device, frame.render_cb); device.cmd_write_timestamp( frame.render_cb, vk::PipelineStageFlags::TOP_OF_PIPE, frame.render_ts_pool.pool, 0, ); // Transition the blend image to be writable. insert_image_barrier( device, frame.render_cb, frame.blend_image.image, None, vk::ImageLayout::UNDEFINED, vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL, vk::PipelineStageFlags2::NONE, vk::AccessFlags2::NONE, vk::PipelineStageFlags2::COLOR_ATTACHMENT_OUTPUT, vk::AccessFlags2::COLOR_ATTACHMENT_WRITE, ); Ok(true) } /// Adds a surface to be drawn. Returns the timeline point when the texture /// will no longer be in use. A return value of None indicates the texture /// is already safe to reuse. #[instrument(level = "trace", skip_all)] pub unsafe fn composite_surface( &mut self, texture: &compositor::buffers::Buffer, sync: Option, dest: compositor::surface::SurfaceConfiguration, ) -> anyhow::Result> { let device = &self.vk.device; let frame = &mut self.swap[self.swap_idx]; let (view, release) = match &texture.backing { compositor::buffers::BufferBacking::Shm { dirty, staging_buffer, image, format, .. } => { if *dirty { // We only set up tracing for the staging command buffer if // we're actually going to use it. if !frame.use_staging { if let Some(ref ctx) = self.vk.graphics_queue.tracy_context { frame.staging_span = Some(ctx.span(tracy_client::span_location!())?); } // Record the start timestamp. frame.staging_ts_pool.cmd_reset(device, frame.staging_cb); device.cmd_write_timestamp( frame.staging_cb, vk::PipelineStageFlags::TOP_OF_PIPE, frame.staging_ts_pool.pool, 0, ); } frame.use_staging = true; // Transfer the image to be writable. The upload happens // in the staging command buffer. 
insert_image_barrier( device, frame.staging_cb, image.image, None, vk::ImageLayout::UNDEFINED, vk::ImageLayout::TRANSFER_DST_OPTIMAL, vk::PipelineStageFlags2::NONE, vk::AccessFlags2::NONE, vk::PipelineStageFlags2::TRANSFER, vk::AccessFlags2::TRANSFER_WRITE, ); // Upload from the staging buffer to the texture. cmd_upload_shm( device, frame.staging_cb, staging_buffer, image, format.stride / format.bpp as u32, format.height, ); } // Transition the image to be readable (in the second command buffer). insert_image_barrier( device, frame.render_cb, image.image, None, vk::ImageLayout::TRANSFER_DST_OPTIMAL, vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL, vk::PipelineStageFlags2::TRANSFER, vk::AccessFlags2::TRANSFER_WRITE, vk::PipelineStageFlags2::FRAGMENT_SHADER, vk::AccessFlags2::SHADER_READ, ); assert!(sync.is_none()); (image.view, None) } compositor::buffers::BufferBacking::Dmabuf { image, fd, .. } => { // Transition the image to be readable. A special queue, // EXTERNAL, is used in a queue transfer to indicate // acquiring the texture from the wayland client. insert_image_barrier( device, frame.render_cb, image.image, Some((vk::QUEUE_FAMILY_FOREIGN_EXT, self.vk.graphics_queue.family)), vk::ImageLayout::GENERAL, vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL, vk::PipelineStageFlags2::NONE, vk::AccessFlags2::NONE, vk::PipelineStageFlags2::FRAGMENT_SHADER, vk::AccessFlags2::SHADER_READ, ); // Release the image at the end. 
insert_image_barrier( device, frame.render_cb, image.image, Some((self.vk.graphics_queue.family, vk::QUEUE_FAMILY_FOREIGN_EXT)), vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL, vk::ImageLayout::GENERAL, vk::PipelineStageFlags2::ALL_GRAPHICS, vk::AccessFlags2::SHADER_READ, vk::PipelineStageFlags2::NONE, vk::AccessFlags2::NONE, ); if let Some(sync) = sync { let sema = allocate_texture_semaphore(self.vk.clone(), frame)?; match sync { TextureSync::Explicit(syncobj) => { syncobj.import_as_semaphore(sema)?; } TextureSync::ImplicitInterop => { compositor::buffers::import_dmabuf_fence_as_semaphore( self.vk.clone(), sema, fd, )?; } } } (image.view, Some(frame.tp_render_done.clone())) } }; // Convert the destination rect into clip coordinates. let display_size: glam::UVec2 = (self.display_params.width, self.display_params.height).into(); let dst_pos = (dest.topleft.as_vec2() / display_size.as_vec2() * 2.0) - 1.0; let dst_size = dest.size.as_vec2() / display_size.as_vec2() * 2.0; // Draw. frame.draws.push((view, dst_pos, dst_size)); Ok(release) } /// End the current frame and submit it to the GPU. Returns the timeline /// point indicating when rendering and encoding have both completed. #[instrument(skip_all)] pub unsafe fn end_and_submit(&mut self) -> anyhow::Result { let device = &self.vk.device; let frame = &mut self.swap[self.swap_idx]; // Collate draw calls. We don't do this as we go because we need to do // all the sync outside of a dynamic rendering pass. self.composite_pipeline .begin_compositing(frame.render_cb, &frame.blend_image); for (view, dst_pos, dst_size) in frame.draws.drain(..) { self.composite_pipeline .composite_surface(frame.render_cb, view, dst_pos, dst_size)?; } self.composite_pipeline.end_compositing(frame.render_cb); // Transition the blend image to be readable. 
insert_image_barrier( device, frame.render_cb, frame.blend_image.image, None, vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL, vk::ImageLayout::GENERAL, vk::PipelineStageFlags2::COLOR_ATTACHMENT_OUTPUT, vk::AccessFlags2::COLOR_ATTACHMENT_WRITE, vk::PipelineStageFlags2::COMPUTE_SHADER, vk::AccessFlags2::SHADER_STORAGE_READ, ); // Acquire the encode image from the encode queue, but not for the // first frame. if frame.tp_clear.value() > 20 { let src_queue_family = self.vk.encode_queue.as_ref().unwrap().family; insert_image_barrier( device, frame.render_cb, frame.encode_image.image, Some((src_queue_family, self.vk.graphics_queue.family)), vk::ImageLayout::VIDEO_ENCODE_SRC_KHR, vk::ImageLayout::GENERAL, vk::PipelineStageFlags2::NONE, vk::AccessFlags2::NONE, vk::PipelineStageFlags2::COMPUTE_SHADER, vk::AccessFlags2::SHADER_STORAGE_WRITE, ); } else { // Otherwise, just transition the image to be writable. insert_image_barrier( device, frame.render_cb, frame.encode_image.image, None, vk::ImageLayout::UNDEFINED, vk::ImageLayout::GENERAL, vk::PipelineStageFlags2::NONE, vk::AccessFlags2::NONE, vk::PipelineStageFlags2::COMPUTE_SHADER, vk::AccessFlags2::SHADER_STORAGE_WRITE, ); } // We're converting the blend image, which is scRGB. let input_color_space = ColorSpace::LinearExtSrgb; self.convert_pipeline.cmd_convert( frame.render_cb, frame.blend_image.width, frame.blend_image.height, frame.convert_ds, input_color_space, self.streaming_params.profile, ); // Transfer to the encode queue. 
let dst_queue_family = self.vk.encode_queue.as_ref().unwrap().family; insert_image_barrier( device, frame.render_cb, frame.encode_image.image, Some((self.vk.graphics_queue.family, dst_queue_family)), vk::ImageLayout::GENERAL, vk::ImageLayout::VIDEO_ENCODE_SRC_KHR, vk::PipelineStageFlags2::COMPUTE_SHADER, vk::AccessFlags2::SHADER_STORAGE_WRITE, vk::PipelineStageFlags2::empty(), vk::AccessFlags2::empty(), ); let mut submits = Vec::new(); let staging_cb_infos = [vk::CommandBufferSubmitInfoKHR::default().command_buffer(frame.staging_cb)]; let staging_signal_infos = [vk::SemaphoreSubmitInfo::default() .semaphore(frame.timeline.as_semaphore()) .stage_mask(vk::PipelineStageFlags2::ALL_COMMANDS) .value(frame.tp_staging_done.value())]; let staging_submit_info = vk::SubmitInfo2::default() .command_buffer_infos(&staging_cb_infos) .signal_semaphore_infos(&staging_signal_infos); // Only submit the staging cb if we actually recorded commands to it. if frame.use_staging { // Record the end timestamp. device.cmd_write_timestamp( frame.staging_cb, vk::PipelineStageFlags::ALL_COMMANDS, frame.staging_ts_pool.pool, 1, ); if let Some(span) = &mut frame.staging_span { span.end_zone(); } device.end_command_buffer(frame.staging_cb)?; submits.push(staging_submit_info); } else { frame.tp_staging_done.signal()?; } // Record the end timestamp. 
device.cmd_write_timestamp( frame.render_cb, vk::PipelineStageFlags::ALL_COMMANDS, frame.render_ts_pool.pool, 1, ); if let Some(span) = &mut frame.render_span { span.end_zone(); } device.end_command_buffer(frame.render_cb)?; let render_cb_infos = [vk::CommandBufferSubmitInfoKHR::default().command_buffer(frame.render_cb)]; let mut render_wait_infos = vec![vk::SemaphoreSubmitInfo::default() .semaphore(frame.timeline.as_semaphore()) .stage_mask(vk::PipelineStageFlags2::FRAGMENT_SHADER) .value(frame.tp_staging_done.value())]; let render_signal_infos = [vk::SemaphoreSubmitInfo::default() .semaphore(frame.timeline.as_semaphore()) .stage_mask(vk::PipelineStageFlags2::ALL_COMMANDS) .value(frame.tp_render_done.value())]; for sema in &frame.texture_semas[0..frame.texture_semas_used] { render_wait_infos.push( vk::SemaphoreSubmitInfo::default() .semaphore(*sema) .stage_mask(vk::PipelineStageFlags2::FRAGMENT_SHADER), ); } let render_submit_info = vk::SubmitInfo2::default() .command_buffer_infos(&render_cb_infos) .wait_semaphore_infos(&render_wait_infos) .signal_semaphore_infos(&render_signal_infos); submits.push(render_submit_info); trace_span!("queue_submit2").in_scope(|| { device.queue_submit2(self.vk.graphics_queue.queue, &submits, vk::Fence::null()) })?; // Trigger encode. self.encoder.submit_encode( &frame.encode_image, frame.tp_render_done.clone(), frame.tp_clear.clone(), )?; // Wait for uploads to finish before returning, so that writes to the // staging buffers are synchronized. trace_span!("tp_staging_done.wait").in_scope(|| frame.tp_staging_done.wait())?; let tp_clear = frame.tp_clear.clone(); let swap_len = self.swap.len(); self.swap_idx = (self.swap_idx + 1) % swap_len; Ok(tp_clear) } pub fn request_refresh(&mut self) { self.encoder.request_refresh() } } impl Drop for EncodePipeline { fn drop(&mut self) { let device = &self.vk.device; // Drop the encoder, since it consumes some of the shared resources below. 
unsafe { ManuallyDrop::drop(&mut self.encoder); } unsafe { device.device_wait_idle().unwrap(); for frame in self.swap.iter() { device.free_command_buffers( self.vk.graphics_queue.command_pool, &[frame.staging_cb, frame.render_cb], ); for view in &frame.plane_views { device.destroy_image_view(*view, None); } for sema in &frame.texture_semas { device.destroy_semaphore(*sema, None); } device.destroy_query_pool(frame.render_ts_pool.pool, None); device.destroy_query_pool(frame.staging_ts_pool.pool, None); } } } } fn new_swapframe( vk: Arc, encode_image: VkImage, convert_pipeline: &convert::ConvertPipeline, ) -> anyhow::Result { let blend_image = VkImage::new( vk.clone(), composite::BLEND_FORMAT, false, encode_image.width, encode_image.height, vk::ImageUsageFlags::COLOR_ATTACHMENT | vk::ImageUsageFlags::SAMPLED, vk::SharingMode::EXCLUSIVE, vk::ImageCreateFlags::empty(), )?; let mut plane_views = Vec::new(); let (single_plane_format, double_plane_format) = disjoint_plane_formats(encode_image.format) .ok_or(anyhow!( "couldn't find a disjoint plane formats for {:?}", encode_image.format ))?; let disjoint_formats = if format_is_semiplanar(encode_image.format) { vec![ single_plane_format, // Y double_plane_format, // UV ] } else { vec![ single_plane_format, // Y single_plane_format, // U single_plane_format, // V ] }; let aspects = [ vk::ImageAspectFlags::PLANE_0, vk::ImageAspectFlags::PLANE_1, vk::ImageAspectFlags::PLANE_2, ]; for (idx, format) in disjoint_formats.into_iter().enumerate() { let mut usage_info = vk::ImageViewUsageCreateInfo::default().usage(vk::ImageUsageFlags::STORAGE); let create_info = vk::ImageViewCreateInfo::default() .image(encode_image.image) .view_type(vk::ImageViewType::TYPE_2D) .format(format) .components(vk::ComponentMapping { r: vk::ComponentSwizzle::IDENTITY, g: vk::ComponentSwizzle::IDENTITY, b: vk::ComponentSwizzle::IDENTITY, a: vk::ComponentSwizzle::IDENTITY, }) .subresource_range(vk::ImageSubresourceRange { aspect_mask: aspects[idx], 
base_mip_level: 0, level_count: 1, base_array_layer: 0, layer_count: 1, }) .push_next(&mut usage_info); let view = unsafe { vk.device.create_image_view(&create_info, None)? }; plane_views.push(view); } let convert_ds = convert_pipeline.ds_for_conversion(&blend_image, &plane_views)?; let staging_ts_pool = create_timestamp_query_pool(&vk.device, 2)?; let render_ts_pool = create_timestamp_query_pool(&vk.device, 2)?; let timeline = VkTimelineSemaphore::new(vk.clone(), 0)?; Ok(SwapFrame { convert_ds, texture_semas: Vec::new(), texture_semas_used: 0, draws: Vec::new(), blend_image, encode_image, plane_views, staging_cb: allocate_command_buffer(&vk.device, vk.graphics_queue.command_pool)?, render_cb: allocate_command_buffer(&vk.device, vk.graphics_queue.command_pool)?, use_staging: false, timeline: timeline.clone(), tp_staging_done: timeline.new_point(0), tp_render_done: timeline.new_point(0), tp_clear: timeline.new_point(0), staging_ts_pool, staging_span: None, render_ts_pool, render_span: None, }) } fn allocate_texture_semaphore( vk: Arc, frame: &mut SwapFrame, ) -> anyhow::Result { let idx = frame.texture_semas_used; frame.texture_semas_used += 1; if frame.texture_semas_used <= frame.texture_semas.len() { return Ok(frame.texture_semas[idx]); } let sema = unsafe { vk.device .create_semaphore(&vk::SemaphoreCreateInfo::default(), None)? }; frame.texture_semas.push(sema); Ok(sema) } fn format_is_semiplanar(format: vk::Format) -> bool { // grep for 2PLANE in the vulkan spec. 
matches!( format, vk::Format::G8_B8R8_2PLANE_420_UNORM | vk::Format::G8_B8R8_2PLANE_422_UNORM | vk::Format::G8_B8R8_2PLANE_444_UNORM | vk::Format::G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16 | vk::Format::G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16 | vk::Format::G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16 | vk::Format::G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16 | vk::Format::G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16 | vk::Format::G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16 | vk::Format::G16_B16R16_2PLANE_420_UNORM | vk::Format::G16_B16R16_2PLANE_422_UNORM | vk::Format::G16_B16R16_2PLANE_444_UNORM ) } pub unsafe fn cmd_upload_shm( device: &ash::Device, cb: vk::CommandBuffer, buffer: &VkHostBuffer, image: &VkImage, stride: u32, // In texels. height: u32, // In texels. ) { let region = vk::BufferImageCopy::default() .buffer_row_length(stride) .buffer_image_height(height) .image_subresource(vk::ImageSubresourceLayers { aspect_mask: vk::ImageAspectFlags::COLOR, mip_level: 0, base_array_layer: 0, layer_count: 1, }) .image_extent(vk::Extent3D { width: image.width, height: image.height, depth: 1, }); let regions = [region]; device.cmd_copy_buffer_to_image( cb, buffer.buffer, image.image, vk::ImageLayout::TRANSFER_DST_OPTIMAL, ®ions, ); } fn disjoint_plane_formats(format: vk::Format) -> Option<(vk::Format, vk::Format)> { match format { vk::Format::G8_B8R8_2PLANE_420_UNORM | vk::Format::G8_B8R8_2PLANE_422_UNORM | vk::Format::G8_B8R8_2PLANE_444_UNORM | vk::Format::G8_B8_R8_3PLANE_420_UNORM | vk::Format::G8_B8_R8_3PLANE_422_UNORM | vk::Format::G8_B8_R8_3PLANE_444_UNORM => { Some((vk::Format::R8_UNORM, vk::Format::R8G8_UNORM)) } vk::Format::G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16 | vk::Format::G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16 | vk::Format::G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16 | vk::Format::G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16 | vk::Format::G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16 | vk::Format::G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16 => Some(( 
vk::Format::R10X6_UNORM_PACK16, vk::Format::R10X6G10X6_UNORM_2PACK16, )), vk::Format::G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16 | vk::Format::G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16 | vk::Format::G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16 | vk::Format::G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16 | vk::Format::G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16 | vk::Format::G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16 => Some(( vk::Format::R12X4_UNORM_PACK16, vk::Format::R12X4G12X4_UNORM_2PACK16, )), vk::Format::G16_B16R16_2PLANE_420_UNORM | vk::Format::G16_B16R16_2PLANE_422_UNORM | vk::Format::G16_B16R16_2PLANE_444_UNORM | vk::Format::G16_B16_R16_3PLANE_420_UNORM | vk::Format::G16_B16_R16_3PLANE_422_UNORM | vk::Format::G16_B16_R16_3PLANE_444_UNORM => { Some((vk::Format::R16_UNORM, vk::Format::R16G16_UNORM)) } _ => None, } } ================================================ FILE: mm-server/src/session.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use std::{path::PathBuf, sync::Arc, time}; use anyhow::{anyhow, bail}; use crossbeam_channel as crossbeam; use mm_protocol as protocol; use pathsearch::find_executable_in_path; use tracing::{debug_span, info}; use crate::{ codec::probe_codec, server::stream::StreamWriter, vulkan::VkContext, waking_sender::WakingSender, }; mod audio; pub mod compositor; pub mod control; mod handle; mod input; mod reactor; mod video; use control::{AudioStreamParams, ControlMessage, DisplayParams, SessionEvent, VideoStreamParams}; pub use handle::SessionHandle; pub use input::GamepadLayout; use reactor::Reactor; pub use reactor::EPOCH; /// How long to wait for the compositor to accept a new attachment. 
const ATTACH_TIMEOUT: time::Duration = time::Duration::from_secs(10); pub struct Session { pub id: u64, pub display_params: DisplayParams, pub application_id: String, pub started: time::SystemTime, pub detached_since: Option, pub permanent_gamepads: Vec, pub defunct: bool, comp_thread_handle: std::thread::JoinHandle>, control_sender: WakingSender, operator_attachment_id: Option, pub bug_report_dir: Option, vk: Arc, } pub struct Attachment { pub session_id: u64, pub attachment_id: u64, pub events: crossbeam::Receiver, pub control: WakingSender, } impl Session { /// Launches a standalone compositor and the application process. Blocks /// until both have started up and connected over a unix socket. pub fn launch( vk: Arc, id: u64, application_id: &str, application_config: &super::config::AppConfig, display_params: DisplayParams, permanent_gamepads: Vec, bug_report_dir: Option, ) -> anyhow::Result { // Do an early check that the executable exists. let exe = application_config.command.first().unwrap(); find_executable_in_path(exe).ok_or(anyhow!("command {:?} not in PATH", exe))?; // Launch the compositor, which in turn launches the app. let (ready_send, ready_recv) = oneshot::channel(); let vk_clone = vk.clone(); let app_name = application_id.to_owned(); let app_cfg = application_config.clone(); let gamepads = permanent_gamepads .iter() .map(|pad| (pad.id, GamepadLayout::GenericDualStick)) // TODO layout. .collect(); let bug_report_dir_clone = bug_report_dir.clone(); let comp_thread_handle = std::thread::spawn(move || { tracy_client::set_thread_name!("compositor"); let span = debug_span!("session", session_id = id, app = app_name); let _guard = span.enter(); Reactor::run( vk_clone, app_cfg, display_params, gamepads, bug_report_dir_clone, ready_send, ) }); info!(session_id = id, application = ?application_id, "launching session"); // Wait until the compositor is ready. 
let control_sender = match ready_recv.recv() { Ok(s) => s, Err(_) => { return match comp_thread_handle.join() { Ok(Ok(())) => Err(anyhow!("compositor thread exited unexpectedly")), Ok(Err(e)) => Err(e), Err(_) => Err(anyhow!("compositor thread panicked")), } } }; Ok(Self { id, application_id: application_id.to_string(), display_params, permanent_gamepads, started: time::SystemTime::now(), defunct: false, detached_since: None, operator_attachment_id: None, comp_thread_handle, control_sender, bug_report_dir, vk, }) } pub fn update_display_params(&mut self, display_params: DisplayParams) -> anyhow::Result<()> { if self.defunct { return Err(anyhow!("session defunct")); } match self .control_sender .send(ControlMessage::UpdateDisplayParams(display_params)) { Ok(_) => { self.display_params = display_params; Ok(()) } Err(crossbeam::SendError(_)) => { self.defunct = true; Err(anyhow!("compositor died")) } } } pub fn attach( &mut self, id: u64, operator: bool, video_params: VideoStreamParams, audio_params: AudioStreamParams, stream_writer: StreamWriter, ) -> anyhow::Result { if self.defunct { return Err(anyhow!("session defunct")); } else if !operator { unimplemented!() } else if self.operator_attachment_id.is_some() { return Err(anyhow!("session already has an operator")); } info!( session_id = self.id, attachment_id = id, operator, "new attachment" ); let (events_send, events_recv) = crossbeam_channel::unbounded(); let (ready_send, ready_recv) = oneshot::channel(); if self .control_sender .send(ControlMessage::Attach { id, sender: events_send, video_params, audio_params, stream_writer, ready: ready_send, }) .is_err() { self.defunct = true; bail!("compositor died"); } if ready_recv.recv_timeout(ATTACH_TIMEOUT).is_err() { let _ = self.control_sender.send(ControlMessage::Detach(id)); bail!("attachment rejected"); } self.operator_attachment_id = Some(id); self.detached_since = None; Ok(Attachment { session_id: self.id, attachment_id: id, events: events_recv, control: 
self.control_sender.clone(), }) } pub fn detach(&mut self, attachment: Attachment) -> anyhow::Result<()> { if self.defunct { return Err(anyhow!("session defunct")); } self.operator_attachment_id = None; self.detached_since = Some(time::Instant::now()); match self .control_sender .send(ControlMessage::Detach(attachment.attachment_id)) { Ok(_) => Ok(()), Err(crossbeam::SendError(_)) => { self.defunct = true; Err(anyhow!("compositor died")) } } } pub fn stop(self) -> anyhow::Result<()> { if let Err(crossbeam::TrySendError::Full(_)) = self.control_sender.try_send(ControlMessage::Stop) { bail!("compositor channel full"); } match self.comp_thread_handle.join() { Ok(Ok(())) => Ok(()), Ok(Err(e)) => Err(e), Err(v) => Err(anyhow!("compositor thread panicked: {:?}", v)), } } pub fn supports_stream(&self, params: VideoStreamParams) -> bool { if params.width != self.display_params.width || params.height != self.display_params.height { return false; } probe_codec(self.vk.clone(), params.codec) } } ================================================ FILE: mm-server/src/state.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 use std::sync::Arc; use hashbrown::HashMap; use parking_lot::Mutex; use tracing::{error, info}; use crate::config::Config; use crate::{session::Session, vulkan::VkContext}; pub type SharedState = Arc>; pub struct ServerState { // TODO: we'd rather use a BTreeMap, but we want // hash_brown::HashMap::extract_if. pub sessions: HashMap, pub session_seq: usize, pub id_generator: tiny_id::ShortCodeGenerator, pub cfg: Config, pub vk: Arc, } impl ServerState { pub fn new(vk: Arc, cfg: Config) -> Self { Self { vk, cfg, sessions: HashMap::new(), session_seq: 0, id_generator: tiny_id::ShortCodeGenerator::new_numeric(6), } } pub fn generate_session_id(&mut self) -> (usize, u64) { let seq = self.session_seq; self.session_seq += 1; (seq, self.id_generator.next_int()) } /// Run periodic cleanup, e.g. 
ending defunct sessions. pub fn tick(&mut self) -> anyhow::Result<()> { self.sessions .extract_if(|_, s| { if s.defunct { info!("cleaning up defunct session {}", s.id); return true; } let session_timeout = self.cfg.apps[&s.application_id].session_timeout; if s.detached_since .zip(session_timeout) .is_some_and(|(t, timeout)| t.elapsed() > timeout) { info!("cleaning up idle session {}", s.id); true } else { false } }) .for_each(|(_, s)| match s.stop() { Ok(()) => {} Err(e) => { error!("session ended with error: {:#}", e); } }); Ok(()) } } ================================================ FILE: mm-server/src/vulkan/chain.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 /// Used to construct a pinned chain of vulkan structures. /// /// Ash provides the builder pattern for generating temporary input structs on /// the stack, but it doesn't work well with structs stored on the heap for /// re-use. Part of the reason for that is that the `p_next` pointer mechanism /// is out of scope of the borrow checker. /// /// If we want to store a chain of structs in a `Box`, we should also `Pin` it, /// since the holding struct is effectively self-referential. This macro handles /// the boilerplate for that, by: /// /// - Generating a constructor for the struct that returns `Pin>` /// - Generating `AsRef` and `AsMut` for the first struct in the chain /// - Generating `with_foo` methods that allow you to replace one struct in the /// chain (presumably using the builder pattern). /// /// Besides letting us reuse allocations for heavy structs, this also achieves a /// level of polymorphism, since calling code can take an `impl AsRef` where /// `T` is the first struct, and generalize over the remaining chain. /// /// Note that the chain is always created and maintained in declaration order, /// with the first field being the "head" and the head's `p_next` pointer /// pointing to the second field, and so on. 
macro_rules! vk_chain { ( $(#[$meta:meta])* $vis:vis struct $Chain:ident <$lifetime:lifetime> { $(#[$head_meta:meta])* pub $HeadName:ident: $HeadStruct:ty, $( $(#[$field_meta:meta])* pub $Name:ident: $Struct:ty, )+ } ) => { paste::paste! { $(#[$meta])* $vis struct [<$Chain Inner>] <$lifetime> { $(#[$head_meta])* pub $HeadName: $HeadStruct, $( $(#[$field_meta])* pub $Name: $Struct, )* } $vis struct $Chain(std::pin::Pin] <'static> >>); unsafe impl Send for $Chain {} #[allow(dead_code)] impl $Chain { pub fn new<$lifetime: 'static>($HeadName: $HeadStruct, $($Name: $Struct,)*) -> Self { let mut ch = Box::pin([<$Chain Inner>] { $HeadName, $($Name,)* }); __set_p_next!(ch, $HeadName, $($Name),*); Self(ch) } $( #[doc = "Replaces the `" $Name "` field with the new (or modified) struct returned by `f`. Maintains the `p_next` chain."] pub fn []<$lifetime: 'static, F>(&mut self, f: F) where F: FnOnce($Struct) -> $Struct, { let p_next = self.0.$Name.p_next; self.0.$Name = f(self.$Name); self.0.$Name.p_next = p_next; } )* } impl Default for $Chain { fn default() -> Self { Self::new(__replace_expr!(($HeadStruct) Default::default()), $(__replace_expr!(($Struct) Default::default()),)*) } } impl std::ops::Deref for $Chain { type Target = [<$Chain Inner>]<'static>; fn deref(&self) -> &Self::Target { std::pin::Pin::deref(&self.0) } } } impl<$lifetime: 'static> AsRef<$HeadStruct> for $Chain { fn as_ref(&self) -> &$HeadStruct { &self.0.as_ref().get_ref().$HeadName } } impl<$lifetime: 'static> AsMut<$HeadStruct> for $Chain { fn as_mut(&mut self) -> &mut $HeadStruct { &mut self.0.as_mut().get_mut().$HeadName } } }; } macro_rules! __set_p_next( ($target:ident, $head:ident, $next:ident) => { $target.$head.p_next = <*mut _>::cast(&mut $target.$next); }; ($target:ident, $head:ident, $next:ident, $($tail:ident),+) => { $target.$head.p_next = <*mut _>::cast(&mut $target.$next); __set_p_next!($target, $next, $($tail),+); }; ); macro_rules! 
__replace_expr {
    // Discards the matched token tree and expands to `$sub`. Used by
    // `vk_chain!` to repeat a default expression once per chain field.
    ($_t:tt $sub:expr) => {
        $sub
    };
}

pub(crate) use __replace_expr;
pub(crate) use __set_p_next;
pub(crate) use vk_chain;

#[cfg(test)]
mod tests {
    use ash::vk;

    #[test]
    fn test_chain() {
        // Declare a three-struct chain: profile -> usage info -> h264 profile.
        vk_chain! {
            pub struct H264EncodeProfile<'a> {
                pub profile: vk::VideoProfileInfoKHR<'a>,
                pub encode_usage_info: vk::VideoEncodeUsageInfoKHR<'a>,
                pub h264_profile: vk::VideoEncodeH264ProfileInfoEXT<'a>,
            }
        }

        let mut chain = H264EncodeProfile::new(
            vk::VideoProfileInfoKHR::default(),
            vk::VideoEncodeUsageInfoKHR::default(),
            vk::VideoEncodeH264ProfileInfoEXT::default(),
        );

        // Replacing a middle struct must keep the `p_next` chain intact.
        chain.with_encode_usage_info(|info| {
            info.video_usage_hints(vk::VideoEncodeUsageFlagsKHR::STREAMING)
        });

        assert_eq!(
            chain.encode_usage_info.video_usage_hints,
            vk::VideoEncodeUsageFlagsKHR::STREAMING
        );
    }
}

================================================ FILE: mm-server/src/vulkan/drm.rs ================================================

// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

use std::{
    fs::{File, OpenOptions},
    os::fd::{AsFd, BorrowedFd},
};

use anyhow::anyhow;
use libc::dev_t;

/// A DRM render node, held open as a plain file handle.
pub struct DrmDevice(File);

impl AsFd for DrmDevice {
    fn as_fd(&self) -> BorrowedFd<'_> {
        self.0.as_fd()
    }
}

impl drm::Device for DrmDevice {}
impl drm::control::Device for DrmDevice {}

impl DrmDevice {
    /// Opens the device file for the DRM node identified by `dev` in
    /// read/write mode.
    pub fn new(dev: dev_t) -> anyhow::Result {
        let path = drm::node::DrmNode::from_dev_id(dev)?
.dev_path()
            .ok_or(anyhow!("no device file found"))?;

        // Open the node for both reading and writing.
        let mut options = OpenOptions::new();
        options.read(true);
        options.write(true);

        Ok(Self(options.open(path)?))
    }
}

================================================ FILE: mm-server/src/vulkan/timeline.rs ================================================

// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

use std::{
    os::fd::{IntoRawFd as _, OwnedFd},
    sync::Arc,
};

use anyhow::Context as _;
use ash::vk;
use tracing::instrument;

use super::VkContext;

/// A cheaply-cloneable handle to a Vulkan timeline semaphore.
#[derive(Clone)]
pub struct VkTimelineSemaphore(Arc);

struct Inner {
    vk: Arc,
    sema: vk::Semaphore,
}

/// A specific point (counter value) on a timeline semaphore.
#[derive(Clone)]
pub struct VkTimelinePoint(Arc, u64);

impl From for u64 {
    fn from(value: VkTimelinePoint) -> Self {
        value.1
    }
}

impl std::ops::Add for VkTimelinePoint {
    type Output = Self;

    fn add(self, rhs: u64) -> Self {
        Self(self.0, self.1 + rhs)
    }
}

impl std::ops::Add for &VkTimelinePoint {
    type Output = VkTimelinePoint;

    fn add(self, rhs: u64) -> Self::Output {
        VkTimelinePoint(self.0.clone(), self.1 + rhs)
    }
}

impl std::ops::AddAssign for VkTimelinePoint {
    fn add_assign(&mut self, rhs: u64) {
        self.1 += rhs
    }
}

impl VkTimelineSemaphore {
    /// Creates a new TIMELINE-type semaphore starting at `initial_value`.
    pub fn new(vk: Arc, initial_value: u64) -> anyhow::Result {
        let sema = unsafe {
            vk.device
                .create_semaphore(
                    &vk::SemaphoreCreateInfo::default().push_next(
                        &mut vk::SemaphoreTypeCreateInfo::default()
                            .semaphore_type(vk::SemaphoreType::TIMELINE)
                            .initial_value(initial_value),
                    ),
                    None,
                )
                .context("VkCreateSemaphore")?
        };

        Ok(Self(Arc::new(Inner { vk, sema })))
    }

    /// Imports a syncobj fd as a timeline semaphore (OPAQUE_FD handle type).
    pub fn from_syncobj_fd(vk: Arc, fd: OwnedFd) -> anyhow::Result {
        let sema = Self::new(vk.clone(), 0)?;

        let import_info = vk::ImportSemaphoreFdInfoKHR::default()
            .semaphore(sema.as_semaphore())
            .handle_type(vk::ExternalSemaphoreHandleTypeFlags::OPAQUE_FD)
            .fd(fd.into_raw_fd()); // Vulkan owns the fd now.
unsafe {
            vk.external_semaphore_api
                .import_semaphore_fd(&import_info)
                .context("VkImportSemaphoreFdKHR")?;
        }

        Ok(sema)
    }

    /// Returns a point at `value` on this timeline.
    pub fn new_point(&self, value: u64) -> VkTimelinePoint {
        VkTimelinePoint(self.0.clone(), value)
    }

    /// The raw semaphore handle.
    pub fn as_semaphore(&self) -> vk::Semaphore {
        self.0.sema
    }
}

impl VkTimelinePoint {
    pub fn value(&self) -> u64 {
        self.1
    }

    /// The semaphore this point belongs to.
    pub fn timeline(&self) -> VkTimelineSemaphore {
        VkTimelineSemaphore(self.0.clone())
    }

    /// Blocks until the semaphore counter reaches this point, with a
    /// one-second timeout.
    #[instrument(skip_all)]
    pub unsafe fn wait(&self) -> anyhow::Result<()> {
        let device = &self.0.vk.device;
        device.wait_semaphores(
            &vk::SemaphoreWaitInfo::default()
                .semaphores(&[self.0.sema])
                .values(&[self.1]),
            1_000_000_000, // 1 second
        )?;
        Ok(())
    }

    /// Signals the semaphore to this point's value from the host.
    #[instrument(skip_all)]
    pub unsafe fn signal(&self) -> anyhow::Result<()> {
        let device = &self.0.vk.device;
        device.signal_semaphore(
            &vk::SemaphoreSignalInfo::default()
                .semaphore(self.0.sema)
                .value(self.1),
        )?;
        Ok(())
    }

    /// Non-blocking check: has the counter reached this point yet?
    pub unsafe fn poll(&self) -> anyhow::Result {
        let device = &self.0.vk.device;
        let value = device.get_semaphore_counter_value(self.0.sema)?;
        Ok(value >= self.1)
    }
}

impl Drop for Inner {
    fn drop(&mut self) {
        unsafe {
            // Wait for the whole device to go idle first, in case submitted
            // work still references the semaphore.
            self.vk.device.device_wait_idle().unwrap();
            self.vk.device.destroy_semaphore(self.sema, None)
        };
    }
}

================================================ FILE: mm-server/src/vulkan/video.rs ================================================

// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

use ash::prelude::*;
use ash::vk;
use ash::RawPtr;

/// Hand-rolled loader/wrapper for the `VK_KHR_video_queue` device extension
/// entry points.
pub struct VideoQueueExt {
    handle: vk::Device,
    fp: vk::KhrVideoQueueFn,
}

#[allow(dead_code)]
impl VideoQueueExt {
    pub fn new(entry: &ash::Entry, instance: &ash::Instance, device: &ash::Device) -> Self {
        let handle = device.handle();
        let fp = vk::KhrVideoQueueFn::load(|name| unsafe {
            std::mem::transmute(entry.get_instance_proc_addr(instance.handle(), name.as_ptr()))
        });
        Self { handle, fp }
    }

    #[inline]
    pub fn name() -> &'static std::ffi::CStr {
        // NOTE(review): this returns the *decode* queue extension's name, not
        // `vk::KhrVideoQueueFn::NAME` — confirm whether that is intentional.
        vk::KhrVideoDecodeQueueFn::NAME
    }

    #[inline]
    /// `vkBindVideoSessionMemoryKHR`
    pub unsafe fn
bind_video_session_memory(
        &self,
        device: &ash::Device,
        video_session: vk::VideoSessionKHR,
        bind_session_memory_infos: &[vk::BindVideoSessionMemoryInfoKHR],
    ) -> VkResult<()> {
        (self.fp.bind_video_session_memory_khr)(
            device.handle(),
            video_session,
            bind_session_memory_infos.len() as u32,
            bind_session_memory_infos.as_ptr(),
        )
        .result()
    }

    #[inline]
    /// `vkCmdBeginVideoCodingKHR`
    pub unsafe fn cmd_begin_video_coding(
        &self,
        command_buffer: vk::CommandBuffer,
        begin_info: &vk::VideoBeginCodingInfoKHR,
    ) {
        (self.fp.cmd_begin_video_coding_khr)(command_buffer, begin_info);
    }

    #[inline]
    /// `vkCmdControlVideoCodingKHR`
    pub unsafe fn cmd_control_video_coding(
        &self,
        command_buffer: vk::CommandBuffer,
        coding_control_info: &vk::VideoCodingControlInfoKHR,
    ) {
        (self.fp.cmd_control_video_coding_khr)(command_buffer, coding_control_info);
    }

    #[inline]
    /// `vkCmdEndVideoCodingKHR`
    pub unsafe fn cmd_end_video_coding(
        &self,
        command_buffer: vk::CommandBuffer,
        end_coding_info: &vk::VideoEndCodingInfoKHR,
    ) {
        (self.fp.cmd_end_video_coding_khr)(command_buffer, end_coding_info);
    }

    #[inline]
    /// `vkCreateVideoSessionKHR`
    pub unsafe fn create_video_session(
        &self,
        create_info: &vk::VideoSessionCreateInfoKHR,
        allocation_callbacks: Option<&vk::AllocationCallbacks>,
    ) -> VkResult {
        let mut video_session = std::mem::zeroed();
        (self.fp.create_video_session_khr)(
            self.handle,
            create_info,
            allocation_callbacks.as_raw_ptr(),
            &mut video_session,
        )
        .result_with_success(video_session)
    }

    #[inline]
    /// `vkCreateVideoSessionParametersKHR`
    pub unsafe fn create_video_session_parameters(
        &self,
        create_info: &vk::VideoSessionParametersCreateInfoKHR,
        allocation_callbacks: Option<&vk::AllocationCallbacks>,
    ) -> VkResult {
        let mut video_session_parameters = std::mem::zeroed();
        (self.fp.create_video_session_parameters_khr)(
            self.handle,
            create_info,
            allocation_callbacks.as_raw_ptr(),
            &mut video_session_parameters,
        )
        .result_with_success(video_session_parameters)
    }

    #[inline]
    /// `vkDestroyVideoSessionKHR`
    pub unsafe fn destroy_video_session(
        &self,
        video_session: vk::VideoSessionKHR,
        allocation_callbacks: Option<&vk::AllocationCallbacks>,
    ) {
        (self.fp.destroy_video_session_khr)(
self.handle,
            video_session,
            allocation_callbacks.as_raw_ptr(),
        );
    }

    #[inline]
    /// `vkDestroyVideoSessionParametersKHR`
    pub unsafe fn destroy_video_session_parameters(
        &self,
        video_session_parameters: vk::VideoSessionParametersKHR,
        allocation_callbacks: Option<&vk::AllocationCallbacks>,
    ) {
        (self.fp.destroy_video_session_parameters_khr)(
            self.handle,
            video_session_parameters,
            allocation_callbacks.as_raw_ptr(),
        );
    }

    #[inline]
    /// `vkGetPhysicalDeviceVideoCapabilitiesKHR`
    pub unsafe fn get_physical_device_video_capabilities(
        &self,
        physical_device: vk::PhysicalDevice,
        video_profile: &vk::VideoProfileInfoKHR,
        capabilities: &mut vk::VideoCapabilitiesKHR,
    ) -> VkResult<()> {
        (self.fp.get_physical_device_video_capabilities_khr)(
            physical_device,
            video_profile,
            capabilities,
        )
        .result()
    }

    #[inline]
    /// `vkGetPhysicalDeviceVideoFormatPropertiesKHR`
    pub unsafe fn get_physical_device_video_format_properties(
        &self,
        physical_device: vk::PhysicalDevice,
        video_format_info: &vk::PhysicalDeviceVideoFormatInfoKHR,
    ) -> VkResult> {
        read_into_defaulted_vector(|count, data| {
            (self.fp.get_physical_device_video_format_properties_khr)(
                physical_device,
                video_format_info,
                count,
                data,
            )
        })
    }

    #[inline]
    /// `vkGetVideoSessionMemoryRequirementsKHR`
    pub unsafe fn get_video_session_memory_requirements(
        &self,
        video_session: vk::VideoSessionKHR,
    ) -> VkResult> {
        read_into_defaulted_vector(|count, data| {
            (self.fp.get_video_session_memory_requirements_khr)(
                self.handle,
                video_session,
                count,
                data,
            )
        })
    }

    #[inline]
    /// `vkUpdateVideoSessionParametersKHR`
    pub unsafe fn update_video_session_parameters(
        &self,
        video_session_parameters: vk::VideoSessionParametersKHR,
        update_info: &vk::VideoSessionParametersUpdateInfoKHR,
    ) -> VkResult<()> {
        (self.fp.update_video_session_parameters_khr)(
            self.handle,
            video_session_parameters,
            update_info,
        )
        .result()
    }
}

/// Hand-rolled loader/wrapper for the `VK_KHR_video_decode_queue` device
/// extension entry points.
pub struct VideoDecodeQueueExt {
    fp: vk::KhrVideoDecodeQueueFn,
}

#[allow(dead_code)]
impl VideoDecodeQueueExt {
    pub fn new(entry: &ash::Entry, instance: &ash::Instance) -> Self {
        let fp = vk::KhrVideoDecodeQueueFn::load(|name| unsafe {
            std::mem::transmute(entry.get_instance_proc_addr(instance.handle(), name.as_ptr()))
        });
        Self { fp }
    }

    #[inline]
/// `vkCmdDecodeVideoKHR`
    pub unsafe fn cmd_decode_video(
        &self,
        command_buffer: vk::CommandBuffer,
        decode_info: &vk::VideoDecodeInfoKHR,
    ) {
        (self.fp.cmd_decode_video_khr)(command_buffer, decode_info);
    }
}

/// Hand-rolled loader/wrapper for the `VK_KHR_video_encode_queue` device
/// extension entry points.
pub struct VideoEncodeQueueExt {
    handle: vk::Device,
    fp: vk::KhrVideoEncodeQueueFn,
}

#[allow(dead_code)]
impl VideoEncodeQueueExt {
    pub fn new(entry: &ash::Entry, instance: &ash::Instance, device: &ash::Device) -> Self {
        let handle = device.handle();
        let fp = vk::KhrVideoEncodeQueueFn::load(|name| unsafe {
            std::mem::transmute(entry.get_instance_proc_addr(instance.handle(), name.as_ptr()))
        });
        Self { handle, fp }
    }

    #[inline]
    /// `vkGetPhysicalDeviceVideoEncodeQualityLevelPropertiesKHR`
    pub unsafe fn get_physical_device_video_encode_quality_level_properties(
        &self,
        physical_device: vk::PhysicalDevice,
        quality_level_info: &vk::PhysicalDeviceVideoEncodeQualityLevelInfoKHR,
        quality_level_properties: &mut vk::VideoEncodeQualityLevelPropertiesKHR,
    ) -> VkResult<()> {
        (self
            .fp
            .get_physical_device_video_encode_quality_level_properties_khr)(
            physical_device,
            quality_level_info,
            quality_level_properties,
        )
        .result()
    }

    #[inline]
    /// `vkCmdEncodeVideoKHR`
    pub unsafe fn cmd_encode_video(
        &self,
        command_buffer: vk::CommandBuffer,
        encode_info: &vk::VideoEncodeInfoKHR,
    ) {
        (self.fp.cmd_encode_video_khr)(command_buffer, encode_info);
    }

    #[inline]
    /// `vkGetEncodedVideoSessionParametersKHR`
    pub unsafe fn get_encoded_video_session_parameters(
        &self,
        session_parameters_info: &vk::VideoEncodeSessionParametersGetInfoKHR,
        info: &mut vk::VideoEncodeSessionParametersFeedbackInfoKHR,
    ) -> VkResult> {
        // Raw pointer to the feedback struct; the driver writes into it each
        // time the closure below is re-invoked by the read helper.
        let ptr = info as *mut _;
        read_into_uninitialized_vector(|count, data: *mut u8| {
            (self.fp.get_encoded_video_session_parameters_khr)(
                self.handle,
                session_parameters_info,
                ptr,
                count,
                data.cast(),
            )
        })
    }
}

// Copied from ash.

/// Repeatedly calls `f` until it does not return [`vk::Result::INCOMPLETE`]
/// anymore, ensuring all available data has been read into the vector.
///
/// See for example [`vkEnumerateInstanceExtensionProperties`]: the number of
/// available items may change between calls; [`vk::Result::INCOMPLETE`] is
/// returned when the count increased (and the vector is not large enough after
/// querying the initial size), requiring Ash to try again.
///
/// [`vkEnumerateInstanceExtensionProperties`]: https://www.khronos.org/registry/vulkan/specs/1.3-extensions/man/html/vkEnumerateInstanceExtensionProperties.html
pub(crate) unsafe fn read_into_uninitialized_vector, T>(
    f: impl Fn(&mut N, *mut T) -> vk::Result,
) -> VkResult>
where
    >::Error: std::fmt::Debug,
{
    loop {
        // First call: query the element count only (null data pointer).
        let mut count = N::default();
        f(&mut count, std::ptr::null_mut()).result()?;
        let mut data =
            Vec::with_capacity(count.try_into().expect("`N` failed to convert to `usize`"));
        // Second call: fill the (uninitialized) buffer. If the count grew in
        // between, the driver returns INCOMPLETE and we retry from scratch.
        let err_code = f(&mut count, data.as_mut_ptr());
        if err_code != vk::Result::INCOMPLETE {
            break err_code.set_vec_len_on_success(
                data,
                count.try_into().expect("`N` failed to convert to `usize`"),
            );
        }
    }
}

/// Repeatedly calls `f` until it does not return [`vk::Result::INCOMPLETE`]
/// anymore, ensuring all available data has been read into the vector.
///
/// Items in the target vector are [`default()`][Default::default()]-initialized
/// which is required for [`vk::BaseOutStructure`]-like structs where
/// [`vk::BaseOutStructure::s_type`] needs to be a valid type and
/// [`vk::BaseOutStructure::p_next`] a valid or [`null`][std::ptr::null_mut()]
/// pointer.
///
/// See for example [`vkEnumerateInstanceExtensionProperties`]: the number of
/// available items may change between calls; [`vk::Result::INCOMPLETE`] is
/// returned when the count increased (and the vector is not large enough after
/// querying the initial size), requiring Ash to try again.
/// /// [`vkEnumerateInstanceExtensionProperties`]: https://www.khronos.org/registry/vulkan/specs/1.3-extensions/man/html/vkEnumerateInstanceExtensionProperties.html pub(crate) unsafe fn read_into_defaulted_vector< N: Copy + Default + TryInto, T: Default + Clone, >( f: impl Fn(&mut N, *mut T) -> vk::Result, ) -> VkResult> where >::Error: std::fmt::Debug, { loop { let mut count = N::default(); f(&mut count, std::ptr::null_mut()).result()?; let mut data = vec![Default::default(); count.try_into().expect("`N` failed to convert to `usize`")]; let err_code = f(&mut count, data.as_mut_ptr()); if err_code != vk::Result::INCOMPLETE { data.set_len(count.try_into().expect("`N` failed to convert to `usize`")); break err_code.result_with_success(data); } } } ================================================ FILE: mm-server/src/vulkan.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: BUSL-1.1 #![allow(clippy::too_many_arguments)] mod chain; mod drm; mod timeline; pub mod video; use std::ffi::{c_void, CStr, CString}; use std::sync::Arc; use anyhow::{bail, Context, Result}; use ash::extensions::{ext, khr}; use ash::vk; pub(crate) use chain::*; use cstr::cstr; pub use timeline::*; use tracing::{debug, error, info, instrument, warn}; use self::video::{VideoEncodeQueueExt, VideoQueueExt}; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum Vendor { Amd, Nvidia, Other, } #[derive(Debug, Clone, PartialEq, Eq)] pub enum DriverVersion { MesaRadv { major: u32, minor: u32, patch: u32 }, NvidiaProprietary { major: u32, minor: u32 }, Other(String), } pub struct VkContext { pub entry: ash::Entry, pub push_ds_api: khr::PushDescriptor, pub external_memory_api: khr::ExternalMemoryFd, pub external_semaphore_api: khr::ExternalSemaphoreFd, pub video_apis: Option<(VideoQueueExt, VideoEncodeQueueExt)>, pub instance: ash::Instance, pub debug: Option, pub device: ash::Device, pub device_info: VkDeviceInfo, pub drm_device: drm::DrmDevice, 
pub graphics_queue: VkQueue, pub encode_queue: Option, pub descriptor_pool: vk::DescriptorPool, } pub struct VkDebugContext { debug: ext::DebugUtils, messenger: vk::DebugUtilsMessengerEXT, } #[derive(Clone)] pub struct VkQueue { pub family: u32, pub queue: vk::Queue, pub command_pool: vk::CommandPool, // TODO: synchronize access #[allow(unused)] pub tracy_context: Option, } impl VkQueue { pub fn new( device: &ash::Device, _pdevice: &VkDeviceInfo, _props: vk::QueueFamilyProperties, family: u32, _name: &str, ) -> Result { let queue = unsafe { device.get_device_queue(family, 0) }; let command_pool = unsafe { let create_info = vk::CommandPoolCreateInfo::default() .queue_family_index(family) .flags(vk::CommandPoolCreateFlags::RESET_COMMAND_BUFFER); device.create_command_pool(&create_info, None)? }; #[cfg(feature = "tracy")] let tracy_context = tracy_client::Client::running().and_then(|client| { if _props.timestamp_valid_bits == 0 { debug!( "queue family {:?} does not support timestamp queries", family ); return None; } match init_tracy_context(device, _pdevice, queue, command_pool, client, _name) { Ok(ctx) => Some(ctx), Err(err) => { error!("failed to initialize tracy GPU context: {err}"); None } } }); #[cfg(not(feature = "tracy"))] let tracy_context = None; Ok(Self { family, queue, command_pool, tracy_context, }) } } pub struct VkDeviceInfo { pub pdevice: vk::PhysicalDevice, pub device_name: CString, pub device_type: vk::PhysicalDeviceType, pub device_vendor: Vendor, pub driver_version: DriverVersion, pub limits: vk::PhysicalDeviceLimits, pub drm_node: libc::dev_t, pub graphics_family: u32, pub encode_family: Option, pub supports_h264: bool, pub supports_h265: bool, pub supports_av1: bool, pub memory_props: vk::PhysicalDeviceMemoryProperties, pub host_visible_mem_type_index: u32, pub host_mem_is_cached: bool, pub selected_extensions: Vec, } impl VkDeviceInfo { fn query(instance: &ash::Instance, device: vk::PhysicalDevice) -> Result { let mut drm_props = 
vk::PhysicalDeviceDrmPropertiesEXT::default(); let mut host_mem_props = vk::PhysicalDeviceExternalMemoryHostPropertiesEXT::default(); let mut driver_props = vk::PhysicalDeviceDriverPropertiesKHR::default(); let mut props = vk::PhysicalDeviceProperties2::default() .push_next(&mut drm_props) .push_next(&mut host_mem_props) .push_next(&mut driver_props); unsafe { instance.get_physical_device_properties2(device, &mut props) }; let limits = props.properties.limits; let device_type = props.properties.device_type; let device_name = unsafe { CStr::from_ptr(props.properties.device_name.as_ptr()).to_owned() }; let device_vendor = match props.properties.vendor_id { 0x1002 => Vendor::Amd, 0x10de => Vendor::Nvidia, _ => Vendor::Other, }; let version = props.properties.driver_version; let driver_version = match driver_props.driver_id { vk::DriverId::MESA_RADV => DriverVersion::MesaRadv { major: vk::api_version_major(version), minor: vk::api_version_minor(version), patch: vk::api_version_patch(version), }, vk::DriverId::NVIDIA_PROPRIETARY => DriverVersion::NvidiaProprietary { major: (version >> 22) & 0x3ff, minor: (version >> 14) & 0x0ff, }, _ => DriverVersion::Other( CStr::from_bytes_with_nul(&driver_props.driver_info.map(|x| x as u8)[..]) .unwrap_or(c"unknown") .to_str() .unwrap_or("unknown") .to_owned(), ), }; if drm_props.render_major != 226 || drm_props.render_minor < 128 { bail!("device {:?} is not a render node", device_name); } let drm_node = libc::makedev(drm_props.render_major as u32, drm_props.render_minor as u32); let queue_families = unsafe { instance .get_physical_device_queue_family_properties(device) .into_iter() .collect::>() }; let graphics_family = queue_families .iter() .enumerate() .find(|(_, properties)| { properties.queue_flags.contains(vk::QueueFlags::GRAPHICS) && properties.queue_flags.contains(vk::QueueFlags::COMPUTE) }) .map(|(index, _)| index as u32) .to_owned() .ok_or_else(|| anyhow::anyhow!("no graphics queue found"))?; let encode_family = 
queue_families .iter() .enumerate() .find(|(_, properties)| { properties .queue_flags .contains(vk::QueueFlags::VIDEO_ENCODE_KHR) }) .map(|(index, _)| index as u32); let available_extensions = unsafe { instance .enumerate_device_extension_properties(device) .unwrap() .into_iter() .map(|properties| CStr::from_ptr(&properties.extension_name as *const _).to_owned()) .collect::>() }; let mut selected_extensions = vec![ // Push descriptors for compositing. vk::KhrPushDescriptorFn::NAME.to_owned(), // All required for dma-buf import. vk::KhrExternalMemoryFdFn::NAME.to_owned(), vk::KhrExternalSemaphoreFdFn::NAME.to_owned(), vk::ExtExternalMemoryDmaBufFn::NAME.to_owned(), vk::ExtImageDrmFormatModifierFn::NAME.to_owned(), vk::ExtPhysicalDeviceDrmFn::NAME.to_owned(), vk::ExtQueueFamilyForeignFn::NAME.to_owned(), ]; for ext in selected_extensions.iter() { if !contains_extension(&available_extensions, ext) { bail!("extension {:?} not available", ext); } } let ext_video_queue = vk::KhrVideoQueueFn::NAME; let ext_video_encode_queue = vk::KhrVideoEncodeQueueFn::NAME; // TODO: ash hasn't picked up the promoted names yet. let ext_h264 = cstr!("VK_KHR_video_encode_h264"); let ext_h265 = cstr!("VK_KHR_video_encode_h265"); // This doesn't exist yet. 
let ext_av1 = cstr!("VK_EXT_video_encode_av1"); let mut supports_h264 = false; let mut supports_h265 = false; let mut supports_av1 = false; if encode_family.is_some() && contains_extension(&available_extensions, ext_video_queue) && contains_extension(&available_extensions, ext_video_encode_queue) { selected_extensions.push(ext_video_encode_queue.to_owned()); selected_extensions.push(ext_video_queue.to_owned()); if contains_extension(&available_extensions, ext_h264) { supports_h264 = true; selected_extensions.push(ext_h264.to_owned()); } if contains_extension(&available_extensions, ext_h265) { supports_h265 = true; selected_extensions.push(ext_h265.to_owned()); } if contains_extension(&available_extensions, ext_av1) { supports_av1 = true; selected_extensions.push(ext_av1.to_owned()); } } if !supports_av1 && !supports_h265 && !supports_h264 { bail!("hardware encode extensions not available"); } // We want HOST_CACHED | HOST_COHERENT, but we can make do with just // HOST_VISIBLE. let memory_props = unsafe { instance.get_physical_device_memory_properties(device) }; let (host_visible_mem_type_index, host_mem_is_cached) = { let mut cached = true; let mut idx = select_memory_type( &memory_props, vk::MemoryPropertyFlags::HOST_VISIBLE | vk::MemoryPropertyFlags::HOST_CACHED | vk::MemoryPropertyFlags::HOST_COHERENT, None, ); if idx.is_none() { idx = select_memory_type( &memory_props, vk::MemoryPropertyFlags::HOST_VISIBLE | vk::MemoryPropertyFlags::HOST_COHERENT, None, ); if idx.is_none() { bail!("no host visible memory type found"); } cached = false; } (idx.unwrap(), cached) }; // Make sure we have the features needed for dmabuf import. 
let mut semaphore_props = vk::ExternalSemaphoreProperties::default(); unsafe { let info = vk::PhysicalDeviceExternalSemaphoreInfo::default() .handle_type(vk::ExternalSemaphoreHandleTypeFlags::SYNC_FD); instance.get_physical_device_external_semaphore_properties( device, &info, &mut semaphore_props, ); } if !semaphore_props .external_semaphore_features .contains(vk::ExternalSemaphoreFeatureFlags::IMPORTABLE) { bail!("no support found for importable semaphores"); } Ok(Self { pdevice: device, device_name, device_type, device_vendor, driver_version, limits, drm_node, graphics_family, encode_family, supports_h264, supports_h265, supports_av1, memory_props, host_visible_mem_type_index, host_mem_is_cached, selected_extensions, }) } } impl VkContext { pub fn new(enable_debug: bool) -> Result { // Try to enable RADV's video support. std::env::set_var( "RADV_PERFTEST", std::env::var("RADV_PERFTEST").unwrap_or("video_encode,video_decode".to_string()), ); let entry = unsafe { ash::Entry::load().context("failed to load vulkan libraries!") }?; debug!("creating vulkan instance"); let (major, minor) = match unsafe { entry.try_enumerate_instance_version()? } { // Vulkan 1.1+ Some(version) => ( vk::api_version_major(version), vk::api_version_minor(version), ), // Vulkan 1.0 None => (1, 0), }; if major < 1 || (major == 1 && minor < 3) { return Err(anyhow::anyhow!("vulkan 1.3 or higher is required")); } let app_info = vk::ApplicationInfo::default() .application_name(cstr!("Magic Mirror")) .application_version(vk::make_api_version(0, 0, 1, 0)) .engine_name(cstr!("No Engine")) .engine_version(vk::make_api_version(0, 0, 1, 0)) .api_version(vk::make_api_version(0, major, minor, 0)); let available_extensions = unsafe { entry .enumerate_instance_extension_properties(None)? 
.into_iter() .map(|properties| CStr::from_ptr(&properties.extension_name as *const _).to_owned()) .collect::>() }; let mut extensions = Vec::new(); let mut layers = Vec::new(); if enable_debug { if !available_extensions .iter() .any(|ext| ext.as_c_str() == ext::DebugUtils::NAME) { return Err(anyhow::anyhow!( "debug utils extension requested, but not available" )); } warn!("vulkan validation layers enabled!"); extensions.push(ext::DebugUtils::NAME.as_ptr()); unsafe { let validation_layer = cstr!("VK_LAYER_KHRONOS_validation"); if entry .enumerate_instance_layer_properties()? .into_iter() .map(|properties| CStr::from_ptr(&properties.layer_name as *const _)) .any(|layer| layer == validation_layer) { layers.push(validation_layer.as_ptr()); } else { warn!("validation layers requested, but not available!") } } } let instance_create_info = vk::InstanceCreateInfo::default() .application_info(&app_info) .enabled_layer_names(&layers) .enabled_extension_names(&extensions); let instance = unsafe { entry.create_instance(&instance_create_info, None)? }; // Enable validation layers and a debugging callback, if requested. let debug_utils = if enable_debug { let debug_utils = ext::DebugUtils::new(&entry, &instance); let create_info = vk::DebugUtilsMessengerCreateInfoEXT::default() .message_severity( vk::DebugUtilsMessageSeverityFlagsEXT::WARNING | vk::DebugUtilsMessageSeverityFlagsEXT::VERBOSE | vk::DebugUtilsMessageSeverityFlagsEXT::INFO | vk::DebugUtilsMessageSeverityFlagsEXT::ERROR, ) .message_type( vk::DebugUtilsMessageTypeFlagsEXT::GENERAL | vk::DebugUtilsMessageTypeFlagsEXT::PERFORMANCE | vk::DebugUtilsMessageTypeFlagsEXT::VALIDATION, ) .pfn_user_callback(Some(vulkan_debug_utils_callback)); let messenger = unsafe { debug_utils.create_debug_utils_messenger(&create_info, None) }?; Some(VkDebugContext { debug: debug_utils, messenger, }) } else { None }; // Select a device based on encoding support. let devices = unsafe { instance.enumerate_physical_devices()? 
}; let mut devices = devices .into_iter() .enumerate() .flat_map(|(index, dev)| match VkDeviceInfo::query(&instance, dev) { Ok(device) => Some((index as u32, device)), Err(err) => { let device_name = unsafe { CStr::from_ptr( instance .get_physical_device_properties(dev) .device_name .as_ptr(), ) .to_owned() }; info!("gpu {device_name:?} ineligible: {err}"); None } }) .collect::>(); if devices.is_empty() { return Err(anyhow::anyhow!("no suitable gpu found")); } devices.sort_by_key(|(_, dev)| { let mut score = match dev.device_type { vk::PhysicalDeviceType::DISCRETE_GPU => 0, vk::PhysicalDeviceType::INTEGRATED_GPU => 10, _ => 20, }; score += dev.encode_family.is_none() as u32; score += !dev.supports_h264 as u32; score += !dev.supports_h265 as u32; score += !dev.supports_av1 as u32; score }); let (index, device_info) = devices.remove(0); info!("selected gpu: {:?} ({index})", device_info.device_name); let drm_device = drm::DrmDevice::new(device_info.drm_node)?; let device = { let queue_priorities = &[1.0]; let mut queue_indices = Vec::new(); queue_indices.push(device_info.graphics_family); if let Some(idx) = device_info.encode_family { queue_indices.push(idx); } queue_indices.dedup(); let queue_create_infos = queue_indices .iter() .map(|&index| { vk::DeviceQueueCreateInfo::default() .queue_family_index(index) .queue_priorities(queue_priorities) }) .collect::>(); let mut enabled_1_1_features = vk::PhysicalDeviceVulkan11Features::default().sampler_ycbcr_conversion(true); let mut enabled_1_2_features = vk::PhysicalDeviceVulkan12Features::default() .timeline_semaphore(true) .host_query_reset(true); let mut enabled_1_3_features = vk::PhysicalDeviceVulkan13Features::default() .dynamic_rendering(true) .synchronization2(true); let extension_names = device_info .selected_extensions .iter() .map(|v| v.as_c_str().as_ptr()) .collect::>(); let device_create_info = vk::DeviceCreateInfo::default() .queue_create_infos(&queue_create_infos) .enabled_extension_names(&extension_names) 
.push_next(&mut enabled_1_1_features) .push_next(&mut enabled_1_2_features) .push_next(&mut enabled_1_3_features); unsafe { instance.create_device(device_info.pdevice, &device_create_info, None)? } }; let qf_props = unsafe { instance.get_physical_device_queue_family_properties(device_info.pdevice) }; let graphics_queue = VkQueue::new( &device, &device_info, qf_props[device_info.graphics_family as usize], device_info.graphics_family, "graphics", )?; let encode_queue = if device_info.encode_family.is_some() { info!( "hardware encoding support: (h264: {}, h265: {}, av1: {})", device_info.supports_h264, device_info.supports_h265, device_info.supports_av1 ); Some(VkQueue::new( &device, &device_info, qf_props[device_info.encode_family.unwrap() as usize], device_info.encode_family.unwrap(), "encode", )?) } else { warn!("no hardware encoding support found!"); None }; if !device_info.host_mem_is_cached { warn!("no cache-coherent memory type found on device!"); } let external_memory_api = khr::ExternalMemoryFd::new(&instance, &device); let external_semaphore_api = khr::ExternalSemaphoreFd::new(&instance, &device); let video_apis = if device_info.encode_family.is_some() { let video_queue = VideoQueueExt::new(&entry, &instance, &device); let video_encode_queue = VideoEncodeQueueExt::new(&entry, &instance, &device); Some((video_queue, video_encode_queue)) } else { None }; let push_ds_api = khr::PushDescriptor::new(&instance, &device); let descriptor_pool = { let pool_sizes = [ vk::DescriptorPoolSize::default() .ty(vk::DescriptorType::COMBINED_IMAGE_SAMPLER) .descriptor_count(1024), vk::DescriptorPoolSize::default() .ty(vk::DescriptorType::STORAGE_IMAGE) .descriptor_count(1024), ]; let create_info = vk::DescriptorPoolCreateInfo::default() .pool_sizes(&pool_sizes) .flags(vk::DescriptorPoolCreateFlags::FREE_DESCRIPTOR_SET) .max_sets(1024); unsafe { device.create_descriptor_pool(&create_info, None)? 
} }; Ok(Self { entry, push_ds_api, external_memory_api, external_semaphore_api, video_apis, instance, device, device_info, drm_device, graphics_queue, encode_queue, descriptor_pool, debug: debug_utils, }) } } unsafe extern "system" fn vulkan_debug_utils_callback( message_severity: vk::DebugUtilsMessageSeverityFlagsEXT, message_type: vk::DebugUtilsMessageTypeFlagsEXT, p_callback_data: *const vk::DebugUtilsMessengerCallbackDataEXT, _userdata: *mut c_void, ) -> vk::Bool32 { let _ = std::panic::catch_unwind(|| { let message = unsafe { CStr::from_ptr((*p_callback_data).p_message) }.to_string_lossy(); let ty = format!("{:?}", message_type).to_lowercase(); // TODO: these should all be debug. match message_severity { vk::DebugUtilsMessageSeverityFlagsEXT::VERBOSE => { tracing::trace!(ty, "{}", message) } vk::DebugUtilsMessageSeverityFlagsEXT::INFO => { debug!(target: "mmserver::vulkan::driver", ty, "{}", message) } vk::DebugUtilsMessageSeverityFlagsEXT::WARNING => { debug!(target: "mmserver::vulkan::driver", ty, "{}", message) } vk::DebugUtilsMessageSeverityFlagsEXT::ERROR => { error!(target: "mmserver::vulkan::driver", ty, "{}", message) } _ => (), } }); // Must always return false. 
vk::FALSE } impl Drop for VkContext { fn drop(&mut self) { debug!("destroying vulkan instance"); unsafe { if let Some(debug) = self.debug.as_ref() { debug .debug .destroy_debug_utils_messenger(debug.messenger, None); } self.device .destroy_command_pool(self.graphics_queue.command_pool, None); if let Some(encode_queue) = self.encode_queue.as_ref() { self.device .destroy_command_pool(encode_queue.command_pool, None); } self.device .destroy_descriptor_pool(self.descriptor_pool, None); self.device.destroy_device(None); self.instance.destroy_instance(None); } } } #[cfg(feature = "tracy")] fn init_tracy_context( device: &ash::Device, pdevice: &VkDeviceInfo, queue: vk::Queue, command_pool: vk::CommandPool, client: tracy_client::Client, name: &str, ) -> anyhow::Result { // Query the timestamp once to calibrate the clocks. let cb = allocate_command_buffer(device, command_pool)?; unsafe { device.reset_command_buffer(cb, vk::CommandBufferResetFlags::empty())?; let query_pool = create_timestamp_query_pool(device, 1)?; let fence = device.create_fence(&vk::FenceCreateInfo::default(), None)?; // Begin the command buffer. device.begin_command_buffer( cb, &vk::CommandBufferBeginInfo::default() .flags(vk::CommandBufferUsageFlags::ONE_TIME_SUBMIT), )?; // Write a timestamp. query_pool.cmd_reset(device, cb); device.cmd_write_timestamp( cb, vk::PipelineStageFlags::BOTTOM_OF_PIPE, query_pool.pool, 0, ); // Submit. device.end_command_buffer(cb)?; device.queue_submit( queue, &[vk::SubmitInfo::default().command_buffers(&[cb])], fence, )?; // Wait for the fence, fetch the timestamp. device.wait_for_fences(&[fence], true, u64::MAX)?; let ts = query_pool.fetch_results(device)?[0]; let context = client.new_gpu_context( Some(name), tracy_client::GpuContextType::Vulkan, ts as i64, pdevice.limits.timestamp_period, )?; // Cleanup. 
device.free_command_buffers(command_pool, &[cb]);
        device.destroy_fence(fence, None);
        device.destroy_query_pool(query_pool.pool, None);

        Ok(context)
    }
}

/// Selects a memory type index satisfying `flags` from the device's memory
/// properties.
///
/// If `memory_type_bits` is set (as taken from `VkMemoryRequirements`), only
/// memory types whose bit is set in the mask are considered. Returns `None`
/// if no matching type exists.
///
/// NOTE(review): the `u32` generic arguments were lost in extraction and have
/// been restored from the `memory_type_count` / bitmask usage below.
pub fn select_memory_type(
    props: &vk::PhysicalDeviceMemoryProperties,
    flags: vk::MemoryPropertyFlags,
    memory_type_bits: Option<u32>,
) -> Option<u32> {
    for i in 0..props.memory_type_count {
        if let Some(mask) = memory_type_bits {
            // Bit `i` of the mask gates memory type `i`.
            if mask & (1 << i) == 0 {
                continue;
            }
        }

        if flags.is_empty()
            || props.memory_types[i as usize]
                .property_flags
                .contains(flags)
        {
            return Some(i);
        }
    }

    None
}

/// An owned 2D color image with bound device memory and a default view.
///
/// All three handles are destroyed/freed on drop.
pub struct VkImage {
    pub image: vk::Image,
    pub view: vk::ImageView,
    pub memory: vk::DeviceMemory,
    pub format: vk::Format,
    pub width: u32,
    pub height: u32,
    vk: Arc<VkContext>,
}

impl VkImage {
    /// Creates an optimal-tiling 2D image, binds freshly-allocated
    /// device-local memory to it, and creates a color view over it.
    ///
    /// If `ignore_alpha` is set, the view swizzles the alpha channel to a
    /// constant one.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        vk: Arc<VkContext>,
        format: vk::Format,
        ignore_alpha: bool,
        width: u32,
        height: u32,
        usage: vk::ImageUsageFlags,
        sharing_mode: vk::SharingMode,
        flags: vk::ImageCreateFlags,
    ) -> anyhow::Result<Self> {
        let image = {
            let create_info = vk::ImageCreateInfo::default()
                .image_type(vk::ImageType::TYPE_2D)
                .format(format)
                .extent(vk::Extent3D {
                    width,
                    height,
                    depth: 1,
                })
                .mip_levels(1)
                .array_layers(1)
                .samples(vk::SampleCountFlags::TYPE_1)
                .tiling(vk::ImageTiling::OPTIMAL)
                .usage(usage)
                .sharing_mode(sharing_mode)
                .initial_layout(vk::ImageLayout::UNDEFINED)
                .flags(flags);

            unsafe {
                vk.device
                    .create_image(&create_info, None)
                    .context("VkCreateImage")?
            }
        };

        let memory =
            unsafe { bind_memory_for_image(&vk.device, &vk.device_info.memory_props, image)? };
        let view = unsafe {
            create_image_view(&vk.device, image, format, ignore_alpha)?
};

        Ok(Self {
            image,
            view,
            memory,
            format,
            width,
            height,
            vk,
        })
    }

    /// Wraps externally-created image resources, taking ownership; they are
    /// destroyed when the returned value is dropped.
    pub fn wrap(
        vk: Arc<VkContext>,
        image: vk::Image,
        view: vk::ImageView,
        memory: vk::DeviceMemory,
        format: vk::Format,
        width: u32,
        height: u32,
    ) -> Self {
        Self {
            image,
            view,
            memory,
            format,
            width,
            height,
            vk,
        }
    }

    /// The full image extent.
    pub fn extent(&self) -> vk::Extent2D {
        vk::Extent2D {
            width: self.width,
            height: self.height,
        }
    }

    /// A rect covering the whole image, with zero offset.
    pub fn rect(&self) -> vk::Rect2D {
        vk::Rect2D {
            offset: vk::Offset2D { x: 0, y: 0 },
            extent: self.extent(),
        }
    }
}

impl Drop for VkImage {
    fn drop(&mut self) {
        unsafe {
            // The view references the image, so it goes first.
            self.vk.device.destroy_image_view(self.view, None);
            self.vk.device.destroy_image(self.image, None);
            self.vk.device.free_memory(self.memory, None);
        }
    }
}

/// Allocates device-local memory suitable for `image` and binds it.
///
/// # Safety
///
/// `image` must have been created by `device` and must not already have
/// memory bound.
pub unsafe fn bind_memory_for_image(
    device: &ash::Device,
    props: &vk::PhysicalDeviceMemoryProperties,
    image: vk::Image,
) -> anyhow::Result<vk::DeviceMemory> {
    let image_memory_req = unsafe { device.get_image_memory_requirements(image) };

    // `let .. else` replaces the previous is_none()/unwrap() pair.
    let Some(mem_type_index) = select_memory_type(
        props,
        vk::MemoryPropertyFlags::DEVICE_LOCAL,
        Some(image_memory_req.memory_type_bits),
    ) else {
        bail!(
            "no appropriate memory type found for reqs: {:?}",
            image_memory_req
        );
    };

    let memory = {
        let image_allocate_info = vk::MemoryAllocateInfo::default()
            .allocation_size(image_memory_req.size)
            .memory_type_index(mem_type_index);

        unsafe {
            device
                .allocate_memory(&image_allocate_info, None)
                .context("VkAllocateMemory")?
}
    };

    unsafe {
        device
            .bind_image_memory(image, memory, 0)
            .context("VkBindImageMemory")?;
    }

    Ok(memory)
}

/// Creates a 2D color view covering all mip levels and array layers of
/// `image`.
///
/// If `ignore_alpha` is set, the alpha channel reads back as a constant one.
///
/// # Safety
///
/// `image` must be a valid image created by `device`, and `format` must be
/// compatible with it.
pub unsafe fn create_image_view(
    device: &ash::Device,
    image: vk::Image,
    format: vk::Format,
    ignore_alpha: bool,
) -> anyhow::Result<vk::ImageView> {
    let alpha_swizzle = if ignore_alpha {
        vk::ComponentSwizzle::ONE
    } else {
        vk::ComponentSwizzle::IDENTITY
    };

    let create_info = vk::ImageViewCreateInfo::default()
        .image(image)
        .view_type(vk::ImageViewType::TYPE_2D)
        .format(format)
        .components(vk::ComponentMapping {
            r: vk::ComponentSwizzle::IDENTITY,
            g: vk::ComponentSwizzle::IDENTITY,
            b: vk::ComponentSwizzle::IDENTITY,
            a: alpha_swizzle,
        })
        .subresource_range(vk::ImageSubresourceRange {
            aspect_mask: vk::ImageAspectFlags::COLOR,
            base_mip_level: 0,
            level_count: vk::REMAINING_MIP_LEVELS,
            base_array_layer: 0,
            layer_count: vk::REMAINING_ARRAY_LAYERS,
        });

    device
        .create_image_view(&create_info, None)
        .context("VkCreateImageView")
}

/// A host-visible buffer that stays persistently mapped at `access` for its
/// entire lifetime.
pub struct VkHostBuffer {
    pub buffer: vk::Buffer,
    pub memory: vk::DeviceMemory,
    // Host pointer to the persistently-mapped allocation.
    pub access: *mut c_void,
    pub len: usize,
    vk: Arc<VkContext>,
}

// SAFETY: the raw mapped pointer is what inhibits the auto impl; the buffer
// exclusively owns its allocation, so moving it between threads is fine.
unsafe impl Send for VkHostBuffer {}

impl VkHostBuffer {
    /// Creates a buffer of `size` bytes backed by memory of type `mem_type`
    /// (which must be host-visible) and persistently maps it.
    pub fn new(
        vk: Arc<VkContext>,
        mem_type: u32,
        usage: vk::BufferUsageFlags,
        size: usize,
    ) -> anyhow::Result<Self> {
        let buffer = {
            let create_info = vk::BufferCreateInfo::default()
                .size(size as u64)
                .usage(usage)
                .sharing_mode(vk::SharingMode::EXCLUSIVE);

            unsafe {
                vk.device
                    .create_buffer(&create_info, None)
                    .context("VkCreateBuffer")?
            }
        };

        let requirements = unsafe { vk.device.get_buffer_memory_requirements(buffer) };
        let alloc_info = vk::MemoryAllocateInfo::default()
            .allocation_size(requirements.size)
            .memory_type_index(mem_type);

        let memory = unsafe {
            vk.device
                .allocate_memory(&alloc_info, None)
                .context("VkAllocateMemory")?
        };

        unsafe {
            vk.device
                .bind_buffer_memory(buffer, memory, 0)
                .context("vkBindBufferMemory")?
        };

        let access = {
            unsafe {
                vk.device
                    .map_memory(memory, 0, vk::WHOLE_SIZE, vk::MemoryMapFlags::empty())
                    .context("VkMapMemory")?
}
        };

        Ok(VkHostBuffer {
            buffer,
            memory,
            access,
            len: size,
            vk,
        })
    }

    /// Wraps an externally-created buffer and its backing memory, mapping the
    /// memory persistently. Panics if the memory cannot be mapped.
    pub(crate) fn wrap(
        vk: Arc<VkContext>,
        buf: vk::Buffer,
        memory: vk::DeviceMemory,
        buffer_size: usize,
    ) -> Self {
        let access = unsafe {
            vk.device
                .map_memory(memory, 0, vk::WHOLE_SIZE, vk::MemoryMapFlags::empty())
                .context("failed to map buffer memory")
                .unwrap()
        };

        Self {
            buffer: buf,
            memory,
            access,
            len: buffer_size,
            vk,
        }
    }

    /// Copies `src` into the mapped buffer. Panics unless `src.len()` equals
    /// the buffer length.
    pub fn copy_from_slice(&mut self, src: &[u8]) {
        let dst = unsafe { std::slice::from_raw_parts_mut(self.access as *mut u8, self.len) };
        dst.copy_from_slice(src);
    }
}

impl Drop for VkHostBuffer {
    fn drop(&mut self) {
        unsafe {
            self.vk.device.unmap_memory(self.memory);
            self.vk.device.destroy_buffer(self.buffer, None);
            self.vk.device.free_memory(self.memory, None);
        }
    }
}

/// A query pool holding `num_timestamps` timestamp queries.
pub struct VkTimestampQueryPool {
    pub pool: vk::QueryPool,
    num_timestamps: u32,
}

impl VkTimestampQueryPool {
    /// Records a reset of all timestamp queries into `command_buffer`.
    ///
    /// # Safety
    ///
    /// `command_buffer` must be in the recording state.
    pub unsafe fn cmd_reset(&self, device: &ash::Device, command_buffer: vk::CommandBuffer) {
        device.cmd_reset_query_pool(command_buffer, self.pool, 0, self.num_timestamps);
    }

    /// Fetches all timestamps, blocking until they become available.
    pub fn fetch_results(&self, device: &ash::Device) -> anyhow::Result<Vec<i64>> {
        let mut results = vec![0_i64; self.num_timestamps as usize];

        unsafe {
            device
                .get_query_pool_results(self.pool, 0, &mut results, vk::QueryResultFlags::WAIT)
                .context("vkGetQueryPoolResults")?;
        }

        // A timestamp of zero means the query was never actually written.
        for v in &results {
            assert!(*v > 0_i64, "invalid query pool results")
        }

        Ok(results)
    }
}

/// Creates a query pool with `num_timestamps` timestamp queries.
pub fn create_timestamp_query_pool(
    device: &ash::Device,
    num_timestamps: u32,
) -> anyhow::Result<VkTimestampQueryPool> {
    let create_info = vk::QueryPoolCreateInfo::default()
        .query_type(vk::QueryType::TIMESTAMP)
        .query_count(num_timestamps);

    let pool = unsafe {
        device
            .create_query_pool(&create_info, None)
            .context("vkCreateQueryPool")?
};

    Ok(VkTimestampQueryPool {
        pool,
        num_timestamps,
    })
}

/// Creates a shader module from SPIR-V bytecode.
pub fn load_shader(device: &ash::Device, bytes: &[u8]) -> anyhow::Result<vk::ShaderModule> {
    let code = ash::util::read_spv(&mut std::io::Cursor::new(bytes))?;
    let create_info = vk::ShaderModuleCreateInfo::default().code(&code);
    let shader = unsafe { device.create_shader_module(&create_info, None)? };
    Ok(shader)
}

/// Allocates a single primary command buffer from `pool`.
pub fn allocate_command_buffer(
    device: &ash::Device,
    pool: vk::CommandPool,
) -> anyhow::Result<vk::CommandBuffer> {
    let create_info = vk::CommandBufferAllocateInfo::default()
        .level(vk::CommandBufferLevel::PRIMARY)
        .command_pool(pool)
        .command_buffer_count(1);

    let cb = unsafe {
        device
            .allocate_command_buffers(&create_info)
            .context("failed to allocate render command buffer")?
            .pop()
            .unwrap()
    };

    Ok(cb)
}

/// Resets `cb` and begins recording it as a one-time-submit command buffer.
///
/// # Safety
///
/// `cb` must not be pending execution.
#[instrument(level = "trace", skip_all)]
pub unsafe fn begin_command_buffer(
    device: &ash::Device,
    cb: vk::CommandBuffer,
) -> anyhow::Result<()> {
    device.reset_command_buffer(cb, vk::CommandBufferResetFlags::empty())?;
    device.begin_command_buffer(
        cb,
        &vk::CommandBufferBeginInfo::default().flags(vk::CommandBufferUsageFlags::ONE_TIME_SUBMIT),
    )?;

    Ok(())
}

/// Records a full-subresource image memory barrier (synchronization2) into
/// `cb`, optionally performing a queue family ownership transfer.
#[allow(clippy::too_many_arguments)]
pub fn insert_image_barrier(
    device: &ash::Device,
    cb: vk::CommandBuffer,
    image: vk::Image,
    queue_transfer: Option<(u32, u32)>,
    old_layout: vk::ImageLayout,
    new_layout: vk::ImageLayout,
    src_stage: vk::PipelineStageFlags2,
    src_access: vk::AccessFlags2,
    dst_stage: vk::PipelineStageFlags2,
    dst_access: vk::AccessFlags2,
) {
    // QUEUE_FAMILY_IGNORED on both sides means "no ownership transfer".
    let (src_family, dst_family) =
        queue_transfer.unwrap_or((vk::QUEUE_FAMILY_IGNORED, vk::QUEUE_FAMILY_IGNORED));

    let barriers = [vk::ImageMemoryBarrier2::default()
        .src_stage_mask(src_stage)
        .src_access_mask(src_access)
        .dst_stage_mask(dst_stage)
        .dst_access_mask(dst_access)
        .old_layout(old_layout)
        .new_layout(new_layout)
        .src_queue_family_index(src_family)
        .dst_queue_family_index(dst_family)
        .image(image)
        .subresource_range(vk::ImageSubresourceRange {
            aspect_mask: vk::ImageAspectFlags::COLOR,
            base_mip_level: 0,
            level_count: vk::REMAINING_MIP_LEVELS,
            base_array_layer:
0,
            layer_count: vk::REMAINING_ARRAY_LAYERS,
        })];

    unsafe {
        device.cmd_pipeline_barrier2(
            cb,
            &vk::DependencyInfo::default().image_memory_barriers(&barriers),
        )
    };
}

/// Returns true if `str` is present in `list`.
fn contains_extension(list: &[CString], str: &CStr) -> bool {
    list.iter().any(|v| v.as_c_str() == str)
}

================================================
FILE: mm-server/src/waking_sender.rs
================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: BUSL-1.1

use std::sync::Arc;

/// A channel sender that wakes a mio poll loop after each successful send, so
/// the receiving side notices the message promptly.
///
/// NOTE(review): the generic parameters on the types in this file were lost in
/// extraction; `T` and `Arc<mio::Waker>` are reconstructed from usage (the
/// `wake()` call below) — confirm against the original source.
pub struct WakingSender<T> {
    waker: Arc<mio::Waker>,
    sender: crossbeam_channel::Sender<T>,
}

// Manual impl rather than #[derive(Clone)], which would add an unnecessary
// `T: Clone` bound.
impl<T> Clone for WakingSender<T> {
    fn clone(&self) -> Self {
        Self {
            waker: self.waker.clone(),
            sender: self.sender.clone(),
        }
    }
}

impl<T> WakingSender<T> {
    pub fn new(waker: Arc<mio::Waker>, sender: crossbeam_channel::Sender<T>) -> Self {
        // A zero-capacity (rendezvous) channel reports `is_full()` even while
        // empty; `send` on one would block with nobody polling.
        assert!(
            !sender.is_full(),
            "WakingSender must be created with a non-zero capacity channel"
        );

        Self { waker, sender }
    }

    /// Sends `msg`, then wakes the receiver's poll loop.
    pub fn send(&self, msg: T) -> Result<(), crossbeam_channel::SendError<T>> {
        self.sender.send(msg)?;
        self.waker.wake().unwrap();
        Ok(())
    }

    /// Non-blocking variant of `send`.
    pub fn try_send(&self, msg: T) -> Result<(), crossbeam_channel::TrySendError<T>> {
        self.sender.try_send(msg)?;
        self.waker.wake().unwrap();
        Ok(())
    }
}

/// A oneshot sender that wakes a mio poll loop after sending.
pub struct WakingOneshot<T> {
    waker: Arc<mio::Waker>,
    sender: oneshot::Sender<T>,
}

impl<T> WakingOneshot<T> {
    pub fn new(waker: Arc<mio::Waker>, sender: oneshot::Sender<T>) -> Self {
        Self { waker, sender }
    }

    /// Sends the single value, then wakes the receiver's poll loop.
    pub fn send(self, msg: T) -> Result<(), oneshot::SendError<T>> {
        self.sender.send(msg)?;
        self.waker.wake().unwrap();
        Ok(())
    }
}

================================================
FILE: mmserver.default.toml
================================================
## Copyright 2024 Colin Marc
##
## SPDX-License-Identifier: MIT
##
## This file specifies the configuration defaults for the magic mirror server. If
## a line is commented out, the default is to leave the value unset (and the
## setting is not required, unless stated otherwise).
##
## To determine the final config, the server merges the values in this file with
## the provided configuration file (by default, /etc/magic-mirror/mmserver.toml).
##
## All configuration files may be json instead of toml.

## ***-----------------***
## *** Global Settings ***
## ***-----------------***

## If set, this special setting instructs the server to load additional application
## configurations from the given files or directories. Each file (or file in
## the directories) should be named using the scheme `<name>.toml` or
## `<name>.json`, where `<name>` is the name of the application, following the
## rules outlined below. The contents should be identical to the configuration
## for an individual app.
##
# include_apps = ["/etc/magic-mirror/apps.d"]

## This determines where the server stores application data, i.e. the $HOME for
## containerized applications. If not set, then $XDG_DATA_HOME/mmserver is used,
## or $HOME/.local/share/mmserver if $XDG_DATA_HOME is not set.
##
## If you're running magic-mirror as a permanent daemon, you should set this to
## something like /var/lib/magic-mirror.
##
# data_home = "/var/lib/magic-mirror"

## ***-----------------***
## *** Server Settings ***
## ***-----------------***
##
## This section contains configuration options for the network server.
[server]

## Where the server should listen for incoming connections. IPv6 addresses are
## supported. Use `0.0.0.0` or `[::]` to listen on all available interfaces.
bind = "localhost:9599"

## If set, `bind` will be ignored, and the server will instead listen for
## incoming connections on the socket specified by the LISTEN_FDS environment
## variable. See the systemd documentation on "socket activation", here:
## <https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html>
bind_systemd = false

## Used for TLS. Both are required unless the host portion of the bind address
## resolves to a private address (as defined by RFCs 1918, 4193, and 6598) or
## otherwise not routable, for example `127.0.0.1`, `192.168.24.25`, or
## `fd12:3456:789a:1::1`.
# tls_key = "/path/to/tls.key"
# tls_cert = "/path/to/tls.cert"

## The number of threads to spawn for handling incoming requests.
worker_threads = 8

## The maximum number of concurrent connections the server will accept. Use `inf`
## to specify no limit.
max_connections = 4

## Whether to use mDNS to allow clients to discover the server.
mdns = true

## Determines the FEC (forward error correction) ratio to use for each video
## layer. For example, an array of values like `[0.20, 0.10, 0.05]` would use
## 0.20 for the base layer, 0.10 for the second layer, etc. If a layer is not
## covered because the array isn't long enough, the FEC ratio for that layer
## defaults to 0.0. Use an empty array to disable FEC altogether.
##
## If hierarchical coding is not in use, then only the first value applies to
## all video frames.
video_fec_ratios = [0.15]

## The hostname to advertise over mDNS. Defaults to `$(uname -n).local.` if left
## unset, or ignored if `mdns` is `false`.
# mdns_hostname = "mycomputer.local."

## The instance name to advertise over mDNS. Defaults to the unqualified value of
## `mdns_hostname`, converted to uppercase.
# mdns_instance_name = "MYCOMPUTER"

## ***-------------------------***
## *** Configured Applications ***
## ***-------------------------***
##
## Each application you want to stream must be configured in advance, with each
## application as its own section. Applications can, alternatively, be
## configured as individual files. See the documentation for `include_apps`
## above for more information. At least one application must always be
## configured.
##
## App names must be unique and only contain characters in the set `[a-z0-9-_]`.
## The section is structured as a dictionary, with the key as the application
## name.
##
## An example application configuration follows. (Note that unlike the rest of
## this file, this application is not included in the default configuration.)

# [apps.steam-big-picture]

## A short name for the app.
# description = "Steam" ## The command to run. Must be in `$PATH` or absolute. # command = ["steam", "-gamepadui"] ## Key/value pairs to set in the environment when running the command. # environment = { "FOO" = "bar" } ## Configure a "path" for the application. Clients can use this to group apps ## into folders. This has nothing to do with the local filesystem. Paths should ## use unix path separators. They may include characters in the set ## `[A-Za-z0-9-_ ]` (including spaces). # app_path = "My Games/Puzzle Games" ## Add a header image to the app, for displaying in clients. The image must be a ## PNG file and less than 1mb. Any aspect ratio is permitted, but roughly 2:1 ## with a transparent background will work best. # header_image = "/path/to/image.png" ## Enable XWayland support for this application. This is required for any ## applications that are built for the legacy X11 windowing system, such as Steam. ## ## If unset, defaults to `default_app_settings.xwayland`. # xwayland = true ## Force the app to run at 1x. This is useful for applications where you know in ## advance they don't support any UI scaling, for example any application run ## through XWayland. This setting will ensure that the app always renders at the ## full session resolution, but may result in small font sizes or other UI ## elements. ## ## If unset, defaults to `default_app_settings.force_1x_scale`. # force_1x_scale = false ## How long to leave the session running without any client attached to it, in ## seconds. Use the value `inf` to specify no timeout. # session_timeout = 600 ## Isolate the home directory. If set, the application will see a clean, ## sandboxed `$HOME` (and `/home/$(whoami)`), rather than the system-wide one. ## This home directory is saved between runs of the app to ## `/homes/`. ## ## If unset, defaults to `default_app_settings.isolate_home`. 
# isolate_home = true

## If `isolate_home` is set to true, this sets a name for the home directory,
## which can be shared between apps. For example, multiple apps with this option
## set to 'myhome' will all see the same $HOME when they run. By default, this is
## set to the name of the application.
##
## If unset, defaults to `default_app_settings.shared_home_name`.
# shared_home_name = same as application name

## If `isolate_home` is set to true, this mounts a brand new $HOME (using tmpfs)
## each time the application is run. If set, `shared_home_name` is ignored.
##
## Note that any data saved while the app is running will be irrevocably
## destroyed when it exits.
##
## If unset, defaults to `default_app_settings.tmp_home`.
# tmp_home = false

## ***----------------------***
## *** Default App Settings ***
## ***----------------------***
##
## This section can be used to set global defaults for all apps. Any setting here
## can be overridden in the configuration for each individual app.
[default_app_settings]
xwayland = true
force_1x_scale = false
session_timeout = 3600 # 1h
isolate_home = true
tmp_home = false

================================================
FILE: shader-common/color.slang
================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: MIT

module color;

// A set of color primaries, defined in terms of a transformation to/from XYZ
// space.
public struct PrimariesTransform {
    public float3x3 to_xyz;
    public float3x3 from_xyz;
}

// Named sets of color primaries.
namespace Primaries {
    // BT.709 / sRGB primaries. Row-major RGB-to-XYZ and XYZ-to-RGB matrices.
    public static const PrimariesTransform BT709 = {
        float3x3(
            0.4124564f, 0.3575761f, 0.1804375f,
            0.2126729f, 0.7151522f, 0.0721750f,
            0.0193339f, 0.1191920f, 0.9503041f),
        float3x3(
            3.2404542f, -1.5371385f, -0.4985314f,
            -0.9692660f, 1.8760108f, 0.0415560f,
            0.0556434f, -0.2040259f, 1.0572252f)
    };

    // BT.2020 wide-gamut primaries.
    public static const PrimariesTransform BT2020 = {
        float3x3(
            0.636958f, 0.1446169f, 0.1688810f,
            0.2627002f, 0.6779981f, 0.0593017f,
            0.0000000f, 0.0280727f, 1.0609851f),
        float3x3(
            1.7166512, -0.3556708, -0.2533663,
            -0.6666844, 1.6164812, 0.0157685,
            0.0176399, -0.0427706, 0.9421031),
    };
}

// Applies the sRGB EOTF to a color, producing linear values.
public float3 srgb_eotf(float3 color)
{
    return float3(
        srgb_eotf(color.r),
        srgb_eotf(color.g),
        srgb_eotf(color.b));
}

// Applies the sRGB EOTF to one channel of a color, producing a linear value.
// Piecewise: a linear segment below 0.04045, a 2.4-power curve above it.
public float srgb_eotf(float channel)
{
    return channel > 0.04045
        ? pow((channel + 0.055) / 1.055, 2.4)
        : channel / 12.92;
}

// Applies the inverse sRGB EOTF to a color, producing non-linear values. This
// is sometimes called gamma correction.
public float3 srgb_inverse_eotf(float3 color)
{
    return float3(
        srgb_inverse_eotf(color.r),
        srgb_inverse_eotf(color.g),
        srgb_inverse_eotf(color.b));
}

// Applies the inverse sRGB EOTF to one channel of a color, producing non-linear
// values. This is sometimes called gamma correction.
public float srgb_inverse_eotf(float channel)
{
    return channel > 0.0031308
        ? 1.055 * pow(channel, 1.0 / 2.4) - 0.055
        : 12.92 * channel;
}

// Applies the BT.709 EOTF to a color, producing linear values.
public float3 bt709_eotf(float3 color)
{
    return float3(
        bt709_eotf(color.r),
        bt709_eotf(color.g),
        bt709_eotf(color.b));
}

// Applies the BT.709 EOTF to one channel of a color, producing a linear value.
// Inverse of the BT.709 transfer V = 1.099 * L^0.45 - 0.099 (linear below 0.081).
public float bt709_eotf(float channel)
{
    return channel > 0.081
        ? pow((channel + 0.099) / 1.099, 1.0 / 0.45)
        : channel / 4.5;
}

// Applies the inverse BT.709 EOTF to a color, producing non-linear values.
This
// is sometimes called gamma correction.
public float3 bt709_inverse_eotf(float3 color)
{
    return float3(
        bt709_inverse_eotf(color.r),
        bt709_inverse_eotf(color.g),
        bt709_inverse_eotf(color.b));
}

// Applies the inverse BT.709 EOTF to one channel of a color, producing non-linear
// values. This is sometimes called gamma correction.
public float bt709_inverse_eotf(float channel)
{
    // Exponent 0.45 matches bt709_eotf above (which uses 1.0 / 0.45), so a
    // round trip is exact; BT.709 specifies V = 1.099 * L^0.45 - 0.099 for
    // L >= 0.018. The previous 1.0 / 2.2 (~0.4545) did not invert bt709_eotf.
    return channel >= 0.018
        ? 1.099 * pow(channel, 0.45) - 0.099
        : 4.5 * channel;
}

// Constants for the SMPTE ST 2084 (Perceptual Quantizer) transfer function.
static const float PQ_M1 = 0.1593017578125;
static const float PQ_M2 = 78.84375;
static const float PQ_C1 = 0.8359375;
static const float PQ_C2 = 18.8515625;
static const float PQ_C3 = 18.6875;

// Nominal SDR white level, in nits, when mapped into an HDR signal.
public static const float SDR_REFERENCE_WHITE = 203.0;
// The maximum luminance representable by PQ, in nits.
public static const float PQ_MAX_WHITE = 10000.0;

// Applies the Perceptual Quantizer EOTF to a color, producing linear values.
// The input should be in the range [0, 1], where 1 corresponds to the maximum
// 10,000 nits.
public float3 pq_eotf(float3 color)
{
    return float3(
        pq_eotf(color.r),
        pq_eotf(color.g),
        pq_eotf(color.b));
}

// Applies the Perceptual Quantizer EOTF to a color channel, producing linear
// values. The input should be in the range [0, 1], where 1 corresponds to the
// maximum 10,000 nits.
float pq_eotf(float channel)
{
    let c = pow(channel, 1.0 / PQ_M2);
    return pow(
        max(c - PQ_C1, 0.0) / (PQ_C2 - PQ_C3 * c),
        1.0 / PQ_M1);
}

// Applies the inverse Perceptual Quantizer EOTF to a color, producing non-linear
// values. The output will be in the range [0, 1], where 1 corresponds to the
// maximum 10,000 nits.
public float3 pq_inverse_eotf(float3 color)
{
    return float3(
        pq_inverse_eotf(color.r),
        pq_inverse_eotf(color.g),
        pq_inverse_eotf(color.b));
}

// Applies the inverse Perceptual Quantizer EOTF to a color channel, producing a
// non-linear value. The output will be in the range [0, 1], where 1 corresponds
// to the maximum 10,000 nits.
float pq_inverse_eotf(float channel)
{
    let c = pow(channel, PQ_M1);
    return pow(
        (PQ_C1 + PQ_C2 * c) / (1.0 + PQ_C3 * c),
        PQ_M2);
}

// Transform a color from one set of primaries to another, preserving alpha.
// The colors must be linear, that is, they must have already been linearized
// using the relevant inverse EOTF.
public float4 transform(float4 color, PrimariesTransform pa, PrimariesTransform pb)
{
    return float4(
        transform(color.rgb, pa, pb),
        color.a);
}

// Transform a color from one set of primaries to another. The colors must be
// linear, that is, they must have already been linearized using the relevant
// inverse EOTF.
public float3 transform(float3 color, PrimariesTransform pa, PrimariesTransform pb)
{
    // Compose source-to-XYZ with XYZ-to-destination into one matrix.
    let mat = mul(pb.from_xyz, pa.to_xyz);
    return mul(mat, color);
}

// Available conversions to and from YCbCr color space.
public enum YCbCrModel {
    BT709,
    BT2020,
}

// Row 1 yields luma; rows 2 and 3 yield Cb and Cr in [-0.5, 0.5].
static const float3x3 YCBCR_709_MATRIX = float3x3(
    0.2126, 0.7152, 0.0722,
    -0.114572, -0.385428, 0.5,
    0.5, -0.454153, -0.045847);

static const float3x3 YCBCR_2020_MATRIX = float3x3(
    0.2627, 0.6780, 0.0593,
    -0.139630, -0.360370, 0.5,
    0.5, -0.459786, -0.040214);

// Encode a color in the YCbCr color system. The color should already be in
// nonlinear space.
public float3 encode_ycbcr(float3 color, YCbCrModel model, bool full_range)
{
    float3 ycbcr;
    switch (model) {
    case YCbCrModel::BT709:
        ycbcr = mul(YCBCR_709_MATRIX, color);
        break;
    case YCbCrModel::BT2020:
        ycbcr = mul(YCBCR_2020_MATRIX, color);
        break;
    }

    // The matrix multiplication gives us Y in [0, 1] and Cb and Cr in [-0.5, 0.5].
    ycbcr.y += 0.5;
    ycbcr.z += 0.5;

    if (!full_range)
        // This converts to "MPEG" or "Narrow" in the range [16, 235] and [16, 240].
ycbcr = float3( (219.0 * ycbcr.x + 16.0) / 256.0, (224.0 * ycbcr.y + 16.0) / 256.0, (224.0 * ycbcr.z + 16.0) / 256.0); return clamp(ycbcr, 0.0, 1.0); } ================================================ FILE: test-apps/Cargo.toml ================================================ # Copyright 2024 Colin Marc # # SPDX-License-Identifier: MIT [package] name = "latency-test" version = "0.1.0" edition = "2021" [[bin]] name = "latency-test" path = "bin/latency.rs" [[bin]] name = "color-test" path = "bin/color.rs" [[bin]] name = "cursorlock-test" path = "bin/cursorlock.rs" [dependencies] anyhow = "1.0.75" ash = "0.37.3" ash-window = "0.12.0" raw-window-handle = "0.5.2" clap = { version = "4.4.5", features = ["derive"] } glam = "0.27.0" imgui-rs-vulkan-renderer = { version = "1.12", features = [ "dynamic-rendering", ] } imgui = { version = "0.11", features = ["tables-api"] } imgui-winit-support = "0.11" [dependencies.winit] version = "0.29" default-features = false features = ["wayland", "rwh_05"] [dependencies.bevy] version = "0.15" default-features = false features = [ #"animation", #"bevy_asset", #"bevy_audio", "bevy_gilrs", #"bevy_scene", "bevy_winit", "bevy_core_pipeline", "bevy_pbr", #"bevy_gltf", "bevy_render", "bevy_sprite", #"bevy_text", #"bevy_ui", #"png", #"hdr", #"vorbis", #"x11", "wayland", #"bevy_gizmos", #"android_shared_stdcxx", "tonemapping_luts", #"default_font", #"webgl2", #"bevy_debug_stepping", ] [build-dependencies.slang] git = "https://github.com/colinmarc/slang-rs" rev = "075daa4faa8d1ab6d7bfbb5293812b087a527207" # Uses SLANG_DIR if set, otherwise builds slang from source features = ["from-source"] [patch.crates-io] imgui = { git = "https://github.com/colinmarc/imgui-rs" } imgui-winit-support = { git = "https://github.com/colinmarc/imgui-rs" } ================================================ FILE: test-apps/bin/color.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT use std::{ 
ffi::{c_void, CStr, CString},
    rc::Rc,
    time,
};

use anyhow::{anyhow, Context};
use ash::{
    extensions::{
        ext::DebugUtils as DebugUtilsExt, khr::DynamicRendering as DynamicRenderingKhr,
        khr::Surface as SurfaceKhr, khr::Swapchain as SwapchainKhr,
    },
    vk,
};
use imgui_rs_vulkan_renderer as imgui_vulkan;
use raw_window_handle::{HasRawDisplayHandle, HasRawWindowHandle};
use winit::{
    event::{ElementState, Event, KeyEvent, MouseButton, WindowEvent},
    event_loop::EventLoop,
    keyboard::{KeyCode, PhysicalKey},
    window::WindowBuilder,
};

// imgui state paired with its winit platform glue.
struct ImguiContext {
    imgui: imgui::Context,
    platform: imgui_winit_support::WinitPlatform,
}

// Push constants handed to the test shader; the layout must match the shader
// side, hence #[repr(C)].
#[derive(Copy, Clone, Debug)]
#[repr(C)]
struct PushConstants {
    size: glam::Vec2,
    mouse: glam::Vec2,
    color_mul: f32,
    color_space: vk::ColorSpaceKHR,
}

struct VkDebugContext {
    debug: DebugUtilsExt,
    messenger: vk::DebugUtilsMessengerEXT,
}

struct DeviceInfo {
    device_name: CString,
    device_type: vk::PhysicalDeviceType,
    present_family: u32,
}

pub struct VkQueue {
    pub queue: vk::Queue,
    pub command_pool: vk::CommandPool,
}

// NOTE(review): the generic parameters on container fields below were lost in
// extraction and have been reconstructed from their usage in Renderer::new;
// confirm against the original source.
struct Renderer {
    _entry: ash::Entry,
    instance: ash::Instance,
    device: ash::Device,
    swapchain_loader: SwapchainKhr,
    surface_loader: SurfaceKhr,
    dynamic_rendering_loader: DynamicRenderingKhr,
    debug: Option<VkDebugContext>,
    pdevice: vk::PhysicalDevice,
    _device_info: DeviceInfo,
    surface: vk::SurfaceKHR,
    surface_formats: Vec<vk::SurfaceFormatKHR>,
    format: vk::Format,
    colorspace: vk::ColorSpaceKHR,
    pc: PushConstants,
    present_queue: VkQueue,
    width: u32,
    height: u32,
    imgui: Option<ImguiContext>,
    window: Rc<winit::window::Window>,
    swapchain: Option<Swapchain>,
    swapchain_dirty: bool,
}

struct Swapchain {
    swapchain: vk::SwapchainKHR,
    frames: Vec<InFlightFrame>,
    present_images: Vec<SwapImage>,
    current_frame: usize,
    imgui_renderer: Option<imgui_vulkan::Renderer>,
    descriptor_set_layout: vk::DescriptorSetLayout,
    descriptor_pool: vk::DescriptorPool,
    pipeline_layout: vk::PipelineLayout,
    pipeline: vk::Pipeline,
}

// Per-frame command buffer plus the synchronization primitives for one
// in-flight frame.
struct InFlightFrame {
    render_cb: vk::CommandBuffer,
    render_fence: vk::Fence,
    image_acquired_sema: vk::Semaphore,
    render_complete_sema: vk::Semaphore,
}

struct SwapImage {
    image:
vk::Image, view: vk::ImageView, } impl Renderer { fn new(window: Rc, debug: bool) -> anyhow::Result { let entry = unsafe { ash::Entry::load().context("failed to load vulkan libraries!") }?; eprintln!("creating vulkan instance"); let (major, minor) = match entry.try_enumerate_instance_version()? { // Vulkan 1.1+ Some(version) => ( vk::api_version_major(version), vk::api_version_minor(version), ), // Vulkan 1.0 None => (1, 0), }; if major < 1 || (major == 1 && minor < 2) { return Err(anyhow::anyhow!("vulkan 1.2 or higher is required")); } // MoltenVK doesn't actually support 1.3. let (major, minor) = if cfg!(any(target_os = "macos", target_os = "ios")) { (1, 2) } else { (major, minor) }; let app_info = vk::ApplicationInfo::builder() .application_name(c"c") .application_version(vk::make_api_version(0, 0, 1, 0)) .engine_name(c"No Engine") .engine_version(vk::make_api_version(0, 0, 1, 0)) .api_version(vk::make_api_version(0, major, minor, 0)); let mut extensions = ash_window::enumerate_required_extensions(window.raw_display_handle())?.to_vec(); let mut layers = Vec::new(); #[cfg(any(target_os = "macos", target_os = "ios"))] { extensions.push(vk::KhrPortabilityEnumerationFn::name().as_ptr()); // Enabling this extension is a requirement when using `VK_KHR_portability_subset` extensions.push(vk::KhrGetPhysicalDeviceProperties2Fn::name().as_ptr()); } if debug { let props = entry.enumerate_instance_extension_properties(None)?; let available_extensions = props .into_iter() .map(|properties| unsafe { CStr::from_ptr(&properties.extension_name as *const _).to_owned() }) .collect::>(); if !available_extensions .iter() .any(|ext| ext.as_c_str() == DebugUtilsExt::name()) { return Err(anyhow::anyhow!( "debug utils extension requested, but not available" )); } extensions.push(DebugUtilsExt::name().as_ptr()); let validation_layer = c"VK_LAYER_KHRONOS_validation"; let layer_props = entry.enumerate_instance_layer_properties()?; if layer_props .into_iter() .map(|properties| unsafe { 
CStr::from_ptr(&properties.layer_name as *const _) }) .any(|layer| layer == validation_layer) { layers.push(validation_layer.as_ptr()); } else { eprintln!("validation layers requested, but not available!") } } let instance = { let flags = if cfg!(any(target_os = "macos", target_os = "ios")) { vk::InstanceCreateFlags::ENUMERATE_PORTABILITY_KHR } else { vk::InstanceCreateFlags::default() }; let instance_create_info = vk::InstanceCreateInfo::builder() .flags(flags) .application_info(&app_info) .enabled_layer_names(&layers) .enabled_extension_names(&extensions); unsafe { entry.create_instance(&instance_create_info, None)? } }; let debug_utils = if debug { let debug_utils = DebugUtilsExt::new(&entry, &instance); let create_info = vk::DebugUtilsMessengerCreateInfoEXT::builder() .message_severity( vk::DebugUtilsMessageSeverityFlagsEXT::WARNING | vk::DebugUtilsMessageSeverityFlagsEXT::VERBOSE | vk::DebugUtilsMessageSeverityFlagsEXT::INFO | vk::DebugUtilsMessageSeverityFlagsEXT::ERROR, ) .message_type( vk::DebugUtilsMessageTypeFlagsEXT::GENERAL | vk::DebugUtilsMessageTypeFlagsEXT::PERFORMANCE | vk::DebugUtilsMessageTypeFlagsEXT::VALIDATION, ) .pfn_user_callback(Some(vulkan_debug_utils_callback)); let messenger = unsafe { debug_utils.create_debug_utils_messenger(&create_info, None) }?; Some(VkDebugContext { debug: debug_utils, messenger, }) } else { None }; let surface_loader = SurfaceKhr::new(&entry, &instance); let surface = unsafe { ash_window::create_surface( &entry, &instance, window.raw_display_handle(), window.raw_window_handle(), None, )? }; let devices = unsafe { instance.enumerate_physical_devices()? 
}; let mut devices = devices .into_iter() .enumerate() .flat_map( |(index, dev)| match query_device(&instance, &surface_loader, surface, dev) { Ok(info) => Some((index as u32, dev, info)), Err(err) => { let device_name = unsafe { CStr::from_ptr( instance .get_physical_device_properties(dev) .device_name .as_ptr(), ) .to_owned() }; eprintln!("gpu {device_name:?} ineligible: {err}"); None } }, ) .collect::>(); devices.sort_by_key(|(_, _, info)| match info.device_type { vk::PhysicalDeviceType::DISCRETE_GPU => 0, vk::PhysicalDeviceType::INTEGRATED_GPU => 1, _ => 2, }); if devices.is_empty() { return Err(anyhow!("no eligible GPU found!")); } let (index, pdevice, device_info) = devices.remove(0); eprintln!("selected gpu: {:?} ({index})", device_info.device_name); let device = { let queue_priorities = &[1.0]; let mut queue_indices = Vec::new(); queue_indices.push(device_info.present_family); queue_indices.dedup(); let queue_create_infos = queue_indices .iter() .map(|&index| { vk::DeviceQueueCreateInfo::builder() .queue_family_index(index) .queue_priorities(queue_priorities) .build() }) .collect::>(); let mut enabled_1_1_features = vk::PhysicalDeviceVulkan11Features::builder().sampler_ycbcr_conversion(true); let mut dynamic_rendering_features = vk::PhysicalDeviceDynamicRenderingFeatures::builder().dynamic_rendering(true); let selected_extensions = [ vk::KhrSwapchainFn::name().to_owned(), vk::KhrDynamicRenderingFn::name().to_owned(), #[cfg(any(target_os = "macos", target_os = "ios"))] vk::KhrPortabilitySubsetFn::name().to_owned(), ]; let extension_names = selected_extensions .iter() .map(|v| v.as_c_str().as_ptr()) .collect::>(); let device_create_info = vk::DeviceCreateInfo::builder() .queue_create_infos(&queue_create_infos) .enabled_extension_names(&extension_names) .push_next(&mut enabled_1_1_features) .push_next(&mut dynamic_rendering_features); unsafe { instance.create_device(pdevice, &device_create_info, None)? 
} }; let present_queue = get_queue_with_command_pool(&device, device_info.present_family)?; let window_size = window.inner_size(); let surface_formats = unsafe { surface_loader.get_physical_device_surface_formats(pdevice, surface)? }; for surface_format in &surface_formats { eprintln!( "available surface format: {:?} ({}) -> {:?} ({})", surface_format.format, surface_format.format.as_raw(), surface_format.color_space, surface_format.color_space.as_raw() ); } // Disable Vulkan's automatic sRGB conversion. let surface_formats = surface_formats .into_iter() .filter(|sf| !format_is_srgb(sf.format) && colorspace_supported(sf.color_space)) .collect::>(); let surface_format = surface_formats[0]; eprintln!( "using surface format: {:?} / {:?}", surface_format.format, surface_format.color_space, ); let swapchain_loader = SwapchainKhr::new(&instance, &device); let dynamic_rendering_loader = DynamicRenderingKhr::new(&instance, &device); let mut imgui = imgui::Context::create(); imgui.set_ini_filename(None); let mut imgui_platform = imgui_winit_support::WinitPlatform::init(&mut imgui); imgui_platform.attach_window( imgui.io_mut(), &window, imgui_winit_support::HiDpiMode::Default, ); let mut renderer = Self { _entry: entry, instance, device, swapchain_loader, surface_loader, dynamic_rendering_loader, debug: debug_utils, pdevice, _device_info: device_info, surface, surface_formats, format: surface_format.format, colorspace: surface_format.color_space, pc: PushConstants { size: glam::Vec2::new(window_size.width as f32, window_size.height as f32), mouse: glam::Vec2::ZERO, color_mul: 1.0, color_space: surface_format.color_space, }, present_queue, width: window_size.width, height: window_size.height, imgui: Some(ImguiContext { imgui, platform: imgui_platform, }), window, swapchain: None, swapchain_dirty: false, }; unsafe { renderer.recreate_swapchain()? 
};

Ok(renderer)
}

/// Builds (or rebuilds) the swapchain and everything derived from it:
/// swapchain image views, the graphics pipeline, descriptor pool/layout,
/// per-frame sync objects, and the imgui renderer. Any previous swapchain
/// is handed to `destroy_swapchain` once the replacement is installed.
unsafe fn recreate_swapchain(&mut self) -> anyhow::Result<()> {
    let start = time::Instant::now();
    let device = &self.device;

    // Look up the full entry for the currently selected format/colorspace
    // pair; `surface_formats` was pre-filtered at startup, so a miss here is
    // a programming error.
    let surface_format = self
        .surface_formats
        .iter()
        .find(|sf| sf.format == self.format && sf.color_space == self.colorspace)
        .expect("invalid format / colorspace combination");

    eprintln!(
        "recreating swapchain with format {:?} / {:?}",
        surface_format.format, surface_format.color_space
    );

    // The shader reads the colorspace from the push constants to pick its
    // output encoding.
    self.pc.color_space = surface_format.color_space;

    let surface_capabilities = self
        .surface_loader
        .get_physical_device_surface_capabilities(self.pdevice, self.surface)
        .unwrap();

    // Ask for one image more than the minimum, clamped to the maximum
    // (a max_image_count of 0 means "no limit").
    let mut desired_image_count = surface_capabilities.min_image_count + 1;
    if surface_capabilities.max_image_count > 0
        && desired_image_count > surface_capabilities.max_image_count
    {
        desired_image_count = surface_capabilities.max_image_count;
    }

    // A current_extent width of u32::MAX means the surface size is
    // determined by the swapchain, so fall back to the window size.
    let surface_resolution = match surface_capabilities.current_extent.width {
        std::u32::MAX => vk::Extent2D {
            width: self.width,
            height: self.height,
        },
        _ => surface_capabilities.current_extent,
    };

    self.pc.size = glam::Vec2::new(
        surface_resolution.width as f32,
        surface_resolution.height as f32,
    );

    let pre_transform = if surface_capabilities
        .supported_transforms
        .contains(vk::SurfaceTransformFlagsKHR::IDENTITY)
    {
        vk::SurfaceTransformFlagsKHR::IDENTITY
    } else {
        surface_capabilities.current_transform
    };

    let present_modes = self
        .surface_loader
        .get_physical_device_surface_present_modes(self.pdevice, self.surface)
        .unwrap();

    // Rank present modes: MAILBOX, then IMMEDIATE, then FIFO (FIFO is
    // guaranteed by the spec, so the list is never empty after sorting).
    let mut present_modes = present_modes.clone();
    present_modes.sort_by_key(|&mode| match mode {
        vk::PresentModeKHR::MAILBOX => 0,
        vk::PresentModeKHR::IMMEDIATE => 1,
        vk::PresentModeKHR::FIFO => 2,
        _ => 4,
    });

    let present_mode = present_modes.first().unwrap();
    if *present_mode != vk::PresentModeKHR::MAILBOX {
        eprintln!(
            "present mode MAILBOX not available, using {:?} (available: {:?})",
            present_mode, present_modes
        );
    }

    let mut swapchain_create_info = vk::SwapchainCreateInfoKHR::builder()
        .surface(self.surface)
        .min_image_count(desired_image_count)
        .image_color_space(surface_format.color_space)
        .image_format(surface_format.format)
        .image_extent(surface_resolution)
        .image_usage(vk::ImageUsageFlags::COLOR_ATTACHMENT)
        .image_sharing_mode(vk::SharingMode::EXCLUSIVE)
        .pre_transform(pre_transform)
        .composite_alpha(vk::CompositeAlphaFlagsKHR::OPAQUE)
        .present_mode(*present_mode)
        .clipped(true)
        .image_array_layers(1);

    // Passing the old swapchain allows the driver to reuse resources.
    if let Some(old_swapchain) = self.swapchain.as_ref() {
        swapchain_create_info = swapchain_create_info.old_swapchain(old_swapchain.swapchain);
    }

    let swapchain = self
        .swapchain_loader
        .create_swapchain(&swapchain_create_info, None)?;

    let swapchain_images = self.swapchain_loader.get_swapchain_images(swapchain)?;

    // Empty descriptor set layout; this pipeline only uses push constants.
    let descriptor_set_layout = {
        let create_info = vk::DescriptorSetLayoutCreateInfo::builder();
        unsafe { device.create_descriptor_set_layout(&create_info, None)? }
    };

    let descriptor_pool = {
        let sampler_size = vk::DescriptorPoolSize::builder()
            .ty(vk::DescriptorType::COMBINED_IMAGE_SAMPLER)
            .descriptor_count(swapchain_images.len() as u32);

        let pool_sizes = &[sampler_size.build()];
        let info = vk::DescriptorPoolCreateInfo::builder()
            .pool_sizes(pool_sizes)
            .max_sets(swapchain_images.len() as u32);

        unsafe { device.create_descriptor_pool(&info, None)? }
    };

    let pipeline_layout = {
        // One push-constant block, visible to both shader stages.
        let pc_ranges = [vk::PushConstantRange::builder()
            .stage_flags(vk::ShaderStageFlags::VERTEX | vk::ShaderStageFlags::FRAGMENT)
            .offset(0)
            .size(std::mem::size_of::() as u32)
            .build()];

        let set_layouts = [descriptor_set_layout];
        let create_info = vk::PipelineLayoutCreateInfo::builder()
            .set_layouts(&set_layouts)
            .push_constant_ranges(&pc_ranges);

        unsafe { device.create_pipeline_layout(&create_info, None)? }
    };

    let pipeline = {
        // SPIR-V blobs are compiled by build.rs into OUT_DIR.
        let vert_bytes = include_bytes!(concat!(env!("OUT_DIR"), "/color-test/vert.spv"));
        let frag_bytes = include_bytes!(concat!(env!("OUT_DIR"), "/color-test/frag.spv"));

        let vert_shader = load_shader(device, vert_bytes).context("loading vert.spv")?;
        let frag_shader = load_shader(device, frag_bytes).context("loading frag.spv")?;

        let vert_stage = vk::PipelineShaderStageCreateInfo::builder()
            .stage(vk::ShaderStageFlags::VERTEX)
            .module(vert_shader)
            .name(c"main");

        let frag_stage = vk::PipelineShaderStageCreateInfo::builder()
            .stage(vk::ShaderStageFlags::FRAGMENT)
            .module(frag_shader)
            .name(c"main");

        // No vertex buffers; the vertex shader generates the geometry.
        let vertex_input_state = vk::PipelineVertexInputStateCreateInfo::builder();
        let input_assembly_state = vk::PipelineInputAssemblyStateCreateInfo::builder()
            .topology(vk::PrimitiveTopology::TRIANGLE_STRIP)
            .primitive_restart_enable(false);

        let viewport = vk::Viewport::builder()
            .x(0.0)
            .y(0.0)
            .width(self.width as f32)
            .height(self.height as f32)
            .min_depth(0.0)
            .max_depth(1.0);

        let scissor = vk::Rect2D::builder().extent(vk::Extent2D {
            width: self.width,
            height: self.height,
        });

        let viewports = [viewport.build()];
        let scissors = [scissor.build()];
        let viewport_state = vk::PipelineViewportStateCreateInfo::builder()
            .viewports(&viewports)
            .scissors(&scissors);

        let rasterization_state = vk::PipelineRasterizationStateCreateInfo::builder()
            .depth_clamp_enable(false)
            .rasterizer_discard_enable(false)
            .polygon_mode(vk::PolygonMode::FILL)
            .line_width(1.0)
            .depth_bias_enable(false)
            // Per https://www.saschawillems.de/blog/2016/08/13/vulkan-tutorial-on-rendering-a-fullscreen-quad-without-buffers
            .cull_mode(vk::CullModeFlags::FRONT)
            .front_face(vk::FrontFace::COUNTER_CLOCKWISE);

        let multisample_state = vk::PipelineMultisampleStateCreateInfo::builder()
            .sample_shading_enable(false)
            .rasterization_samples(vk::SampleCountFlags::TYPE_1);

        // Standard alpha blending over the cleared background.
        let attachment = vk::PipelineColorBlendAttachmentState::builder()
            .color_write_mask(vk::ColorComponentFlags::RGBA)
            .blend_enable(true)
            .src_color_blend_factor(vk::BlendFactor::SRC_ALPHA)
            .dst_color_blend_factor(vk::BlendFactor::ONE_MINUS_SRC_ALPHA)
            .color_blend_op(vk::BlendOp::ADD)
            .src_alpha_blend_factor(vk::BlendFactor::ONE)
            .dst_alpha_blend_factor(vk::BlendFactor::ZERO)
            .alpha_blend_op(vk::BlendOp::ADD);

        let attachments = [attachment.build()];
        let color_blend_state = vk::PipelineColorBlendStateCreateInfo::builder()
            .logic_op_enable(false)
            .attachments(&attachments);

        // Dynamic rendering: declare the color attachment format instead of
        // using a render pass.
        let formats = [surface_format.format];
        let mut pipeline_rendering = vk::PipelineRenderingCreateInfo::builder()
            .color_attachment_formats(&formats)
            .build();

        let stages = [vert_stage.build(), frag_stage.build()];
        let create_info = vk::GraphicsPipelineCreateInfo::builder()
            .stages(&stages)
            .vertex_input_state(&vertex_input_state)
            .input_assembly_state(&input_assembly_state)
            .viewport_state(&viewport_state)
            .rasterization_state(&rasterization_state)
            .multisample_state(&multisample_state)
            .color_blend_state(&color_blend_state)
            .layout(pipeline_layout)
            .push_next(&mut pipeline_rendering);

        unsafe {
            let pipeline = match device.create_graphics_pipelines(
                vk::PipelineCache::null(),
                &[create_info.build()],
                None,
            ) {
                Ok(pipelines) => Ok(pipelines[0]),
                Err((_, e)) => Err(e),
            }?;

            // Shader modules may be destroyed once the pipeline is created.
            device.destroy_shader_module(vert_shader, None);
            device.destroy_shader_module(frag_shader, None);
            pipeline
        }
    };

    // Allocates the per-frame command buffer, fence, and semaphores.
    let create_frame = || -> anyhow::Result {
        let render_cb = {
            let create_info = vk::CommandBufferAllocateInfo::builder()
                .level(vk::CommandBufferLevel::PRIMARY)
                .command_pool(self.present_queue.command_pool)
                .command_buffer_count(1);

            let cbs = device
                .allocate_command_buffers(&create_info)
                .context("failed to allocate render command buffer")?;

            cbs[0]
        };

        // The fence starts signalled so the first frame doesn't block.
        let render_fence = create_fence(device, true)?;
        let image_acquired_sema = create_semaphore(device)?;
        let render_complete_sema = create_semaphore(device)?;

        Ok(InFlightFrame {
            render_cb,
            render_fence,
            image_acquired_sema,
            render_complete_sema,
        })
    };

    // One in-flight frame per swapchain image.
    let frames = (0..swapchain_images.len())
        .map(|_| create_frame())
        .collect::>>()?;

    let swapchain_images = swapchain_images
        .into_iter()
        .map(|image| {
            let create_info = vk::ImageViewCreateInfo::builder()
                .image(image)
                .view_type(vk::ImageViewType::TYPE_2D)
                .format(surface_format.format)
                .components(vk::ComponentMapping {
                    r: vk::ComponentSwizzle::IDENTITY,
                    g: vk::ComponentSwizzle::IDENTITY,
                    b: vk::ComponentSwizzle::IDENTITY,
                    a: vk::ComponentSwizzle::IDENTITY,
                })
                .subresource_range(vk::ImageSubresourceRange {
                    aspect_mask: vk::ImageAspectFlags::COLOR,
                    base_mip_level: 0,
                    level_count: vk::REMAINING_MIP_LEVELS,
                    base_array_layer: 0,
                    layer_count: vk::REMAINING_ARRAY_LAYERS,
                });

            let image_view = device
                .create_image_view(&create_info, None)
                .context("vkCreateImageView")?;

            Ok(SwapImage {
                image,
                view: image_view,
            })
        })
        .collect::>>()?;

    // The imgui renderer is tied to the attachment format, so it is rebuilt
    // together with the swapchain.
    let imgui_renderer = if let Some(ImguiContext { imgui, .. }) = &mut self.imgui {
        Some(imgui_vulkan::Renderer::with_default_allocator(
            &self.instance,
            self.pdevice,
            device.clone(),
            self.present_queue.queue,
            self.present_queue.command_pool,
            imgui_vulkan::DynamicRendering {
                color_attachment_format: surface_format.format,
                depth_attachment_format: None,
            },
            imgui,
            Some(imgui_vulkan::Options {
                in_flight_frames: frames.len(),
                ..Default::default()
            }),
        )?)
    } else {
        None
    };

    let swapchain = Swapchain {
        swapchain,
        frames,
        present_images: swapchain_images,
        current_frame: 0,
        descriptor_pool,
        descriptor_set_layout,
        pipeline_layout,
        pipeline,
        imgui_renderer,
    };

    eprintln!("recreated swapchain in {:?}", start.elapsed());

    // Install the new swapchain and tear down the one it replaces.
    if let Some(old_swapchain) = self.swapchain.replace(swapchain) {
        self.destroy_swapchain(old_swapchain);
    };

    Ok(())
}

/// Forwards events to imgui, and tracks window resizes so the swapchain can
/// be recreated at the next `render` call.
fn handle_event(&mut self, event: &winit::event::Event) -> anyhow::Result<()> {
    if let Some(ImguiContext {
        platform,
        imgui,
        ..
    }) = self.imgui.as_mut()
    {
        platform.handle_event(imgui.io_mut(), &self.window, event);
    }

    match event {
        winit::event::Event::WindowEvent {
            window_id,
            event: winit::event::WindowEvent::Resized(size),
        } if *window_id == self.window.id() => {
            self.resize(size.width, size.height);
        }
        _ => (),
    }

    Ok(())
}

/// Records the new window size and marks the swapchain dirty; the actual
/// recreation happens lazily in `render`.
fn resize(&mut self, width: u32, height: u32) {
    if self.width == width && self.height == height {
        return;
    }

    self.width = width;
    self.height = height;
    self.swapchain_dirty = true;
}

/// Renders one frame: acquires a swapchain image, records the draw and the
/// imgui overlay into the per-frame command buffer, submits, and presents.
/// Recreates the swapchain (and recurses once) when it is dirty or out of
/// date.
unsafe fn render(&mut self) -> anyhow::Result<()> {
    if self.swapchain_dirty || self.swapchain.is_none() {
        self.recreate_swapchain()?;
        self.swapchain_dirty = false;
    }

    let device = &self.device;
    let swapchain = self.swapchain.as_mut().unwrap();

    // Round-robin through the in-flight frames.
    let num_frames = swapchain.frames.len();
    let frame = &mut swapchain.frames[swapchain.current_frame];
    swapchain.current_frame = (swapchain.current_frame + 1) % num_frames;

    // Wait for the gpu to catch up.
    device.wait_for_fences(&[frame.render_fence], true, u64::MAX)?;

    let result = self.swapchain_loader.acquire_next_image(
        swapchain.swapchain,
        u64::MAX,
        frame.image_acquired_sema,
        vk::Fence::null(),
    );

    let swapchain_index = match result {
        Ok((image_index, _)) => image_index,
        Err(vk::Result::ERROR_OUT_OF_DATE_KHR) => {
            // Recreate and try again.
            self.swapchain_dirty = true;
            return self.render();
        }
        Err(e) => return Err(e.into()),
    };

    let present_image = swapchain
        .present_images
        .get(swapchain_index as usize)
        .unwrap();

    // Reset the command buffer.
    device.reset_command_buffer(frame.render_cb, vk::CommandBufferResetFlags::empty())?;

    // Begin the command buffer.
    {
        let begin_info = vk::CommandBufferBeginInfo::builder()
            .flags(vk::CommandBufferUsageFlags::ONE_TIME_SUBMIT);
        device.begin_command_buffer(frame.render_cb, &begin_info)?;
    }

    // Transition the present image to be writable.
    cmd_image_barrier(
        device,
        frame.render_cb,
        present_image.image,
        vk::PipelineStageFlags::TOP_OF_PIPE,
        vk::AccessFlags::empty(),
        vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT,
        vk::AccessFlags::COLOR_ATTACHMENT_WRITE,
        vk::ImageLayout::UNDEFINED,
        vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL,
    );

    // Begin rendering.
    {
        let rect: vk::Rect2D = vk::Rect2D::builder()
            .extent(vk::Extent2D {
                width: self.width,
                height: self.height,
            })
            .build();

        let clear_value = vk::ClearValue {
            color: vk::ClearColorValue {
                float32: [0.0, 0.0, 0.0, 1.0],
            },
        };

        let color_attachment = vk::RenderingAttachmentInfo::builder()
            .image_view(present_image.view)
            .image_layout(vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL)
            .load_op(vk::AttachmentLoadOp::CLEAR)
            .store_op(vk::AttachmentStoreOp::STORE)
            .clear_value(clear_value)
            .build();

        let color_attachments = [color_attachment];
        let rendering_info = vk::RenderingInfo::builder()
            .render_area(rect)
            .color_attachments(&color_attachments)
            .layer_count(1);

        self.dynamic_rendering_loader
            .cmd_begin_rendering(frame.render_cb, &rendering_info);

        device.cmd_bind_pipeline(
            frame.render_cb,
            vk::PipelineBindPoint::GRAPHICS,
            swapchain.pipeline,
        );
    }

    // Upload the push constants as raw bytes.
    device.cmd_push_constants(
        frame.render_cb,
        swapchain.pipeline_layout,
        vk::ShaderStageFlags::VERTEX | vk::ShaderStageFlags::FRAGMENT,
        0,
        std::slice::from_raw_parts(
            &self.pc as *const _ as *const u8,
            std::mem::size_of::(),
        ),
    );

    // Draw the triangle.
    device.cmd_draw(frame.render_cb, 3, 1, 0, 0);

    // Draw the overlay.
    if let Some(ImguiContext { platform, imgui }) = self.imgui.as_mut() {
        // Build deduplicated, sorted lists of the available formats and
        // colorspaces for the two list boxes.
        let mut formats = self
            .surface_formats
            .iter()
            .map(|sf| sf.format)
            .collect::>();

        let mut colorspaces = self
            .surface_formats
            .iter()
            .map(|sf| sf.color_space)
            .collect::>();

        formats.sort();
        formats.dedup();
        colorspaces.sort();
        colorspaces.dedup();

        let format_names = formats
            .iter()
            .map(|f| format!("{:?}", f))
            .collect::>();
        let cs_names = colorspaces
            .iter()
            .map(|c| format!("{:?}", c))
            .collect::>();

        // Indices of the current selection, mutated in place by the UI.
        let mut format_idx = formats.iter().position(|&f| f == self.format).unwrap() as i32;
        let mut cs_idx = colorspaces
            .iter()
            .position(|&c| c == self.colorspace)
            .unwrap() as i32;

        platform.prepare_frame(imgui.io_mut(), &self.window)?;

        {
            let ui = imgui.new_frame();
            let [width, _height] = ui.io().display_size;

            let _padding = ui.push_style_var(imgui::StyleVar::WindowPadding([8.0, 8.0]));
            let _rounding = ui.push_style_var(imgui::StyleVar::WindowRounding(4.0));
            let _frame_rounding = ui.push_style_var(imgui::StyleVar::FrameRounding(4.0));

            // Pin the controls window to the top-right corner.
            if let Some(_window) = ui
                .window("controls")
                .position([width - 16.0, 16.0], imgui::Condition::Always)
                .position_pivot([1.0, 0.0])
                .bg_alpha(0.8)
                .size([250.0, 300.0], imgui::Condition::Always)
                .begin()
            {
                let _stretch = ui.push_item_width(-1.0);

                ui.text("Format:");
                ui.list_box(
                    "##format",
                    &mut format_idx,
                    &format_names.iter().map(|f| f.as_str()).collect::>(),
                    4,
                );

                ui.text("Color Space:");
                ui.list_box(
                    "##cs",
                    &mut cs_idx,
                    &cs_names.iter().map(|f| f.as_str()).collect::>(),
                    4,
                );

                ui.text("Headroom:");
                ui.slider("##headroom", 0.75, 4.0, &mut self.pc.color_mul);
            }

            platform.prepare_render(ui, &self.window);
        }

        let renderer = swapchain.imgui_renderer.as_mut().unwrap();
        renderer.cmd_draw(frame.render_cb, imgui.render())?;

        // Apply any selection change; the new format/colorspace takes effect
        // when the swapchain is recreated.
        if formats[format_idx as usize] != self.format {
            self.format = formats[format_idx as usize];
            self.swapchain_dirty = true;
        }

        if colorspaces[cs_idx as usize] != self.colorspace {
            self.colorspace = colorspaces[cs_idx as usize];
            self.swapchain_dirty = true;
        }
    }

    // Done rendering.
    self.dynamic_rendering_loader
        .cmd_end_rendering(frame.render_cb);

    // Transition the present image to be presentable.
    cmd_image_barrier(
        device,
        frame.render_cb,
        present_image.image,
        vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT,
        vk::AccessFlags::COLOR_ATTACHMENT_WRITE,
        vk::PipelineStageFlags::BOTTOM_OF_PIPE,
        vk::AccessFlags::empty(),
        vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL,
        vk::ImageLayout::PRESENT_SRC_KHR,
    );

    // Submit and present!
    {
        let present_queue = self.present_queue.queue;
        device.end_command_buffer(frame.render_cb)?;

        device.reset_fences(&[frame.render_fence])?;

        // Wait for the acquired image before writing color output, and
        // signal render_complete_sema for the present below.
        let cbs = [frame.render_cb];
        let wait_semas = [frame.image_acquired_sema];
        let signal_semas = [frame.render_complete_sema];
        let wait_stages = [vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT];

        let submit_info = vk::SubmitInfo::builder()
            .command_buffers(&cbs)
            .wait_semaphores(&wait_semas)
            .wait_dst_stage_mask(&wait_stages)
            .signal_semaphores(&signal_semas);

        let submits = [submit_info.build()];
        device.queue_submit(present_queue, &submits, frame.render_fence)?;

        // This "helps winit [with stuff]". It also seems to increase latency.
        self.window.pre_present_notify();

        let wait_semas = [frame.render_complete_sema];
        let swapchains = [swapchain.swapchain];
        let image_indices = [swapchain_index];
        let present_info = vk::PresentInfoKHR::builder()
            .wait_semaphores(&wait_semas)
            .swapchains(&swapchains)
            .image_indices(&image_indices);

        // A suboptimal (Ok(true)) or out-of-date result marks the swapchain
        // dirty so it gets rebuilt.
        self.swapchain_dirty = match self
            .swapchain_loader
            .queue_present(present_queue, &present_info)
        {
            Ok(false) => self.swapchain_dirty,
            Ok(true) => true,
            Err(vk::Result::ERROR_OUT_OF_DATE_KHR) => true,
            Err(e) => return Err(e.into()),
        };
    }

    // Render again!
    if self.swapchain_dirty {
        return self.render();
    }

    Ok(())
}

/// Destroys a swapchain and all resources created alongside it in
/// `recreate_swapchain`, waiting for the device to go idle first.
unsafe fn destroy_swapchain(&mut self, mut swapchain: Swapchain) {
    let device = &self.device;
    device.device_wait_idle().unwrap();

    for frame in swapchain.frames.drain(..)
    {
        device.free_command_buffers(self.present_queue.command_pool, &[frame.render_cb]);
        device.destroy_fence(frame.render_fence, None);
        device.destroy_semaphore(frame.image_acquired_sema, None);
        device.destroy_semaphore(frame.render_complete_sema, None);
    }

    for swap_img in swapchain.present_images.drain(..) {
        // Destroying the swapchain does this.
        // device.destroy_image(swap_img.image, None);

        device.destroy_image_view(swap_img.view, None);
    }

    device.destroy_pipeline_layout(swapchain.pipeline_layout, None);
    device.destroy_descriptor_pool(swapchain.descriptor_pool, None);
    device.destroy_descriptor_set_layout(swapchain.descriptor_set_layout, None);
    device.destroy_pipeline(swapchain.pipeline, None);

    self.swapchain_loader
        .destroy_swapchain(swapchain.swapchain, None)
}
}

impl Drop for Renderer {
    // Tears down all Vulkan objects in reverse creation order: swapchain,
    // command pool, debug messenger, imgui, surface, device, instance.
    fn drop(&mut self) {
        unsafe {
            if let Some(swapchain) = self.swapchain.take() {
                self.destroy_swapchain(swapchain);
            }

            self.device
                .destroy_command_pool(self.present_queue.command_pool, None);

            if let Some(debug) = self.debug.take() {
                debug
                    .debug
                    .destroy_debug_utils_messenger(debug.messenger, None);
            }

            // Dropped explicitly before the device it depends on.
            if let Some(imgui) = self.imgui.take() {
                drop(imgui);
            }

            self.surface_loader.destroy_surface(self.surface, None);
            self.device.destroy_device(None);
            self.instance.destroy_instance(None);
        }
    }
}

/// Entry point: creates the window and renderer, then drives the winit event
/// loop, forwarding events and redraw requests to the renderer.
fn main() -> anyhow::Result<()> {
    let event_loop = EventLoop::new()?;
    let window = WindowBuilder::new()
        .with_title("Colorful Triangle")
        .with_inner_size(winit::dpi::LogicalSize::new(800.0, 600.0))
        .build(&event_loop)
        .unwrap();

    let window = Rc::new(window);

    // Validation layers are enabled in debug builds only.
    let mut renderer = Renderer::new(window.clone(), cfg!(debug_assertions))?;

    let mut mouse_pressed = false;
    let mut mouse_pos = glam::Vec2::ZERO;

    event_loop.run(move |event, el| {
        renderer.handle_event(&event).expect("resize failed");

        match event {
            Event::AboutToWait {
                ..
            } => {
                window.request_redraw();
            }
            Event::WindowEvent { window_id, event } if window_id == window.id() => {
                match event {
                    // Escape or close button exits the loop.
                    WindowEvent::CloseRequested
                    | WindowEvent::KeyboardInput {
                        event:
                            KeyEvent {
                                state: ElementState::Pressed,
                                physical_key: PhysicalKey::Code(KeyCode::Escape),
                                ..
                            },
                        ..
                    } => el.exit(),
                    WindowEvent::MouseInput {
                        state,
                        button: MouseButton::Left,
                        ..
                    } => {
                        mouse_pressed = state == ElementState::Pressed;
                    }
                    WindowEvent::CursorMoved { position, .. } => {
                        // Normalize the cursor position to [-0.5, 0.5].
                        let phys_size = window.inner_size();
                        let mouse_x = position.x as f32 / phys_size.width as f32 - 0.5;
                        let mouse_y = position.y as f32 / phys_size.height as f32 - 0.5;
                        mouse_pos = glam::Vec2::new(mouse_x, mouse_y);
                    }
                    WindowEvent::RedrawRequested => unsafe {
                        renderer.render().expect("render failed")
                    },
                    _ => (),
                };

                // While the button is held, feed the cursor position to the
                // shader via push constants.
                if mouse_pressed {
                    renderer.pc.mouse = mouse_pos;
                }
            }
            _ => (),
        }
    })?;

    Ok(())
}

/// Checks whether a physical device is usable: it must have a queue family
/// with GRAPHICS and COMPUTE that can present to the surface, and it must
/// support the swapchain extension.
fn query_device(
    instance: &ash::Instance,
    surface_loader: &SurfaceKhr,
    surface: vk::SurfaceKHR,
    device: vk::PhysicalDevice,
) -> anyhow::Result {
    let props = unsafe { instance.get_physical_device_properties(device) };
    let device_type = props.device_type;
    let device_name = unsafe { CStr::from_ptr(props.device_name.as_ptr()).to_owned() };

    let queue_families = unsafe {
        instance
            .get_physical_device_queue_family_properties(device)
            .into_iter()
            .collect::>()
    };

    let present_family = queue_families
        .iter()
        .enumerate()
        .find(|(idx, properties)| {
            properties.queue_flags.contains(vk::QueueFlags::GRAPHICS)
                && properties.queue_flags.contains(vk::QueueFlags::COMPUTE)
                && unsafe {
                    surface_loader
                        .get_physical_device_surface_support(device, *idx as u32, surface)
                        .unwrap_or(false)
                }
        })
        .map(|(index, _)| index as u32)
        .to_owned()
        .ok_or_else(|| anyhow::anyhow!("no graphics queue found"))?;

    let available_extensions = unsafe {
        instance
            .enumerate_device_extension_properties(device)
            .unwrap()
            .into_iter()
            .map(|properties| CStr::from_ptr(&properties.extension_name as *const _).to_owned())
            .collect::>()
    };

    let ext_swapchain =
SwapchainKhr::name();
if !available_extensions
    .iter()
    .any(|ext| **ext == *ext_swapchain)
{
    return Err(anyhow::anyhow!("no swapchain extension found"));
}

Ok(DeviceInfo {
    device_name,
    device_type,
    present_family,
})
}

/// Fetches queue 0 of the given family and creates a resettable command pool
/// for it.
fn get_queue_with_command_pool(device: &ash::Device, idx: u32) -> Result {
    let queue = unsafe { device.get_device_queue(idx, 0) };

    let command_pool = unsafe {
        let create_info = vk::CommandPoolCreateInfo::builder()
            .queue_family_index(idx)
            // Allows individual command buffers to be reset and re-recorded.
            .flags(vk::CommandPoolCreateFlags::RESET_COMMAND_BUFFER);

        device.create_command_pool(&create_info, None)?
    };

    Ok(VkQueue {
        queue,
        command_pool,
    })
}

/// Creates a fence, optionally in the signalled state (useful so the first
/// wait on a per-frame fence doesn't block forever).
fn create_fence(device: &ash::Device, signalled: bool) -> Result {
    let mut create_info = vk::FenceCreateInfo::builder();
    if signalled {
        create_info = create_info.flags(vk::FenceCreateFlags::SIGNALED);
    }

    let fence = unsafe { device.create_fence(&create_info, None)? };
    Ok(fence)
}

/// Creates a plain binary semaphore.
fn create_semaphore(device: &ash::Device) -> Result {
    let semaphore = unsafe {
        device.create_semaphore(&vk::SemaphoreCreateInfo::default(), None)?
    };

    Ok(semaphore)
}

/// Records a single full-color-subresource image layout/access barrier into
/// the given command buffer.
#[allow(clippy::too_many_arguments)]
fn cmd_image_barrier(
    device: &ash::Device,
    command_buffer: vk::CommandBuffer,
    image: vk::Image,
    src_stage_mask: vk::PipelineStageFlags,
    src_access_mask: vk::AccessFlags,
    dst_stage_mask: vk::PipelineStageFlags,
    dst_access_mask: vk::AccessFlags,
    old_layout: vk::ImageLayout,
    new_layout: vk::ImageLayout,
) {
    let barrier = vk::ImageMemoryBarrier::builder()
        .src_access_mask(src_access_mask)
        .dst_access_mask(dst_access_mask)
        .old_layout(old_layout)
        .new_layout(new_layout)
        .image(image)
        // Covers mip 0 / layer 0 of the color aspect only.
        .subresource_range(vk::ImageSubresourceRange {
            aspect_mask: vk::ImageAspectFlags::COLOR,
            base_mip_level: 0,
            level_count: 1,
            base_array_layer: 0,
            layer_count: 1,
        })
        .build();

    unsafe {
        device.cmd_pipeline_barrier(
            command_buffer,
            src_stage_mask,
            dst_stage_mask,
            vk::DependencyFlags::empty(),
            &[],
            &[],
            &[barrier],
        )
    };
}

/// Parses a SPIR-V byte slice and creates a shader module from it.
fn load_shader(device: &ash::Device, bytes: &[u8]) -> anyhow::Result {
    let code = ash::util::read_spv(&mut std::io::Cursor::new(bytes))?;
    let create_info = vk::ShaderModuleCreateInfo::builder().code(&code);

    let shader = unsafe {
        device.create_shader_module(&create_info, None)?
};

Ok(shader)
}

/// Returns true for formats where the driver would apply automatic sRGB
/// encoding on write; the app filters these out so the shader controls the
/// transfer function itself.
fn format_is_srgb(format: vk::Format) -> bool {
    matches!(
        format,
        vk::Format::R8_SRGB
            | vk::Format::R8G8_SRGB
            | vk::Format::R8G8B8_SRGB
            | vk::Format::B8G8R8_SRGB
            | vk::Format::R8G8B8A8_SRGB
            | vk::Format::B8G8R8A8_SRGB
            | vk::Format::A8B8G8R8_SRGB_PACK32
            | vk::Format::BC1_RGB_SRGB_BLOCK
            | vk::Format::BC1_RGBA_SRGB_BLOCK
            | vk::Format::BC2_SRGB_BLOCK
            | vk::Format::BC3_SRGB_BLOCK
            | vk::Format::BC7_SRGB_BLOCK
            | vk::Format::ETC2_R8G8B8_SRGB_BLOCK
            | vk::Format::ETC2_R8G8B8A1_SRGB_BLOCK
            | vk::Format::ETC2_R8G8B8A8_SRGB_BLOCK
            | vk::Format::ASTC_4X4_SRGB_BLOCK
            | vk::Format::ASTC_5X4_SRGB_BLOCK
            | vk::Format::ASTC_5X5_SRGB_BLOCK
            | vk::Format::ASTC_6X5_SRGB_BLOCK
            | vk::Format::ASTC_6X6_SRGB_BLOCK
            | vk::Format::ASTC_8X5_SRGB_BLOCK
            | vk::Format::ASTC_8X6_SRGB_BLOCK
            | vk::Format::ASTC_8X8_SRGB_BLOCK
            | vk::Format::ASTC_10X5_SRGB_BLOCK
            | vk::Format::ASTC_10X6_SRGB_BLOCK
            | vk::Format::ASTC_10X8_SRGB_BLOCK
            | vk::Format::ASTC_10X10_SRGB_BLOCK
            | vk::Format::ASTC_12X10_SRGB_BLOCK
            | vk::Format::ASTC_12X12_SRGB_BLOCK
    )
}

/// Returns true for the colorspaces this test app knows how to encode for in
/// its fragment shader.
fn colorspace_supported(colorspace: vk::ColorSpaceKHR) -> bool {
    matches!(
        colorspace,
        vk::ColorSpaceKHR::SRGB_NONLINEAR
            | vk::ColorSpaceKHR::EXTENDED_SRGB_LINEAR_EXT
            | vk::ColorSpaceKHR::DISPLAY_P3_LINEAR_EXT
            | vk::ColorSpaceKHR::DISPLAY_P3_NONLINEAR_EXT
            | vk::ColorSpaceKHR::DCI_P3_NONLINEAR_EXT
            | vk::ColorSpaceKHR::BT709_LINEAR_EXT
            | vk::ColorSpaceKHR::BT709_NONLINEAR_EXT
            | vk::ColorSpaceKHR::HDR10_ST2084_EXT
    )
}

/// Debug-utils messenger callback: prints validation messages to stderr.
/// Wrapped in catch_unwind because panicking across the FFI boundary is UB.
unsafe extern "system" fn vulkan_debug_utils_callback(
    _message_severity: vk::DebugUtilsMessageSeverityFlagsEXT,
    message_type: vk::DebugUtilsMessageTypeFlagsEXT,
    p_callback_data: *const vk::DebugUtilsMessengerCallbackDataEXT,
    _userdata: *mut c_void,
) -> vk::Bool32 {
    let _ = std::panic::catch_unwind(|| {
        let message = unsafe { CStr::from_ptr((*p_callback_data).p_message) }.to_string_lossy();
        let ty = format!("{:?}", message_type).to_lowercase();

        eprintln!("VULKAN[{}]: {}", ty, message);
    });

    // Must always return false.
    vk::FALSE
}
================================================ FILE: test-apps/bin/cursorlock.rs ================================================
// Copyright 2024 Colin Marc
//
// SPDX-License-Identifier: MIT

// Adapted from:
// https://bevyengine.org/examples/camera/first-person-view-model/

use bevy::color::palettes::tailwind;
use bevy::input::mouse::MouseMotion;
use bevy::pbr::NotShadowCaster;
use bevy::prelude::*;
use bevy::render::view::RenderLayers;
use bevy::window::{CursorGrabMode, PrimaryWindow};

// App entry point: registers the spawn systems at startup and the
// movement/cursor systems every frame. The cursor starts locked.
fn main() {
    App::new()
        .add_plugins(DefaultPlugins)
        .add_systems(Startup, (spawn_view_model, spawn_world_model, spawn_lights))
        .add_systems(Update, (move_player, toggle_cursor_lock, update_cursor))
        .insert_resource(CursorLocked(true))
        .run();
}

// Marker component for the player entity.
#[derive(Debug, Component)]
struct Player;

#[derive(Debug, Component, Deref, DerefMut)]
struct CameraSensitivity(Vec2);

impl Default for CameraSensitivity {
    fn default() -> Self {
        Self(
            // These factors are just arbitrary mouse sensitivity values.
            // It's often nicer to have a faster horizontal sensitivity than vertical.
            // We use a component for them so that we can make them user-configurable at runtime
            // for accessibility reasons.
            // It also allows you to inspect them in an editor if you `Reflect` the component.
            Vec2::new(0.003, 0.002),
        )
    }
}

// Marker component for the world-model camera.
#[derive(Debug, Component)]
struct WorldModelCamera;

// Toggled by `toggle_cursor_lock` (Escape); read by `move_player` and
// `update_cursor`.
#[derive(Debug, Resource)]
struct CursorLocked(bool);

/// Used implicitly by all entities without a `RenderLayers` component.
/// Our world model camera and all objects other than the player are on this
/// layer. The light source belongs to both layers.
const DEFAULT_RENDER_LAYER: usize = 0;

/// Used by the view model camera and the player's arm.
/// The light source belongs to both layers.
const VIEW_MODEL_RENDER_LAYER: usize = 1; fn spawn_view_model( mut commands: Commands, mut meshes: ResMut>, mut materials: ResMut>, ) { let arm = meshes.add(Cuboid::new(0.1, 0.1, 0.5)); let arm_material = materials.add(Color::from(tailwind::TEAL_200)); commands .spawn(( Player, CameraSensitivity::default(), Transform::from_xyz(0.0, 1.0, 0.0), Visibility::default(), )) .with_children(|parent| { parent.spawn(( WorldModelCamera, Camera3d::default(), Projection::from(PerspectiveProjection { fov: 90.0_f32.to_radians(), ..default() }), )); // Spawn view model camera. parent.spawn(( Camera3d::default(), Camera { // Bump the order to render on top of the world model. order: 1, ..default() }, Projection::from(PerspectiveProjection { fov: 70.0_f32.to_radians(), ..default() }), // Only render objects belonging to the view model. RenderLayers::layer(VIEW_MODEL_RENDER_LAYER), )); // Spawn the player's right arm. parent.spawn(( Mesh3d(arm), MeshMaterial3d(arm_material), Transform::from_xyz(0.2, -0.1, -0.25), // Ensure the arm is only rendered by the view model camera. RenderLayers::layer(VIEW_MODEL_RENDER_LAYER), // The arm is free-floating, so shadows would look weird. NotShadowCaster, )); }); } fn spawn_world_model( mut commands: Commands, mut meshes: ResMut>, mut materials: ResMut>, ) { let floor = meshes.add(Plane3d::new(Vec3::Y, Vec2::splat(10.0))); let cube = meshes.add(Cuboid::new(2.0, 0.5, 1.0)); let material = materials.add(Color::WHITE); // The world model camera will render the floor and the cubes spawned in this // system. Assigning no `RenderLayers` component defaults to layer 0. 
commands.spawn((Mesh3d(floor), MeshMaterial3d(material.clone()))); commands.spawn(( Mesh3d(cube.clone()), MeshMaterial3d(material.clone()), Transform::from_xyz(0.0, 0.25, -3.0), )); commands.spawn(( Mesh3d(cube), MeshMaterial3d(material), Transform::from_xyz(0.75, 1.75, 0.0), )); } fn spawn_lights(mut commands: Commands) { commands.spawn(( PointLight { color: Color::from(tailwind::ROSE_300), shadows_enabled: true, ..default() }, Transform::from_xyz(-2.0, 4.0, -0.75), // The light source illuminates both the world model and the view model. RenderLayers::from_layers(&[DEFAULT_RENDER_LAYER, VIEW_MODEL_RENDER_LAYER]), )); } fn move_player( mut mouse_motion: EventReader, mut player: Query<&mut Transform, With>, cursor_locked: Res, ) { if !cursor_locked.0 { return; } let mut transform = player.single_mut(); for motion in mouse_motion.read() { let yaw = -motion.delta.x * 0.003; let pitch = -motion.delta.y * 0.002; // Order of rotations is important, see transform.rotate_y(yaw); transform.rotate_local_x(pitch); } } fn toggle_cursor_lock(input: Res>, mut cursor_locked: ResMut) { if input.just_pressed(KeyCode::Escape) { cursor_locked.0 = !cursor_locked.0 } } fn update_cursor( cursor_locked: Res, mut q_windows: Query<&mut Window, With>, ) { if !cursor_locked.is_changed() { return; } let mut primary_window = q_windows.single_mut(); if cursor_locked.0 { primary_window.cursor_options.grab_mode = CursorGrabMode::Locked; primary_window.cursor_options.visible = false; } else { primary_window.cursor_options.grab_mode = CursorGrabMode::None; primary_window.cursor_options.visible = true; } } ================================================ FILE: test-apps/bin/latency.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT use bevy::{ prelude::*, window::{PresentMode, PrimaryWindow, WindowResolution}, }; use clap::Parser; const BLOCK_SIZE: f32 = 32.0; const STARTING_POS: Vec3 = Vec3::new(-BLOCK_SIZE / 2.0, BLOCK_SIZE / 2.0, 
0.0); #[derive(Debug, Clone, Copy, PartialEq, Eq, Resource)] enum InputMode { Keyboard, Mouse, Gamepad, } #[derive(Debug, Parser)] #[command(name = "latency-test")] #[command(about = "The Magic Mirror latency test app", long_about = None)] struct Cli { /// Mouse mode. #[arg(long)] mouse: bool, #[arg(long)] gamepad: bool, } #[derive(Component)] struct Box(i8); fn main() { let args = Cli::parse(); let input_mode = match (args.mouse, args.gamepad) { (true, true) => { eprintln!("at most one of --mouse and --gamepad must be specified"); std::process::exit(1); } (true, false) => InputMode::Mouse, (false, true) => InputMode::Gamepad, _ => InputMode::Keyboard, }; App::new() .add_plugins(DefaultPlugins.set(WindowPlugin { primary_window: Some(Window { title: "Latency Test".to_string(), resolution: WindowResolution::new(BLOCK_SIZE * 8.0, BLOCK_SIZE * 8.0), present_mode: PresentMode::Mailbox, ..Default::default() }), ..Default::default() })) .insert_resource(ClearColor(Color::BLACK)) .insert_resource(input_mode) .add_systems(Startup, setup) .add_systems(Update, move_box) .run(); } fn setup(mut commands: Commands, input_mode: Res) { let starting_pos = if *input_mode == InputMode::Mouse || *input_mode == InputMode::Gamepad { STARTING_POS } else { // Offscreen. 
Vec3::new(BLOCK_SIZE * -100.0, BLOCK_SIZE * 100.0, 0.0) }; commands.spawn(Camera2d::default()); commands.spawn(( Sprite { color: Color::WHITE, custom_size: Some(Vec2::new(BLOCK_SIZE, BLOCK_SIZE)), anchor: bevy::sprite::Anchor::TopLeft, ..default() }, Transform::from_translation(starting_pos), Box(-1), )); } fn move_box( keyboard_input: Res>, input_mode: Res, gamepads: Query<&Gamepad>, q_windows: Query<&Window, With>, q_camera: Query<(&Camera, &GlobalTransform)>, mut q_box: Query<(&mut Box, &mut Transform)>, ) { let (mut b, mut transform) = q_box.single_mut(); let window = q_windows.single(); let (camera, camera_transform) = q_camera.single(); match *input_mode { InputMode::Gamepad => { for gamepad in &gamepads { if gamepad.just_pressed(GamepadButton::South) { transform.translation = STARTING_POS; } let rx = gamepad.get(GamepadAxis::RightStickX).unwrap(); let ry = gamepad.get(GamepadAxis::RightStickY).unwrap(); transform.translation += Vec3::new(rx, ry, 0.0); } } InputMode::Mouse => { if let Some(position) = window .cursor_position() .and_then(|cursor| camera.viewport_to_world(camera_transform, cursor).ok()) .map(|ray| ray.origin.truncate()) { transform.translation.x = position.x - BLOCK_SIZE / 2.0; transform.translation.y = position.y + BLOCK_SIZE / 2.0; } } InputMode::Keyboard => { if keyboard_input.just_pressed(KeyCode::Space) { b.0 = (b.0 + 1) % 64; let y = b.0 / 8; let x = b.0 % 8; transform.translation.x = BLOCK_SIZE * (-4.0 + x as f32); transform.translation.y = BLOCK_SIZE * (4.0 - y as f32); } } } } ================================================ FILE: test-apps/build.rs ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT use std::path::PathBuf; extern crate slang; fn main() { let mut session = slang::GlobalSession::new(); let out_dir = std::env::var("OUT_DIR").map(PathBuf::from).unwrap(); compile_shader( &mut session, "src/color-test.slang", out_dir.join("color-test/frag.spv").to_str().unwrap(), 
"frag", slang::Stage::Fragment, ); compile_shader( &mut session, "src/color-test.slang", out_dir.join("color-test/vert.spv").to_str().unwrap(), "vert", slang::Stage::Vertex, ); } fn compile_shader( session: &mut slang::GlobalSession, in_path: &str, out_path: &str, entry_point: &str, stage: slang::Stage, ) { std::fs::create_dir_all(PathBuf::from(out_path).parent().unwrap()) .expect("failed to create output directory"); let mut compile_request = session.create_compile_request(); compile_request .set_codegen_target(slang::CompileTarget::Spirv) .set_optimization_level(slang::OptimizationLevel::Maximal) .set_target_profile(session.find_profile("glsl_460")); let entry_point = compile_request .add_translation_unit(slang::SourceLanguage::Slang, None) .add_source_file(in_path) .add_entry_point(entry_point, stage); let shader_bytecode = compile_request .compile() .expect("Shader compilation failed."); std::fs::write(out_path, shader_bytecode.get_entry_point_code(entry_point)) .expect("failed to write shader bytecode to file"); println!("cargo::rerun-if-changed={}", in_path); } ================================================ FILE: test-apps/src/color-test.slang ================================================ // Copyright 2024 Colin Marc // // SPDX-License-Identifier: MIT struct PushConstants { float2 size; float2 mouse; float mul; int color_space; }; static const int VK_COLOR_SPACE_SRGB_NONLINEAR_EXT = 0; static const int VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT = 1_000_104_001; static const int VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT = 1_000_104_002; static const int VK_COLOR_SPACE_DISPLAY_P3_LINEAR_EXT = 1_000_104_003; static const int VK_COLOR_SPACE_DCI_P3_NONLINEAR_EXT = 1_000_104_004; static const int VK_COLOR_SPACE_BT709_LINEAR_EXT = 1_000_104_005; static const int VK_COLOR_SPACE_BT709_NONLINEAR_EXT = 1_000_104_006; static const int VK_COLOR_SPACE_BT2020_LINEAR_EXT = 1_000_104_007; static const int VK_COLOR_SPACE_HDR10_ST2084_EXT = 1_000_104_008; static const int 
static const int VK_COLOR_SPACE_HDR10_HLG_EXT = 1_000_104_010;
static const int VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT = 1_000_104_014;

[[vk::push_constant]]
PushConstants pc;

struct VertOutput {
    float2 uv : TextureCoord;
    float2 mouse : MouseCoord;
    float4 position : SV_Position;
};

// Fullscreen-triangle vertex shader: derives UVs from the vertex ID and
// corrects for the window's aspect ratio so the wheel stays circular.
[shader("vertex")]
VertOutput vert(uint vertexID: SV_VertexID)
{
    VertOutput output;

    float2 aspect;
    if (pc.size.x > pc.size.y) {
        aspect = float2(pc.size.x / pc.size.y, 1.0);
    } else {
        aspect = float2(1.0, pc.size.y / pc.size.x);
    }

    let uv = float2((vertexID << 1) & 2, vertexID & 2);
    output.uv = uv;
    output.mouse = pc.mouse * aspect;
    output.position = float4((uv * 2.0 - 1.0) / aspect, 0.0, 1.0);
    return output;
}

// Adapted from "Color Wheel", by GoldenCrystal:
// https://www.shadertoy.com/view/MsXXzX

static const float M_PI = 3.14159265358979323846;
static const float AA = 250;

// Maps a direction around the wheel to a fully-saturated RGB hue.
float3 hue(float2 pos)
{
    float theta = 3.0 + 3.0 * atan2(pos.x, pos.y) / M_PI;
    return clamp(abs(((theta + float3(0.0, 4.0, 2.0)) % 6.0) - 3.0) - 1.0, 0.0, 1.0);
}

// Renders the hue ring plus an inner saturation/luminance triangle for the
// hue currently picked by `mouse`. Returns premultiplied-style RGBA where
// the alpha doubles as edge-antialiasing coverage.
float4 color_wheel(float2 coord, float2 mouse)
{
    float l = length(coord);
    float m = length(mouse);
    float4 color = float4(0.0);

    if (l >= 0.75 && l <= 1.0) {
        // The ring: fade coverage towards both edges.
        l = 1.0 - abs((l - 0.875) * 8.0);
        l = clamp(l * AA * 0.0625, 0.0, 1.0); // Antialiasing approximation
        color = float4(l * hue(coord), l);
    } else if (l < 0.75) {
        float3 picked;
        if (m < 0.75 || m > 1.0) {
            // Mouse outside the ring: default to red at the bottom.
            mouse = float2(0.0, -1.0);
            picked = float3(1.0, 0.0, 0.0);
        } else {
            picked = hue(mouse);
        }

        coord = coord / 0.75;
        mouse = normalize(mouse);

        float sat = 1.5 - (dot(coord, mouse) + 0.5); // [0.0,1.5]
        if (sat < 1.5) {
            float h = sat / sqrt(3.0);
            float2 om = float2(cross(float3(mouse, 0.0), float3(0.0, 0.0, 1.0)).xy);
            float lum = dot(coord, om);

            if (abs(lum) <= h) {
                let l = clamp((h - abs(lum)) * AA * 0.5, 0.0, 1.0) *
                    clamp((1.5 - sat) / 1.5 * AA * 0.5, 0.0, 1.0); // Fake antialiasing
                return float4(l * lerp(picked, float3(0.5 * (lum + h) / h), sat / 1.5), l);
            }
        }
    }

    return color;
}
// Fragment shader: draws the color wheel, scales by `pc.mul` (HDR
// headroom), then applies the transfer function matching the swapchain's
// color space. Linear color spaces fall through untouched.
[shader("fragment")]
float4 frag(float2 uv: TextureCoord, float2 mouse: MouseCoord) : SV_Target
{
    // Use fresh locals rather than redeclaring the parameters, which is
    // illegal in the same scope.
    float2 pos = uv.xy * 2.0 - 1.0;
    float2 m = mouse * 2.0;
    pos = pos / 0.75;
    m = m / 0.75;

    let color = color_wheel(pos, m) * pc.mul;
    switch (pc.color_space) {
    case VK_COLOR_SPACE_SRGB_NONLINEAR_EXT:
        return delinearize_srgb(color);
    case VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT:
        // Display-P3 uses the sRGB transfer function.
        return delinearize_srgb(color);
    case VK_COLOR_SPACE_BT709_NONLINEAR_EXT:
        return delinearize_bt709(color);
    case VK_COLOR_SPACE_DCI_P3_NONLINEAR_EXT:
        return delinearize_dci_p3(color);
    case VK_COLOR_SPACE_HDR10_ST2084_EXT:
        return delinearize_pq(color);
    default:
        return color;
    }
}

// Per-channel sRGB OETF applied to RGB; alpha passes through.
float4 delinearize_srgb(float4 color)
{
    return float4(
        delinearize_srgb(color.r),
        delinearize_srgb(color.g),
        delinearize_srgb(color.b),
        color.a);
}

// sRGB OETF: linear segment below 0.0031308, gamma 2.4 curve above.
float delinearize_srgb(float channel)
{
    return channel > 0.0031308
        ? 1.055 * pow(channel, 1.0 / 2.4) - 0.055
        : 12.92 * channel;
}

// Per-channel BT.709 OETF applied to RGB; alpha passes through.
float4 delinearize_bt709(float4 color)
{
    return float4(
        delinearize_bt709(color.r),
        delinearize_bt709(color.g),
        delinearize_bt709(color.b),
        color.a);
}

// BT.709 OETF: linear segment below 0.018, gamma 1/2.2 curve above.
float delinearize_bt709(float channel)
{
    return channel >= 0.018
        ? 1.099 * pow(channel, 1.0 / 2.2) - 0.099
        : 4.5 * channel;
}

// Per-channel DCI-P3 OETF applied to RGB; alpha passes through.
float4 delinearize_dci_p3(float4 color)
{
    return float4(
        delinearize_dci_p3(color.r),
        delinearize_dci_p3(color.g),
        delinearize_dci_p3(color.b),
        color.a);
}

// DCI-P3 uses a pure gamma-2.6 transfer function.
float delinearize_dci_p3(float channel)
{
    return pow(channel, 1.0 / 2.6);
}

// SMPTE ST 2084 (PQ) constants.
static const float PQ_M1 = 0.1593017578125;
static const float PQ_M2 = 78.84375;
static const float PQ_C1 = 0.8359375;
static const float PQ_C2 = 18.8515625;
static const float PQ_C3 = 18.6875;
// Reference-white luminance in nits (ITU-R BT.2408 graphics white).
static const float REFERENCE_WHITE = 203.0;

// Per-channel PQ OETF applied to RGB; alpha passes through.
float4 delinearize_pq(float4 color)
{
    return float4(
        delinearize_pq(color.r),
        delinearize_pq(color.g),
        delinearize_pq(color.b),
        color.a);
}

// PQ OETF: maps linear light (scaled so 1.0 = reference white, against a
// 10,000-nit peak) to the ST 2084 signal.
float delinearize_pq(float channel)
{
    let c = pow(channel * REFERENCE_WHITE / 10000.0, PQ_M1);
    return pow((PQ_C1 + PQ_C2 * c) / (1.0 + PQ_C3 * c), PQ_M2);
}