Repository: containerd/rust-extensions Branch: main Commit: 1471c147e24f Files: 264 Total size: 991.7 KB Directory structure: gitextract__mh1hr60/ ├── .gitattributes ├── .github/ │ ├── dependabot.yml │ ├── labeler.yml │ ├── release.yml │ └── workflows/ │ ├── ci.yml │ ├── cover.yml │ ├── labeler.yml │ ├── publish.yml │ └── stale.yml ├── .gitignore ├── Cargo.toml ├── LICENSE ├── MAINTAINERS ├── README.md ├── clippy.toml ├── codecov.yml ├── crates/ │ ├── client/ │ │ ├── Cargo.toml │ │ ├── README.md │ │ ├── build.rs │ │ ├── examples/ │ │ │ ├── container.rs │ │ │ ├── container_events.rs │ │ │ ├── container_pull.rs │ │ │ ├── container_spec.json │ │ │ └── version.rs │ │ ├── rsync.txt │ │ ├── src/ │ │ │ └── lib.rs │ │ └── vendor/ │ │ ├── README.md │ │ ├── github.com/ │ │ │ └── containerd/ │ │ │ └── containerd/ │ │ │ ├── api/ │ │ │ │ ├── events/ │ │ │ │ │ ├── container.proto │ │ │ │ │ ├── content.proto │ │ │ │ │ ├── image.proto │ │ │ │ │ ├── namespace.proto │ │ │ │ │ ├── sandbox.proto │ │ │ │ │ ├── snapshot.proto │ │ │ │ │ └── task.proto │ │ │ │ ├── services/ │ │ │ │ │ ├── containers/ │ │ │ │ │ │ └── v1/ │ │ │ │ │ │ └── containers.proto │ │ │ │ │ ├── content/ │ │ │ │ │ │ └── v1/ │ │ │ │ │ │ └── content.proto │ │ │ │ │ ├── diff/ │ │ │ │ │ │ └── v1/ │ │ │ │ │ │ └── diff.proto │ │ │ │ │ ├── events/ │ │ │ │ │ │ └── v1/ │ │ │ │ │ │ └── events.proto │ │ │ │ │ ├── images/ │ │ │ │ │ │ └── v1/ │ │ │ │ │ │ └── images.proto │ │ │ │ │ ├── introspection/ │ │ │ │ │ │ └── v1/ │ │ │ │ │ │ └── introspection.proto │ │ │ │ │ ├── leases/ │ │ │ │ │ │ └── v1/ │ │ │ │ │ │ └── leases.proto │ │ │ │ │ ├── mounts/ │ │ │ │ │ │ └── v1/ │ │ │ │ │ │ └── mounts.proto │ │ │ │ │ ├── namespaces/ │ │ │ │ │ │ └── v1/ │ │ │ │ │ │ └── namespace.proto │ │ │ │ │ ├── sandbox/ │ │ │ │ │ │ └── v1/ │ │ │ │ │ │ └── sandbox.proto │ │ │ │ │ ├── snapshots/ │ │ │ │ │ │ └── v1/ │ │ │ │ │ │ └── snapshots.proto │ │ │ │ │ ├── streaming/ │ │ │ │ │ │ └── v1/ │ │ │ │ │ │ └── streaming.proto │ │ │ │ │ ├── tasks/ │ │ │ │ │ │ └── 
v1/ │ │ │ │ │ │ └── tasks.proto │ │ │ │ │ ├── transfer/ │ │ │ │ │ │ └── v1/ │ │ │ │ │ │ └── transfer.proto │ │ │ │ │ ├── ttrpc/ │ │ │ │ │ │ └── events/ │ │ │ │ │ │ └── v1/ │ │ │ │ │ │ └── events.proto │ │ │ │ │ └── version/ │ │ │ │ │ └── v1/ │ │ │ │ │ └── version.proto │ │ │ │ └── types/ │ │ │ │ ├── descriptor.proto │ │ │ │ ├── event.proto │ │ │ │ ├── fieldpath.proto │ │ │ │ ├── introspection.proto │ │ │ │ ├── metrics.proto │ │ │ │ ├── mount.proto │ │ │ │ ├── platform.proto │ │ │ │ ├── runc/ │ │ │ │ │ └── options/ │ │ │ │ │ └── oci.proto │ │ │ │ ├── runtimeoptions/ │ │ │ │ │ └── v1/ │ │ │ │ │ └── api.proto │ │ │ │ ├── sandbox.proto │ │ │ │ ├── task/ │ │ │ │ │ └── task.proto │ │ │ │ └── transfer/ │ │ │ │ ├── container.proto │ │ │ │ ├── imagestore.proto │ │ │ │ ├── importexport.proto │ │ │ │ ├── progress.proto │ │ │ │ ├── registry.proto │ │ │ │ └── streaming.proto │ │ │ └── vendor/ │ │ │ └── github.com/ │ │ │ └── containerd/ │ │ │ └── containerd/ │ │ │ └── api/ │ │ │ ├── events/ │ │ │ │ ├── container.proto │ │ │ │ ├── content.proto │ │ │ │ ├── image.proto │ │ │ │ ├── namespace.proto │ │ │ │ ├── sandbox.proto │ │ │ │ ├── snapshot.proto │ │ │ │ └── task.proto │ │ │ ├── services/ │ │ │ │ ├── containers/ │ │ │ │ │ └── v1/ │ │ │ │ │ └── containers.proto │ │ │ │ ├── content/ │ │ │ │ │ └── v1/ │ │ │ │ │ └── content.proto │ │ │ │ ├── diff/ │ │ │ │ │ └── v1/ │ │ │ │ │ └── diff.proto │ │ │ │ ├── events/ │ │ │ │ │ └── v1/ │ │ │ │ │ └── events.proto │ │ │ │ ├── images/ │ │ │ │ │ └── v1/ │ │ │ │ │ └── images.proto │ │ │ │ ├── introspection/ │ │ │ │ │ └── v1/ │ │ │ │ │ └── introspection.proto │ │ │ │ ├── leases/ │ │ │ │ │ └── v1/ │ │ │ │ │ └── leases.proto │ │ │ │ ├── mounts/ │ │ │ │ │ └── v1/ │ │ │ │ │ └── mounts.proto │ │ │ │ ├── namespaces/ │ │ │ │ │ └── v1/ │ │ │ │ │ └── namespace.proto │ │ │ │ ├── sandbox/ │ │ │ │ │ └── v1/ │ │ │ │ │ └── sandbox.proto │ │ │ │ ├── snapshots/ │ │ │ │ │ └── v1/ │ │ │ │ │ └── snapshots.proto │ │ │ │ ├── streaming/ │ │ │ │ │ └── v1/ │ │ │ │ │ └── 
streaming.proto │ │ │ │ ├── tasks/ │ │ │ │ │ └── v1/ │ │ │ │ │ └── tasks.proto │ │ │ │ ├── transfer/ │ │ │ │ │ └── v1/ │ │ │ │ │ └── transfer.proto │ │ │ │ ├── ttrpc/ │ │ │ │ │ └── events/ │ │ │ │ │ └── v1/ │ │ │ │ │ └── events.proto │ │ │ │ └── version/ │ │ │ │ └── v1/ │ │ │ │ └── version.proto │ │ │ └── types/ │ │ │ ├── descriptor.proto │ │ │ ├── event.proto │ │ │ ├── fieldpath.proto │ │ │ ├── introspection.proto │ │ │ ├── metrics.proto │ │ │ ├── mount.proto │ │ │ ├── platform.proto │ │ │ ├── runc/ │ │ │ │ └── options/ │ │ │ │ └── oci.proto │ │ │ ├── runtimeoptions/ │ │ │ │ └── v1/ │ │ │ │ └── api.proto │ │ │ ├── sandbox.proto │ │ │ ├── task/ │ │ │ │ └── task.proto │ │ │ └── transfer/ │ │ │ ├── container.proto │ │ │ ├── imagestore.proto │ │ │ ├── importexport.proto │ │ │ ├── progress.proto │ │ │ ├── registry.proto │ │ │ └── streaming.proto │ │ └── google/ │ │ ├── protobuf/ │ │ │ ├── any.proto │ │ │ ├── descriptor.proto │ │ │ ├── empty.proto │ │ │ ├── field_mask.proto │ │ │ └── timestamp.proto │ │ └── rpc/ │ │ └── status.proto │ ├── logging/ │ │ ├── Cargo.toml │ │ ├── README.md │ │ ├── examples/ │ │ │ └── journal.rs │ │ └── src/ │ │ └── lib.rs │ ├── runc/ │ │ ├── Cargo.toml │ │ ├── README.md │ │ └── src/ │ │ ├── asynchronous/ │ │ │ ├── io.rs │ │ │ ├── mod.rs │ │ │ ├── pipe.rs │ │ │ └── runc.rs │ │ ├── container.rs │ │ ├── error.rs │ │ ├── events.rs │ │ ├── lib.rs │ │ ├── monitor.rs │ │ ├── options.rs │ │ ├── synchronous/ │ │ │ ├── io.rs │ │ │ ├── mod.rs │ │ │ ├── pipe.rs │ │ │ └── runc.rs │ │ └── utils.rs │ ├── runc-shim/ │ │ ├── Cargo.toml │ │ ├── README.md │ │ ├── build.rs │ │ └── src/ │ │ ├── cgroup_memory.rs │ │ ├── common.rs │ │ ├── console.rs │ │ ├── container.rs │ │ ├── io.rs │ │ ├── main.rs │ │ ├── processes.rs │ │ ├── runc.rs │ │ ├── service.rs │ │ └── task.rs │ ├── shim/ │ │ ├── Cargo.toml │ │ ├── README.md │ │ ├── examples/ │ │ │ ├── publish.rs │ │ │ ├── skeleton.rs │ │ │ ├── skeleton_async.rs │ │ │ └── windows_log_reader.rs │ │ └── src/ │ │ ├── args.rs 
│ │ ├── asynchronous/ │ │ │ ├── mod.rs │ │ │ ├── monitor.rs │ │ │ ├── publisher.rs │ │ │ └── util.rs │ │ ├── cgroup.rs │ │ ├── error.rs │ │ ├── event.rs │ │ ├── lib.rs │ │ ├── logger.rs │ │ ├── monitor.rs │ │ ├── mount_linux.rs │ │ ├── mount_other.rs │ │ ├── reap.rs │ │ ├── synchronous/ │ │ │ ├── mod.rs │ │ │ ├── monitor.rs │ │ │ ├── publisher.rs │ │ │ └── util.rs │ │ └── util.rs │ ├── shim-protos/ │ │ ├── Cargo.toml │ │ ├── README.md │ │ ├── build.rs │ │ ├── examples/ │ │ │ ├── connect-async.rs │ │ │ ├── connect.rs │ │ │ ├── ttrpc-client-async.rs │ │ │ ├── ttrpc-client.rs │ │ │ ├── ttrpc-server-async.rs │ │ │ └── ttrpc-server.rs │ │ ├── rsync.txt │ │ ├── src/ │ │ │ ├── cgroups.rs │ │ │ ├── cgroups_v2.rs │ │ │ ├── events.rs │ │ │ ├── lib.rs │ │ │ ├── sandbox.rs │ │ │ ├── shim.rs │ │ │ ├── topics.rs │ │ │ ├── types.rs │ │ │ └── windows.rs │ │ ├── tests/ │ │ │ └── ttrpc.rs │ │ └── vendor/ │ │ ├── README.md │ │ ├── github.com/ │ │ │ └── containerd/ │ │ │ ├── cgroups/ │ │ │ │ ├── cgroup2/ │ │ │ │ │ └── stats/ │ │ │ │ │ └── metrics.proto │ │ │ │ ├── stats/ │ │ │ │ │ └── v1/ │ │ │ │ │ └── metrics.proto │ │ │ │ └── v3/ │ │ │ │ └── cgroup1/ │ │ │ │ └── stats/ │ │ │ │ └── metrics.proto │ │ │ └── containerd/ │ │ │ ├── api/ │ │ │ │ ├── events/ │ │ │ │ │ ├── container.proto │ │ │ │ │ ├── content.proto │ │ │ │ │ ├── image.proto │ │ │ │ │ ├── namespace.proto │ │ │ │ │ ├── sandbox.proto │ │ │ │ │ ├── snapshot.proto │ │ │ │ │ └── task.proto │ │ │ │ ├── runtime/ │ │ │ │ │ ├── sandbox/ │ │ │ │ │ │ └── v1/ │ │ │ │ │ │ └── sandbox.proto │ │ │ │ │ └── task/ │ │ │ │ │ └── v2/ │ │ │ │ │ └── shim.proto │ │ │ │ ├── services/ │ │ │ │ │ └── ttrpc/ │ │ │ │ │ └── events/ │ │ │ │ │ └── v1/ │ │ │ │ │ └── events.proto │ │ │ │ └── types/ │ │ │ │ ├── descriptor.proto │ │ │ │ ├── event.proto │ │ │ │ ├── fieldpath.proto │ │ │ │ ├── introspection.proto │ │ │ │ ├── metrics.proto │ │ │ │ ├── mount.proto │ │ │ │ ├── platform.proto │ │ │ │ ├── runc/ │ │ │ │ │ └── options/ │ │ │ │ │ └── oci.proto │ │ │ │ 
├── sandbox.proto │ │ │ │ └── task/ │ │ │ │ └── task.proto │ │ │ └── vendor/ │ │ │ └── github.com/ │ │ │ └── containerd/ │ │ │ └── containerd/ │ │ │ └── api/ │ │ │ ├── events/ │ │ │ │ ├── container.proto │ │ │ │ ├── content.proto │ │ │ │ ├── image.proto │ │ │ │ ├── namespace.proto │ │ │ │ ├── sandbox.proto │ │ │ │ ├── snapshot.proto │ │ │ │ └── task.proto │ │ │ ├── runtime/ │ │ │ │ ├── sandbox/ │ │ │ │ │ └── v1/ │ │ │ │ │ └── sandbox.proto │ │ │ │ └── task/ │ │ │ │ └── v2/ │ │ │ │ └── shim.proto │ │ │ ├── services/ │ │ │ │ └── ttrpc/ │ │ │ │ └── events/ │ │ │ │ └── v1/ │ │ │ │ └── events.proto │ │ │ └── types/ │ │ │ ├── descriptor.proto │ │ │ ├── event.proto │ │ │ ├── fieldpath.proto │ │ │ ├── introspection.proto │ │ │ ├── metrics.proto │ │ │ ├── mount.proto │ │ │ ├── platform.proto │ │ │ ├── runc/ │ │ │ │ └── options/ │ │ │ │ └── oci.proto │ │ │ ├── sandbox.proto │ │ │ └── task/ │ │ │ └── task.proto │ │ ├── gogoproto/ │ │ │ └── gogo.proto │ │ ├── google/ │ │ │ └── protobuf/ │ │ │ ├── any.proto │ │ │ ├── descriptor.proto │ │ │ ├── empty.proto │ │ │ └── timestamp.proto │ │ └── microsoft/ │ │ └── hcsshim/ │ │ └── cmd/ │ │ └── containerd-shim-runhcs-v1/ │ │ └── stats/ │ │ └── stats.proto │ └── snapshots/ │ ├── Cargo.toml │ ├── README.md │ ├── build.rs │ ├── examples/ │ │ └── snapshotter.rs │ ├── rsync.txt │ ├── src/ │ │ ├── convert.rs │ │ ├── lib.rs │ │ └── wrap.rs │ └── vendor/ │ ├── github.com/ │ │ └── containerd/ │ │ └── containerd/ │ │ ├── api/ │ │ │ ├── services/ │ │ │ │ └── snapshots/ │ │ │ │ └── v1/ │ │ │ │ └── snapshots.proto │ │ │ └── types/ │ │ │ └── mount.proto │ │ └── vendor/ │ │ └── github.com/ │ │ └── containerd/ │ │ └── containerd/ │ │ └── api/ │ │ ├── services/ │ │ │ └── snapshots/ │ │ │ └── v1/ │ │ │ └── snapshots.proto │ │ └── types/ │ │ └── mount.proto │ ├── gogoproto/ │ │ └── gogo.proto │ └── google/ │ └── protobuf/ │ ├── descriptor.proto │ ├── empty.proto │ ├── field_mask.proto │ └── timestamp.proto ├── deny.toml ├── rust-toolchain.toml ├── 
rustfmt.toml └── scripts/ ├── install-protobuf.sh └── update-vendor.sh ================================================ FILE CONTENTS ================================================ ================================================ FILE: .gitattributes ================================================ * text=auto eol=lf ================================================ FILE: .github/dependabot.yml ================================================ version: 2 updates: - package-ecosystem: "cargo" directory: "/" schedule: interval: "daily" labels: - T-dependencies # Ensure that references to actions in a repository's workflow.yml file are kept up to date. # See https://docs.github.com/en/code-security/supply-chain-security/keeping-your-dependencies-updated-automatically/keeping-your-actions-up-to-date-with-dependabot - package-ecosystem: "github-actions" directory: "/" schedule: interval: "daily" labels: # Mark PRs as CI related change. - T-CI ================================================ FILE: .github/labeler.yml ================================================ # Automatically assign labels to PRs. # `C-` project crate(s) affected. # `T-` change type (CI, docs, etc). 
C-client: - changed-files: - any-glob-to-any-file: crates/client/** C-logging: - changed-files: - any-glob-to-any-file: crates/logging/** C-runc: - changed-files: - any-glob-to-any-file: crates/runc/** C-runc-shim: - changed-files: - any-glob-to-any-file: crates/runc-shim/** C-shim: - changed-files: - any-glob-to-any-file: crates/shim/** C-shim-protos: - changed-files: - any-glob-to-any-file: crates/shim-protos/** C-snapshots: - changed-files: - any-glob-to-any-file: crates/snapshots/** T-CI: - changed-files: - any-glob-to-any-file: [".github/**", "*.toml"] T-docs: - changed-files: - any-glob-to-any-file: "**/*.md" ================================================ FILE: .github/release.yml ================================================ changelog: categories: - title: Runc crate labels: - C-runc - title: Runc shim crate labels: - C-runc-shim - title: Shim crate labels: - C-shim - title: Shim protos crate labels: - C-shim-protos - title: Snapshots crate labels: - C-snapshots - title: Client crate labels: - C-client - title: Logging crate labels: - C-logging - title: Other changes labels: - T-CI - T-docs ================================================ FILE: .github/workflows/ci.yml ================================================ name: CI on: pull_request: push: merge_group: schedule: - cron: '0 0 * * *' # Every day at midnight jobs: checks: name: Checks runs-on: ${{ matrix.os }} timeout-minutes: 20 strategy: matrix: include: - os: ubuntu-latest target: x86_64-unknown-linux-gnu - os: ubuntu-latest target: x86_64-unknown-linux-musl - os: macos-latest target: aarch64-apple-darwin steps: - uses: actions/checkout@v6 - run: ./scripts/install-protobuf.sh shell: bash - if: matrix.target == 'x86_64-unknown-linux-musl' run: sudo apt-get update && sudo apt-get install -y musl-tools && rustup target add x86_64-unknown-linux-musl - run: rustup toolchain install nightly --component rustfmt --target ${{ matrix.target }} - run: cargo +nightly fmt --all -- --check # the "runc" and 
"containerd-shim" crates have `sync` code that is not covered by the workspace - run: cargo check -p runc --all-targets --target ${{ matrix.target }} - run: cargo clippy -p runc --all-targets --target ${{ matrix.target }} -- -D warnings - run: cargo check -p containerd-shim --all-targets --target ${{ matrix.target }} - run: cargo clippy -p containerd-shim --all-targets --target ${{ matrix.target }} -- -D warnings # check the workspace - run: cargo check --examples --tests --all-targets --target ${{ matrix.target }} - run: cargo check --examples --tests --all-targets --all-features --target ${{ matrix.target }} - run: cargo clippy --all-targets --target ${{ matrix.target }} -- -D warnings - run: cargo clippy --all-targets --all-features --target ${{ matrix.target }} -- -D warnings - run: cargo doc --no-deps --features docs env: RUSTDOCFLAGS: -Dwarnings - name: check unused dependencies uses: bnjbvr/cargo-machete@v0.9.2 env: RUSTUP_TOOLCHAIN: "stable" # TODO: Merge this with the checks job above windows-checks: name: Windows Checks runs-on: windows-latest timeout-minutes: 20 steps: - uses: actions/checkout@v6 - run: ./scripts/install-protobuf.sh shell: bash - run: cargo check --examples --tests -p containerd-shim -p containerd-shim-protos -p containerd-client - run: rustup toolchain install nightly --component rustfmt - run: cargo +nightly fmt -p containerd-shim -p containerd-shim-protos -p containerd-client -- --check - run: cargo clippy -p containerd-shim -p containerd-shim-protos -- -D warnings - run: cargo doc --no-deps -p containerd-shim -p containerd-shim-protos -p containerd-client env: RUSTDOCFLAGS: -Dwarnings tests: name: Tests runs-on: ${{ matrix.os }} timeout-minutes: 15 strategy: matrix: os: [ubuntu-latest, macos-latest, windows-latest] steps: - uses: actions/checkout@v6 - run: ./scripts/install-protobuf.sh shell: bash - run: | # runc-shim::cgroup::test_add_cgroup needs root permission to set cgroup mkdir -p /tmp/dummy-xdr sudo -E $(command -v cargo) test 
sudo -E $(command -v cargo) test --all-features # the shim has sync code that is not covered when running with --all-features sudo -E $(command -v cargo) test -p containerd-shim if: ${{ !contains(matrix.os, 'windows') }} env: # runc::tests::test_exec needs $XDG_RUNTIME_DIR to be set XDG_RUNTIME_DIR: /tmp/dummy-xdr - run: cargo test -p containerd-shim -p containerd-shim-protos -p containerd-client if: ${{ contains(matrix.os, 'windows') }} # Collect build timings # See https://blog.rust-lang.org/2022/04/07/Rust-1.60.0.html#cargo---timings timings: name: Timings runs-on: ubuntu-latest timeout-minutes: 15 steps: - uses: actions/checkout@v6 - run: ./scripts/install-protobuf.sh shell: bash - run: cargo build --all-features --timings - uses: actions/upload-artifact@v7 with: name: timings path: target/cargo-timings/cargo-timing.html if-no-files-found: error deny: name: Deny runs-on: ubuntu-latest timeout-minutes: 10 steps: - uses: actions/checkout@v6 - uses: EmbarkStudios/cargo-deny-action@v2 linux-integration: name: Linux Integration runs-on: ${{ matrix.os }} timeout-minutes: 40 strategy: matrix: os: [ubuntu-latest] containerd: [v1.7.30, v2.1.6, v2.2.1] steps: - name: Checkout extensions uses: actions/checkout@v6 - name: Download containerd archive env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | gh release download ${{ matrix.containerd }} \ --repo containerd/containerd \ --pattern 'containerd-1.*-linux-amd64.tar.gz' \ --pattern 'containerd-2.*-linux-amd64.tar.gz' \ --output containerd.tar.gz - name: Extract containerd binaries to $HOME/.local/bin run: | mkdir -p $HOME/.local/bin echo "$HOME/.local/bin" >> $GITHUB_PATH tar -xf containerd.tar.gz -C $HOME/.local - name: Checkout containerd uses: actions/checkout@v6 with: repository: containerd/containerd path: src/github.com/containerd/containerd ref: ${{ matrix.containerd }} - name: Install shim run: | cargo build --release --bin containerd-shim-runc-v2-rs sudo install -D ./target/release/containerd-shim-runc-v2-rs 
/usr/local/bin/ ## get latest go version for integrations tests so we can skip runnings tests - uses: actions/setup-go@v6 - name: Integration env: TEST_RUNTIME: "io.containerd.runc.v2-rs" TESTFLAGS_PARALLEL: 1 EXTRA_TESTFLAGS: "-no-criu -test.skip='(TestContainerPTY|TestContainerExecLargeOutputWithTTY|TestTaskUpdate|TestTaskResize|TestContainerAttach|TestContainerAttachProcess|TestRuntimeInfo)'" TESTFLAGS_RACE: "-race" # Pretend crun for now, remove after https://github.com/containerd/containerd/pull/9829 RUNC_FLAVOR: "crun" run: | sudo -E PATH=$PATH make integration working-directory: src/github.com/containerd/containerd windows-integration: name: Windows Integration runs-on: ${{ matrix.os }} timeout-minutes: 40 strategy: matrix: os: [windows-latest] containerd: [1.7.0] steps: - name: Checkout extensions uses: actions/checkout@v6 - run: ./scripts/install-protobuf.sh shell: bash - name: Install containerd run: | $ErrorActionPreference = "Stop" # Install containerd https://github.com/containerd/containerd/blob/v1.7.0/docs/getting-started.md#installing-containerd-on-windows # Download and extract desired containerd Windows binaries curl.exe -L https://github.com/containerd/containerd/releases/download/v${{ matrix.containerd }}/containerd-${{ matrix.containerd }}-windows-amd64.tar.gz -o containerd-windows-amd64.tar.gz tar.exe xvf .\containerd-windows-amd64.tar.gz # Copy and configure mkdir "$Env:ProgramFiles\containerd" Copy-Item -Path ".\bin\*" -Destination "$Env:ProgramFiles\containerd" -Recurse -Force cd $Env:ProgramFiles\containerd\ .\containerd.exe config default | Out-File config.toml -Encoding ascii # Review the configuration. 
Depending on setup you may want to adjust: # - the sandbox_image (Kubernetes pause image) # - cni bin_dir and conf_dir locations Get-Content config.toml # Register and start service .\containerd.exe --register-service Start-Service containerd working-directory: ${{ runner.temp }} - name: Run integration test run: | $ErrorActionPreference = "Stop" get-service containerd $env:TTRPC_ADDRESS="\\.\pipe\containerd-containerd.ttrpc" # run the example cargo run -p containerd-shim --example skeleton -- -namespace default -id 1234 -address "\\.\pipe\containerd-containerd" -publish-binary ./bin/containerd start ps skeleton cargo run -p containerd-shim-protos --example shim-proto-connect \\.\pipe\containerd-shim-bc764c65e177434fcefe8257dc440be8b8acf7c96156320d965938f7e9ae1a35-pipe $skeleton = get-process skeleton -ErrorAction SilentlyContinue if ($skeleton) { exit 1 } - name: Run client run: | $ErrorActionPreference = "Stop" get-service containerd cargo run -p containerd-client --example version # Currently Github actions UI supports no masks to mark matrix jobs as required to pass status checks. # This means that every time version of Go, containerd, or OS is changed, a corresponding job should # be added to the list of required checks. Which is not very convenient. # To workaround this, a special job is added to report statuses of all other jobs, with fixed title. # So it needs to be added to the list of required checks only once. # # See https://github.com/orgs/community/discussions/26822 results: name: Report required job statuses runs-on: ubuntu-latest # List job dependencies which are required to pass status checks in order to be merged via merge queue. 
needs: [checks, windows-checks, tests, deny, linux-integration, windows-integration] if: ${{ always() }} steps: - run: exit 1 if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') }} ================================================ FILE: .github/workflows/cover.yml ================================================ name: Coverage on: push: branches: "main" pull_request: branches: "main" jobs: coverage: name: Collect runs-on: ubuntu-latest timeout-minutes: 15 permissions: statuses: write steps: - uses: actions/checkout@v6 - run: | sudo apt-get install protobuf-compiler - name: Install grcov run: | cargo install --locked grcov@0.8.24 grcov --version - name: Tests env: RUSTFLAGS: "-Cinstrument-coverage" LLVM_PROFILE_FILE: "target/coverage/%p-%m.profraw" run: | sudo -E $(command -v cargo) test --all-features # Fix permissions after sudo. sudo chown -R $(whoami) target/coverage/ - name: Collect coverage data run: | grcov . \ --source-dir . \ --binary-path ./target/debug/ \ --branch \ --ignore-not-existing \ --output-types markdown,lcov \ --keep-only 'crates/*' \ --output-path ./target/coverage/ - name: Upload coverage data uses: codecov/codecov-action@v6 with: token: ${{ secrets.CODECOV_TOKEN }} flags: unittests files: ./target/coverage/lcov verbose: true - name: Publish job summary run: | echo "# Coverage" >> $GITHUB_STEP_SUMMARY cat target/coverage/markdown.md >> $GITHUB_STEP_SUMMARY ================================================ FILE: .github/workflows/labeler.yml ================================================ name: PR Labeler on: # Runs workflow when activity on a PR in the workflow's repository occurs. 
pull_request_target: jobs: triage: permissions: contents: read pull-requests: write name: Assign labels runs-on: ubuntu-latest timeout-minutes: 5 # Required by gh env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} PR_URL: ${{ github.event.pull_request.html_url }} steps: - uses: actions/labeler@v6 with: # Auto-include paths starting with dot (e.g. .github) dot: true # Remove labels when matching files are reverted or no longer changed by the PR sync-labels: true # Apply OS-windows label if PR title contains 'Windows' - run: gh pr edit $PR_URL --add-label OS-windows if: contains(github.event.pull_request.title, 'Windows') ================================================ FILE: .github/workflows/publish.yml ================================================ # Automates crate publishing # - Specify crate and version from the menu. # - Launch the job: # + Updates Cargo.toml with the specified version # + Commits and pushes the version bump # + Publishes to crates.io # + Adds and pushes a git tag "-v" name: Release on: workflow_dispatch: inputs: crate: description: "Crate to publish" required: true type: choice options: - client - logging - runc - runc-shim - shim - shim-protos - snapshots version: description: "Version to publish (e.g. 0.8.1)" required: true type: string dryrun: description: "Dry run" required: false type: boolean default: false jobs: publish: name: "Publish ${{ inputs.crate }} v${{ inputs.version }}" runs-on: ubuntu-latest timeout-minutes: 10 permissions: contents: write env: CARGO_FILE: "crates/${{ inputs.crate }}/Cargo.toml" steps: - uses: actions/checkout@v6 - name: Validate version env: VERSION: ${{ inputs.version }} run: | if [[ ! "$VERSION" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.]+)?$ ]]; then echo "::error::Invalid version: $VERSION (expected semver, e.g. 
0.8.1 or 1.0.0-rc.1)" exit 1 fi - name: Update crate version env: VERSION: ${{ inputs.version }} run: sed -i "s/^version = \".*\"/version = \"$VERSION\"/" $CARGO_FILE - name: Commit version bump env: CRATE: ${{ inputs.crate }} VERSION: ${{ inputs.version }} run: | git config user.name "GitHub Actions" git config user.email "actions@github.com" git add $CARGO_FILE git commit -m "Bump $CRATE to v$VERSION" - name: Install protobuf if: ${{ contains(fromJSON('["client","snapshots"]'), inputs.crate) }} run: | sudo apt update sudo apt install protobuf-compiler - name: Publish on crates.io run: cargo publish $DRYRUN --manifest-path $CARGO_FILE env: DRYRUN: ${{ inputs.dryrun && '--dry-run' || '' }} CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} - name: Push commit and tag if: ${{ !inputs.dryrun }} env: TAG: ${{ inputs.crate }}-v${{ inputs.version }} run: | git tag $TAG git push --atomic origin HEAD $TAG ================================================ FILE: .github/workflows/stale.yml ================================================ name: 'Close stale issues and PRs' on: schedule: - cron: "0 0 * * *" # Every day at midnight pull_request: paths: - '.github/workflows/stale.yml' permissions: read-all jobs: stale: runs-on: ubuntu-latest permissions: actions: write contents: write # only for delete-branch option issues: write pull-requests: write steps: - uses: actions/stale@b5d41d4e1d5dceea10e7104786b73624c18a190f # v10.2.0 # All stale bot options: https://github.com/actions/stale#all-options with: # Idle number of days before marking issues/PRs stale days-before-stale: 90 # Idle number of days before closing stale issues/PRs days-before-close: 7 # Comment on the staled issues stale-issue-message: 'This issue is stale because it has been open 90 days with no activity. This issue will be closed in 7 days unless new comments are made or the stale label is removed.' 
# Comment on the staled PRs stale-pr-message: 'This PR is stale because it has been open 90 days with no activity. This PR will be closed in 7 days unless new comments are made or the stale label is removed.' # Comment on the staled issues while closed close-issue-message: 'This issue was closed because it has been stalled for 7 days with no activity.' # Comment on the staled PRs while closed close-pr-message: 'This PR was closed because it has been stalled for 7 days with no activity.' # Enable dry-run when changing this file from a PR. debug-only: ${{ github.event_name == 'pull_request' }} ================================================ FILE: .gitignore ================================================ # Generated by Cargo # will have compiled files and executables /target/ # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html Cargo.lock # These are backup files generated by rustfmt **/*.rs.bk log .vscode ================================================ FILE: Cargo.toml ================================================ [workspace] members = [ "crates/client", "crates/logging", "crates/runc", "crates/runc-shim", "crates/shim", "crates/shim-protos", "crates/snapshots", ] resolver = "2" [profile.release] # Keep binary as small as possible # https://doc.rust-lang.org/book/ch09-01-unrecoverable-errors-with-panic.html panic = 'abort' # Common for all crates # See https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html#inheriting-a-dependency-from-a-workspace [workspace.package] license = "Apache-2.0" repository = "https://github.com/containerd/rust-extensions" homepage = "https://containerd.io" edition = "2021" # Common dependencies for all crates [workspace.dependencies] async-trait = "0.1.89" cgroups-rs = { version = "0.5", default-features = false } crossbeam = { version = "0.8", default-features = false } futures = { version = 
"0.3", default-features = false } libc = { version = "0.2", default-features = false } log = { version = "0.4", default-features = false } nix = { version = "0.31", default-features = false } oci-spec = { version = "0.9", default-features = false } prost = { version = "0.14", default-features = false } prost-build = { version = "0.14", default-features = false } prost-types = { version = "0.14", default-features = false } serde = { version = "1.0", default-features = false } serde_json = { version = "1.0", default-features = false } simple_logger = { version = "5.2", default-features = false } tempfile = "3.27" thiserror = "2.0" time = { version = "0.3", default-features = false } tokio = { version = "1.50", default-features = false } tonic = { version = "0.14", default-features = false } tonic-prost = "0.14" tonic-prost-build = { version = "0.14", default-features = false } tower = { version = "0.5", default-features = false } uuid = { version = "1.22", default-features = false } ================================================ FILE: LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. 
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: MAINTAINERS ================================================ # rust-extensions maintainers # # As a containerd sub-project, containerd maintainers are also included from https://github.com/containerd/project/blob/main/MAINTAINERS. # See https://github.com/containerd/project/blob/main/GOVERNANCE.md for description of maintainer role # # REVIEWERS # GitHub ID, Name, Email address "Burning1020","Zhang Tianyang","burning9699@gmail.com" "jsturtevant","James Sturtevant","jstur@microsoft.com" "mossaka","Jiaxiao Zhou","jiazho@microsoft.com" "jokemanfire","Dingyang Hu","hu.dingyang@zte.com.cn" ================================================ FILE: README.md ================================================ # Rust extensions for containerd [![CI](https://github.com/mxpv/shim-rs/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/mxpv/shim-rs/actions/workflows/ci.yml) [![codecov](https://codecov.io/gh/containerd/rust-extensions/graph/badge.svg?token=VPUPN3MOFX)](https://codecov.io/gh/containerd/rust-extensions) [![Crates.io](https://img.shields.io/crates/l/containerd-client)](https://github.com/containerd/rust-extensions/blob/main/LICENSE) [![dependency status](https://deps.rs/repo/github/containerd/rust-extensions/status.svg)](https://deps.rs/repo/github/containerd/rust-extensions) A collection of Rust crates to extend containerd. 
This repository contains the following crates: | Name | Description | Links | | --- | --- | --- | | [containerd-shim-protos](crates/shim-protos) | TTRPC bindings to shim interfaces | [![Crates.io](https://img.shields.io/crates/v/containerd-shim-protos)](https://crates.io/crates/containerd-shim-protos) [![docs.rs](https://img.shields.io/docsrs/containerd-shim-protos)](https://docs.rs/containerd-shim-protos/latest/containerd_shim_protos/) | | [containerd-shim-logging](crates/logging) | Shim logger plugins | [![Crates.io](https://img.shields.io/crates/v/containerd-shim-logging)](https://crates.io/crates/containerd-shim-logging) [![docs.rs](https://img.shields.io/docsrs/containerd-shim-logging)](https://docs.rs/containerd-shim-logging/latest/containerd_shim_logging/) | | [containerd-shim](crates/shim) | Runtime v2 shim wrapper | [![Crates.io](https://img.shields.io/crates/v/containerd-shim)](https://crates.io/crates/containerd-shim) [![docs.rs](https://img.shields.io/docsrs/containerd-shim)](https://docs.rs/containerd-shim/latest/containerd_shim/) | | [containerd-client](crates/client) | GRPC bindings to containerd APIs | [![Crates.io](https://img.shields.io/crates/v/containerd-client)](https://crates.io/crates/containerd-client) [![docs.rs](https://img.shields.io/docsrs/containerd-client)](https://docs.rs/containerd-client/latest/containerd_client/) | | [containerd-snapshots](crates/snapshots) | Remote snapshotter for containerd | [![Crates.io](https://img.shields.io/crates/v/containerd-snapshots)](https://crates.io/crates/containerd-snapshots) [![docs.rs](https://img.shields.io/docsrs/containerd-snapshots)](https://docs.rs/containerd-snapshots/latest/containerd_snapshots/) | | [runc](crates/runc) | Rust wrapper for runc CLI | [![Crates.io](https://img.shields.io/crates/v/runc)](https://crates.io/crates/runc) [![docs.rs](https://img.shields.io/docsrs/runc)](https://docs.rs/runc/latest/runc/) | | [containerd-runc-shim](crates/runc-shim) | Runtime v2 runc shim 
implementation | [![Crates.io](https://img.shields.io/crates/v/containerd-runc-shim)](https://crates.io/crates/containerd-runc-shim) | ## How to build The build process is as easy as: ```bash cargo build --release ``` ## Minimum supported Rust version (MSRV) The project typically targets the latest stable Rust version. Please refer to [rust-toolchain.toml](./rust-toolchain.toml) for the exact version currently used by our CIs. ================================================ FILE: clippy.toml ================================================ msrv = "1.91" ================================================ FILE: codecov.yml ================================================ comment: false ================================================ FILE: crates/client/Cargo.toml ================================================ [package] name = "containerd-client" version = "0.9.0" authors = [ "Maksym Pavlenko ", "The containerd Authors", ] description = "GRPC bindings to containerd APIs" keywords = ["containerd", "client", "grpc", "containers"] categories = ["api-bindings", "asynchronous"] edition.workspace = true license.workspace = true repository.workspace = true homepage.workspace = true [[example]] name = "container" path = "examples/container.rs" [[example]] name = "version" path = "examples/version.rs" [dependencies] hyper-util = { version = "0.1.20", default-features = false, features = ["tokio"] } prost = { workspace = true, features = ["derive", "std"] } prost-types = { workspace = true, features = ["std"] } tokio = { workspace = true, features = ["net"], optional = true } tonic = { workspace = true, features = ["codegen", "channel"] } tonic-prost.workspace = true tower = { workspace = true, features = ["util"], optional = true } [build-dependencies] tonic-prost-build.workspace = true [dev-dependencies] tokio = { workspace = true, features = ["rt", "macros", "net"] } [features] connect = ["tokio", "tower"] docs = [] # Technically Tonic doesn't require Tokio and Tower dependencies 
here. # However we need them to implement `connect` helper and it's highly unlikely # that Tonic will be used with any other async runtime (see https://github.com/hyperium/tonic/issues/152) # So we enable `connect` feature by default (use `--no-default-features` otherwise). default = ["connect"] [package.metadata.docs.rs] features = ["docs"] [package.metadata.cargo-machete] ignored = ["prost", "tonic-prost"] ================================================ FILE: crates/client/README.md ================================================ # containerd GRPC client [![Crates.io](https://img.shields.io/crates/v/containerd-client)](https://crates.io/crates/containerd-client) [![docs.rs](https://img.shields.io/docsrs/containerd-client)](https://docs.rs/containerd-client/latest/containerd_client/) [![Crates.io](https://img.shields.io/crates/l/containerd-client)](https://github.com/containerd/rust-extensions/blob/main/LICENSE) [![CI](https://github.com/containerd/rust-extensions/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/containerd/rust-extensions/actions/workflows/ci.yml) This crate implements a GRPC client to query containerd APIs. ## Example Run with `cargo run --example version` ```rust use containerd_client::{connect, services::v1::version_client::VersionClient}; async fn query_version() { // Launch containerd at /run/containerd/containerd.sock let channel = connect("/run/containerd/containerd.sock").await.unwrap(); let mut client = VersionClient::new(channel); let resp = client.version(()).await.unwrap(); println!("Response: {:?}", resp.get_ref()); } ``` ================================================ FILE: crates/client/build.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ use std::{env, fs, io}; const PROTO_FILES: &[&str] = &[ // Types "types/descriptor.proto", "types/metrics.proto", "types/mount.proto", "types/platform.proto", "types/sandbox.proto", "types/task/task.proto", "types/transfer/imagestore.proto", "types/transfer/importexport.proto", "types/transfer/progress.proto", "types/transfer/registry.proto", "types/transfer/streaming.proto", // Services "services/containers/v1/containers.proto", "services/content/v1/content.proto", "services/diff/v1/diff.proto", "services/events/v1/events.proto", "services/images/v1/images.proto", "services/introspection/v1/introspection.proto", "services/leases/v1/leases.proto", "services/namespaces/v1/namespace.proto", "services/sandbox/v1/sandbox.proto", "services/snapshots/v1/snapshots.proto", "services/streaming/v1/streaming.proto", "services/tasks/v1/tasks.proto", "services/transfer/v1/transfer.proto", "services/version/v1/version.proto", // Events "events/container.proto", "events/content.proto", "events/image.proto", "events/namespace.proto", "events/snapshot.proto", "events/task.proto", ]; const FIXUP_MODULES: &[&str] = &[ "containerd.services.diff.v1", "containerd.services.images.v1", "containerd.services.introspection.v1", "containerd.services.sandbox.v1", "containerd.services.snapshots.v1", "containerd.services.tasks.v1", "containerd.services.containers.v1", "containerd.services.content.v1", "containerd.services.events.v1", ]; fn main() { let mut config = tonic_prost_build::Config::new(); config.protoc_arg("--experimental_allow_proto3_optional"); config.enable_type_names(); 
tonic_prost_build::configure() .build_server(false) .compile_with_config( config, PROTO_FILES, &["vendor/github.com/containerd/containerd/api/", "vendor/"], ) .expect("Failed to generate GRPC bindings"); for module in FIXUP_MODULES { fixup_imports(module).expect("Failed to fixup module"); } } // Original containerd's protobuf files contain Go style imports: // import "github.com/containerd/containerd/api/types/mount.proto"; // // Tonic produces invalid code for these imports: // error[E0433]: failed to resolve: there are too many leading `super` keywords // --> /containerd-rust-extensions/target/debug/build/containerd-client-protos-0a328c0c63f60cd0/out/containerd.services.diff.v1.rs:47:52 // | // 47 | pub diff: ::core::option::Option, // | ^^^^^ there are too many leading `super` keywords // // This func fixes imports to crate level ones, like `crate::types::Mount` fn fixup_imports(path: &str) -> Result<(), io::Error> { let out_dir = env::var("OUT_DIR").unwrap(); let path = format!("{}/{}.rs", out_dir, path); let contents = fs::read_to_string(&path)? .replace("super::super::super::v1::types", "crate::types::v1") // for tasks service .replace("super::super::super::super::types", "crate::types") .replace("super::super::super::types", "crate::types") .replace("super::super::super::super::google", "crate::google") .replace( "/// filters\\[0\\] or filters\\[1\\] or ... or filters\\[n-1\\] or filters\\[n\\]", r#" /// ```notrust /// filters[0] or filters[1] or ... or filters[n-1] or filters[n] /// ```"#, ); fs::write(path, contents)?; Ok(()) } ================================================ FILE: crates/client/examples/container.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ use std::{fs, fs::File}; use client::{ services::v1::{ container::Runtime, containers_client::ContainersClient, tasks_client::TasksClient, Container, CreateContainerRequest, CreateTaskRequest, DeleteContainerRequest, DeleteTaskRequest, StartRequest, WaitRequest, }, with_namespace, }; use containerd_client as client; use prost_types::Any; use tonic::Request; const CID: &str = "abc123"; const NAMESPACE: &str = "default"; /// Make sure you run containerd before running this example. /// NOTE: to run this example, you must prepare a rootfs. #[tokio::main(flavor = "current_thread")] async fn main() { let channel = client::connect("/run/containerd/containerd.sock") .await .expect("Connect Failed"); let mut client = ContainersClient::new(channel.clone()); let rootfs = "/tmp/busybox/bundle/rootfs"; // the container will run with command `echo $output` let output = "hello rust client"; let spec = include_str!("container_spec.json"); let spec = spec .to_string() .replace("$ROOTFS", rootfs) .replace("$OUTPUT", output); let spec = Any { type_url: "types.containerd.io/opencontainers/runtime-spec/1/Spec".to_string(), value: spec.into_bytes(), }; let container = Container { id: CID.to_string(), image: "docker.io/library/alpine:latest".to_string(), runtime: Some(Runtime { name: "io.containerd.runc.v2".to_string(), options: None, }), spec: Some(spec), ..Default::default() }; let req = CreateContainerRequest { container: Some(container), }; let req = with_namespace!(req, NAMESPACE); let _resp = client .create(req) .await .expect("Failed to create container"); println!("Container: {:?} created", 
CID); // create temp dir for stdin/stdout/stderr let tmp = std::env::temp_dir().join("containerd-client-test"); fs::create_dir_all(&tmp).expect("Failed to create temp directory"); let stdin = tmp.join("stdin"); let stdout = tmp.join("stdout"); let stderr = tmp.join("stderr"); File::create(&stdin).expect("Failed to create stdin"); File::create(&stdout).expect("Failed to create stdout"); File::create(&stderr).expect("Failed to create stderr"); // create and start task let mut client = TasksClient::new(channel.clone()); let req = CreateTaskRequest { container_id: CID.to_string(), stdin: stdin.to_str().unwrap().to_string(), stdout: stdout.to_str().unwrap().to_string(), stderr: stderr.to_str().unwrap().to_string(), ..Default::default() }; let req = with_namespace!(req, NAMESPACE); let _resp = client.create(req).await.expect("Failed to create task"); println!("Task: {:?} created", CID); let req = StartRequest { container_id: CID.to_string(), ..Default::default() }; let req = with_namespace!(req, NAMESPACE); let _resp = client.start(req).await.expect("Failed to start task"); println!("Task: {:?} started", CID); // wait task let req = WaitRequest { container_id: CID.to_string(), ..Default::default() }; let req = with_namespace!(req, NAMESPACE); let _resp = client.wait(req).await.expect("Failed to wait task"); println!("Task: {:?} stopped", CID); // delete task let req = DeleteTaskRequest { container_id: CID.to_string(), }; let req = with_namespace!(req, NAMESPACE); let _resp = client.delete(req).await.expect("Failed to delete task"); println!("Task: {:?} deleted", CID); // delete container let mut client = ContainersClient::new(channel); let req = DeleteContainerRequest { id: CID.to_string(), }; let req = with_namespace!(req, NAMESPACE); let _resp = client .delete(req) .await .expect("Failed to delete container"); println!("Container: {:?} deleted", CID); // test container output let actual_stdout = fs::read_to_string(stdout).expect("read stdout actual"); 
assert_eq!(actual_stdout.strip_suffix('\n').unwrap(), output); // clear stdin/stdout/stderr let _ = fs::remove_dir_all(tmp); } ================================================ FILE: crates/client/examples/container_events.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ use client::{ events::{ContainerCreate, ContainerDelete}, services::v1::{events_client::EventsClient, SubscribeRequest}, }; use containerd_client as client; /// Make sure you run containerd before running this example. #[tokio::main(flavor = "current_thread")] async fn main() { let channel = client::connect("/run/containerd/containerd.sock") .await .expect("Connect Failed"); let mut client = EventsClient::new(channel.clone()); let request = SubscribeRequest::default(); let mut response = client .subscribe(request) .await .expect("failed to subscribe to events") .into_inner(); loop { match response.message().await { Ok(event) => { if let Some(event) = event { match event.topic.as_str() { "/containers/create" => { if let Some(mut payload) = event.event { // Containerd doesn't send event payloads with a leading slash on the type URL, which is // required by the `Any` type specification. We add it manually here so that `prost` can // properly decode the payload. 
if !payload.type_url.starts_with('/') { payload.type_url.insert(0, '/'); } let payload: ContainerCreate = payload .to_msg() .expect("failed to parse ContainerCreate payload"); println!( "container created: id={} payload={:?}", payload.id, payload ); } } "/containers/delete" => { if let Some(mut payload) = event.event { // Containerd doesn't send event payloads with a leading slash on the type URL, which is // required by the `Any` type specification. We add it manually here so that `prost` can // properly decode the payload. if !payload.type_url.starts_with('/') { payload.type_url.insert(0, '/'); } let payload: ContainerDelete = payload .to_msg() .expect("failed to parse ContainerDelete payload"); println!( "container deleted: id={} payload={:?}", payload.id, payload ); } } _ => {} } } } Err(e) => { eprintln!("error while streaming events: {:?}", e); break; } } } } ================================================ FILE: crates/client/examples/container_pull.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ use std::env::consts; use client::{ services::v1::{transfer_client::TransferClient, TransferOptions, TransferRequest}, to_any, types::{ transfer::{ImageStore, OciRegistry, UnpackConfiguration}, Platform, }, with_namespace, }; use containerd_client as client; use tonic::Request; const IMAGE: &str = "docker.io/library/alpine:latest"; const NAMESPACE: &str = "default"; /// Make sure you run containerd before running this example. /// NOTE: to run this example, you must prepare a rootfs. #[tokio::main(flavor = "current_thread")] async fn main() { let arch = match consts::ARCH { "x86_64" => "amd64", "aarch64" => "arm64", _ => consts::ARCH, }; let channel = client::connect("/run/containerd/containerd.sock") .await .expect("Connect Failed"); let mut client = TransferClient::new(channel.clone()); // Create the source (OCIRegistry) let source = OciRegistry { reference: IMAGE.to_string(), resolver: Default::default(), }; let platform = Platform { os: "linux".to_string(), architecture: arch.to_string(), variant: "".to_string(), os_version: "".to_string(), os_features: vec![], }; // Create the destination (ImageStore) let destination = ImageStore { name: IMAGE.to_string(), platforms: vec![platform.clone()], unpacks: vec![UnpackConfiguration { platform: Some(platform), ..Default::default() }], ..Default::default() }; let anys = to_any(&source); let anyd = to_any(&destination); println!("Pulling image for linux/{} from source: {:?}", arch, source); // Create the transfer request let request = TransferRequest { source: Some(anys), destination: Some(anyd), options: Some(TransferOptions { ..Default::default() }), }; // Execute the transfer (pull) client .transfer(with_namespace!(request, NAMESPACE)) .await .expect("unable to transfer image"); } ================================================ FILE: crates/client/examples/container_spec.json ================================================ { "ociVersion": "1.0.0-rc2-dev", "platform": { "os": "linux", "arch": "amd64" }, "process": 
{ "terminal": false, "consoleSize": { "height": 0, "width": 0 }, "user": { "uid": 0, "gid": 0 }, "args": [ "echo", "$OUTPUT" ], "env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "TERM=xterm" ], "cwd": "/", "rlimits": [{ "type": "RLIMIT_NOFILE", "hard": 1024, "soft": 1024 }], "noNewPrivileges": true }, "root": { "path": "$ROOTFS", "readonly": false }, "hostname": "test", "mounts": [{ "destination": "/proc", "type": "proc", "source": "proc" }, { "destination": "/dev", "type": "tmpfs", "source": "tmpfs", "options": [ "nosuid", "strictatime", "mode=755", "size=65536k" ] }, { "destination": "/dev/pts", "type": "devpts", "source": "devpts", "options": [ "nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5" ] }, { "destination": "/dev/shm", "type": "tmpfs", "source": "shm", "options": [ "nosuid", "noexec", "nodev", "mode=1777", "size=65536k" ] }, { "destination": "/dev/mqueue", "type": "mqueue", "source": "mqueue", "options": [ "nosuid", "noexec", "nodev" ] }, { "destination": "/sys", "type": "sysfs", "source": "sysfs", "options": [ "nosuid", "noexec", "nodev", "ro" ] }, { "destination": "/sys/fs/cgroup", "type": "cgroup", "source": "cgroup", "options": [ "nosuid", "noexec", "nodev", "relatime", "ro" ] } ], "hooks": {}, "linux": { "devices": [], "cgroupsPath": "kata/vfiotest", "resources": { "devices": [ {"allow":false,"access":"rwm"}, {"allow":true,"type":"c","major":1,"minor":3,"access":"rwm"}, {"allow":true,"type":"c","major":1,"minor":5,"access":"rwm"}, {"allow":true,"type":"c","major":1,"minor":8,"access":"rwm"}, {"allow":true,"type":"c","major":1,"minor":9,"access":"rwm"}, {"allow":true,"type":"c","major":5,"minor":0,"access":"rwm"}, {"allow":true,"type":"c","major":5,"minor":1,"access":"rwm"} ] }, "namespaces": [{ "type": "pid" }, { "type": "network" }, { "type": "ipc" }, { "type": "uts" }, { "type": "mount" } ], "maskedPaths": [ "/proc/kcore", "/proc/latency_stats", "/proc/timer_list", "/proc/timer_stats", 
"/proc/sched_debug", "/sys/firmware" ], "readonlyPaths": [ "/proc/asound", "/proc/bus", "/proc/fs", "/proc/irq", "/proc/sys", "/proc/sysrq-trigger" ] } } ================================================ FILE: crates/client/examples/version.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ use containerd_client::Client; /// Make sure you run containerd before running this example. #[tokio::main(flavor = "current_thread")] async fn main() { #[cfg(unix)] let path = "/var/run/containerd/containerd.sock"; #[cfg(windows)] let path = r"\\.\pipe\containerd-containerd"; let client = Client::from_path(path).await.expect("Connect failed"); let resp = client .version() .version(()) .await .expect("Failed to query version"); println!("Response: {:?}", resp.get_ref()); } ================================================ FILE: crates/client/rsync.txt ================================================ api/events/*.proto api/services/**/*.proto api/types/*.proto api/types/**/*.proto protobuf/plugin/fieldpath.proto ================================================ FILE: crates/client/src/lib.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #![cfg_attr(feature = "docs", doc = include_str!("../README.md"))] // No way to derive Eq with tonic :( // See https://github.com/hyperium/tonic/issues/1056 #![allow(clippy::derive_partial_eq_without_eq)] pub use tonic; /// Generated `containerd.types` types. pub mod types { tonic::include_proto!("containerd.types"); pub mod v1 { tonic::include_proto!("containerd.v1.types"); } pub mod transfer { tonic::include_proto!("containerd.types.transfer"); } } /// Generated `google.rpc` types, containerd services typically use some of these types. pub mod google { #[allow(rustdoc::broken_intra_doc_links)] pub mod rpc { tonic::include_proto!("google.rpc"); } } /// Generated `containerd.services.*` services. pub mod services { #[allow(clippy::tabs_in_doc_comments)] #[allow(rustdoc::invalid_rust_codeblocks)] #[allow(rustdoc::invalid_html_tags)] pub mod v1 { tonic::include_proto!("containerd.services.containers.v1"); tonic::include_proto!("containerd.services.content.v1"); tonic::include_proto!("containerd.services.diff.v1"); tonic::include_proto!("containerd.services.events.v1"); tonic::include_proto!("containerd.services.images.v1"); tonic::include_proto!("containerd.services.introspection.v1"); tonic::include_proto!("containerd.services.leases.v1"); tonic::include_proto!("containerd.services.namespaces.v1"); tonic::include_proto!("containerd.services.streaming.v1"); tonic::include_proto!("containerd.services.tasks.v1"); tonic::include_proto!("containerd.services.transfer.v1"); // Sandbox services (Controller and Store) don't make it clear that they are for sandboxes. 
// Wrap these into a sub module to make the names more clear. pub mod sandbox { tonic::include_proto!("containerd.services.sandbox.v1"); } // Snapshot's `Info` conflicts with Content's `Info`, so wrap it into a separate sub module. pub mod snapshots { tonic::include_proto!("containerd.services.snapshots.v1"); } tonic::include_proto!("containerd.services.version.v1"); } } /// Generated event types. pub mod events { tonic::include_proto!("containerd.events"); } /// Connect creates a unix channel to containerd GRPC socket. /// /// This helper intended to be used in conjunction with [Tokio](https://tokio.rs) runtime. #[cfg(feature = "connect")] pub async fn connect( path: impl AsRef, ) -> Result { use tonic::transport::Endpoint; let path = path.as_ref().to_path_buf(); // Taken from https://github.com/hyperium/tonic/blob/71fca362d7ffbb230547f23b3f2fb75c414063a8/examples/src/uds/client.rs#L21-L28 // There will ignore this uri because uds do not use it // and make connection with UnixStream::connect. let channel = Endpoint::try_from("http://[::]")? .connect_with_connector(tower::service_fn(move |_| { let path = path.clone(); async move { #[cfg(unix)] { Ok::<_, std::io::Error>(hyper_util::rt::TokioIo::new( tokio::net::UnixStream::connect(path).await?, )) } #[cfg(windows)] { let client = tokio::net::windows::named_pipe::ClientOptions::new() .open(&path) .map_err(|e| std::io::Error::from(e))?; Ok::<_, std::io::Error>(hyper_util::rt::TokioIo::new(client)) } } })) .await?; Ok(channel) } use prost::{Message, Name}; use prost_types::Any; // to_any provides a helper to match the current use of the protobuf "fullname" trait // in the Go code on the gRPC server side in containerd when handling matching of Any // types to registered types on the server. 
Further discussion on future direction // of typeurl in this issue: https://github.com/containerd/rust-extensions/issues/362 pub fn to_any(m: &T) -> Any { let mut anyt = Any::from_msg(m).unwrap(); anyt.type_url = T::full_name(); anyt } /// Help to inject namespace into request. /// /// To use this macro, the `tonic::Request` is needed. #[macro_export] macro_rules! with_namespace { ($req:expr, $ns:expr) => {{ let mut req = Request::new($req); let md = req.metadata_mut(); // https://github.com/containerd/containerd/blob/main/pkg/namespaces/grpc.go#L27 md.insert("containerd-namespace", $ns.parse().unwrap()); req }}; } use services::v1::{ containers_client::ContainersClient, content_client::ContentClient, diff_client::DiffClient, events_client::EventsClient, images_client::ImagesClient, introspection_client::IntrospectionClient, leases_client::LeasesClient, namespaces_client::NamespacesClient, sandbox::{controller_client::ControllerClient, store_client::StoreClient}, snapshots::snapshots_client::SnapshotsClient, streaming_client::StreamingClient, tasks_client::TasksClient, transfer_client::TransferClient, version_client::VersionClient, }; use tonic::transport::{Channel, Error}; /// Client to containerd's APIs. pub struct Client { channel: Channel, } impl From for Client { fn from(value: Channel) -> Self { Self { channel: value } } } impl Client { /// Create a new client from UDS socket. #[cfg(feature = "connect")] pub async fn from_path(path: impl AsRef) -> Result { let channel = connect(path).await?; Ok(Self { channel }) } /// Access to the underlying Tonic channel. #[inline] pub fn channel(&self) -> Channel { self.channel.clone() } /// Version service. #[inline] pub fn version(&self) -> VersionClient { VersionClient::new(self.channel()) } /// Task service client. #[inline] pub fn tasks(&self) -> TasksClient { TasksClient::new(self.channel()) } /// Transfer service client. 
#[inline]
pub fn transfer(&self) -> TransferClient {
    TransferClient::new(self.channel())
}

/// Sandbox store client.
#[inline]
pub fn sandbox_store(&self) -> StoreClient {
    StoreClient::new(self.channel())
}

/// Streaming services client.
#[inline]
pub fn streaming(&self) -> StreamingClient {
    StreamingClient::new(self.channel())
}

/// Sandbox controller client.
#[inline]
pub fn sandbox_controller(&self) -> ControllerClient {
    ControllerClient::new(self.channel())
}

/// Snapshots service.
#[inline]
pub fn snapshots(&self) -> SnapshotsClient {
    SnapshotsClient::new(self.channel())
}

/// Namespaces service.
#[inline]
pub fn namespaces(&self) -> NamespacesClient {
    NamespacesClient::new(self.channel())
}

/// Leases service.
#[inline]
pub fn leases(&self) -> LeasesClient {
    LeasesClient::new(self.channel())
}

/// Introspection service.
#[inline]
pub fn introspection(&self) -> IntrospectionClient {
    IntrospectionClient::new(self.channel())
}

/// Image service.
#[inline]
pub fn images(&self) -> ImagesClient {
    ImagesClient::new(self.channel())
}

/// Event service.
#[inline]
pub fn events(&self) -> EventsClient {
    EventsClient::new(self.channel())
}

/// Diff service.
#[inline]
pub fn diff(&self) -> DiffClient {
    DiffClient::new(self.channel())
}

/// Content service.
#[inline]
pub fn content(&self) -> ContentClient {
    ContentClient::new(self.channel())
}

/// Container service.
#[inline] pub fn containers(&self) -> ContainersClient { ContainersClient::new(self.channel()) } } #[cfg(test)] mod tests { use prost_types::Any; use crate::events::ContainerCreate; #[test] fn any_roundtrip() { let original = ContainerCreate { id: "test".to_string(), image: "test".to_string(), runtime: None, }; let any = Any::from_msg(&original).expect("should not fail to encode"); let decoded: ContainerCreate = any.to_msg().expect("should not fail to decode"); assert_eq!(original, decoded) } } ================================================ FILE: crates/client/vendor/README.md ================================================ # Vendor This directory contains vendor dependencies needed to generate protobuf bindings. Proto files are copy-pasted directly from upstream repos: + https://github.com/containerd/containerd + https://github.com/protocolbuffers/protobuf + https://github.com/gogo/protobuf ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/api/events/container.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ syntax = "proto3"; package containerd.events; import "google/protobuf/any.proto"; import "types/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.types.fieldpath_all) = true; message ContainerCreate { string id = 1; string image = 2; message Runtime { string name = 1; google.protobuf.Any options = 2; } Runtime runtime = 3; } message ContainerUpdate { string id = 1; string image = 2; map labels = 3; string snapshot_key = 4; } message ContainerDelete { string id = 1; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/api/events/content.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.events; import "types/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.types.fieldpath_all) = true; message ContentCreate { string digest = 1; int64 size = 2; } message ContentDelete { string digest = 1; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/api/events/image.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.services.images.v1; import "types/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.types.fieldpath_all) = true; message ImageCreate { string name = 1; map labels = 2; } message ImageUpdate { string name = 1; map labels = 2; } message ImageDelete { string name = 1; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/api/events/namespace.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ syntax = "proto3"; package containerd.events; import "types/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.types.fieldpath_all) = true; message NamespaceCreate { string name = 1; map labels = 2; } message NamespaceUpdate { string name = 1; map labels = 2; } message NamespaceDelete { string name = 1; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/api/events/sandbox.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.events; import "google/protobuf/timestamp.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; message SandboxCreate { string sandbox_id = 1; } message SandboxStart { string sandbox_id = 1; } message SandboxExit { string sandbox_id = 1; uint32 exit_status = 2; google.protobuf.Timestamp exited_at = 3; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/api/events/snapshot.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.events; import "types/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.types.fieldpath_all) = true; message SnapshotPrepare { string key = 1; string parent = 2; string snapshotter = 5; } message SnapshotCommit { string key = 1; string name = 2; string snapshotter = 5; } message SnapshotRemove { string key = 1; string snapshotter = 5; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/api/events/task.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ syntax = "proto3"; package containerd.events; import "google/protobuf/timestamp.proto"; import "types/fieldpath.proto"; import "types/mount.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.types.fieldpath_all) = true; message TaskCreate { string container_id = 1; string bundle = 2; repeated containerd.types.Mount rootfs = 3; TaskIO io = 4; string checkpoint = 5; uint32 pid = 6; } message TaskStart { string container_id = 1; uint32 pid = 2; } message TaskDelete { string container_id = 1; uint32 pid = 2; uint32 exit_status = 3; google.protobuf.Timestamp exited_at = 4; // id is the specific exec. By default if omitted will be `""` thus matches // the init exec of the task matching `container_id`. string id = 5; } message TaskIO { string stdin = 1; string stdout = 2; string stderr = 3; bool terminal = 4; } message TaskExit { string container_id = 1; string id = 2; uint32 pid = 3; uint32 exit_status = 4; google.protobuf.Timestamp exited_at = 5; } message TaskOOM { string container_id = 1; } message TaskExecAdded { string container_id = 1; string exec_id = 2; } message TaskExecStarted { string container_id = 1; string exec_id = 2; uint32 pid = 3; } message TaskPaused { string container_id = 1; } message TaskResumed { string container_id = 1; } message TaskCheckpointed { string container_id = 1; string checkpoint = 2; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.services.containers.v1; import "google/protobuf/any.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/field_mask.proto"; import "google/protobuf/timestamp.proto"; option go_package = "github.com/containerd/containerd/api/services/containers/v1;containers"; // Containers provides metadata storage for containers used in the execution // service. // // The objects here provide an state-independent view of containers for use in // management and resource pinning. From that perspective, containers do not // have a "state" but rather this is the set of resources that will be // considered in use by the container. // // From the perspective of the execution service, these objects represent the // base parameters for creating a container process. // // In general, when looking to add fields for this type, first ask yourself // whether or not the function of the field has to do with runtime execution or // is invariant of the runtime state of the container. If it has to do with // runtime, or changes as the "container" is started and stops, it probably // doesn't belong on this object. 
service Containers { rpc Get(GetContainerRequest) returns (GetContainerResponse); rpc List(ListContainersRequest) returns (ListContainersResponse); rpc ListStream(ListContainersRequest) returns (stream ListContainerMessage); rpc Create(CreateContainerRequest) returns (CreateContainerResponse); rpc Update(UpdateContainerRequest) returns (UpdateContainerResponse); rpc Delete(DeleteContainerRequest) returns (google.protobuf.Empty); } message Container { // ID is the user-specified identifier. // // This field may not be updated. string id = 1; // Labels provides an area to include arbitrary data on containers. // // The combined size of a key/value pair cannot exceed 4096 bytes. // // Note that to add a new value to this field, read the existing set and // include the entire result in the update call. map labels = 2; // Image contains the reference of the image used to build the // specification and snapshots for running this container. // // If this field is updated, the spec and rootfs needed to updated, as well. string image = 3; message Runtime { // Name is the name of the runtime. string name = 1; // Options specify additional runtime initialization options. google.protobuf.Any options = 2; } // Runtime specifies which runtime to use for executing this container. Runtime runtime = 4; // Spec to be used when creating the container. This is runtime specific. google.protobuf.Any spec = 5; // Snapshotter specifies the snapshotter name used for rootfs string snapshotter = 6; // SnapshotKey specifies the snapshot key to use for the container's root // filesystem. When starting a task from this container, a caller should // look up the mounts from the snapshot service and include those on the // task create request. // // Snapshots referenced in this field will not be garbage collected. // // This field is set to empty when the rootfs is not a snapshot. // // This field may be updated. string snapshot_key = 7; // CreatedAt is the time the container was first created. 
google.protobuf.Timestamp created_at = 8; // UpdatedAt is the last time the container was mutated. google.protobuf.Timestamp updated_at = 9; // Extensions allow clients to provide zero or more blobs that are directly // associated with the container. One may provide protobuf, json, or other // encoding formats. The primary use of this is to further decorate the // container object with fields that may be specific to a client integration. // // The key portion of this map should identify a "name" for the extension // that should be unique against other extensions. When updating extension // data, one should only update the specified extension using field paths // to select a specific map key. map extensions = 10; // Sandbox ID this container belongs to. string sandbox = 11; } message GetContainerRequest { string id = 1; } message GetContainerResponse { Container container = 1; } message ListContainersRequest { // Filters contains one or more filters using the syntax defined in the // containerd filter package. // // The returned result will be those that match any of the provided // filters. Expanded, containers that match the following will be // returned: // // filters[0] or filters[1] or ... or filters[n-1] or filters[n] // // If filters is zero-length or nil, all items will be returned. repeated string filters = 1; } message ListContainersResponse { repeated Container containers = 1; } message CreateContainerRequest { Container container = 1; } message CreateContainerResponse { Container container = 1; } // UpdateContainerRequest updates the metadata on one or more container. // // The operation should follow semantics described in // https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-mask, // unless otherwise qualified. message UpdateContainerRequest { // Container provides the target values, as declared by the mask, for the update. // // The ID field must be set. 
Container container = 1; // UpdateMask specifies which fields to perform the update on. If empty, // the operation applies to all fields. google.protobuf.FieldMask update_mask = 2; } message UpdateContainerResponse { Container container = 1; } message DeleteContainerRequest { string id = 1; } message ListContainerMessage { Container container = 1; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.services.content.v1; import "google/protobuf/empty.proto"; import "google/protobuf/field_mask.proto"; import "google/protobuf/timestamp.proto"; option go_package = "github.com/containerd/containerd/api/services/content/v1;content"; // Content provides access to a content addressable storage system. service Content { // Info returns information about a committed object. // // This call can be used for getting the size of content and checking for // existence. rpc Info(InfoRequest) returns (InfoResponse); // Update updates content metadata. // // This call can be used to manage the mutable content labels. The // immutable metadata such as digest, size, and committed at cannot // be updated. rpc Update(UpdateRequest) returns (UpdateResponse); // List streams the entire set of content as Info objects and closes the // stream. 
// // Typically, this will yield a large response, chunked into messages. // Clients should make provisions to ensure they can handle the entire data // set. rpc List(ListContentRequest) returns (stream ListContentResponse); // Delete will delete the referenced object. rpc Delete(DeleteContentRequest) returns (google.protobuf.Empty); // Read allows one to read an object based on the offset into the content. // // The requested data may be returned in one or more messages. rpc Read(ReadContentRequest) returns (stream ReadContentResponse); // Status returns the status for a single reference. rpc Status(StatusRequest) returns (StatusResponse); // ListStatuses returns the status of ongoing object ingestions, started via // Write. // // Only those matching the regular expression will be provided in the // response. If the provided regular expression is empty, all ingestions // will be provided. rpc ListStatuses(ListStatusesRequest) returns (ListStatusesResponse); // Write begins or resumes writes to a resource identified by a unique ref. // Only one active stream may exist at a time for each ref. // // Once a write stream has started, it may only write to a single ref, thus // once a stream is started, the ref may be omitted on subsequent writes. // // For any write transaction represented by a ref, only a single write may // be made to a given offset. If overlapping writes occur, it is an error. // Writes should be sequential and implementations may throw an error if // this is required. // // If expected_digest is set and already part of the content store, the // write will fail. // // When completed, the commit flag should be set to true. If expected size // or digest is set, the content will be validated against those values. rpc Write(stream WriteContentRequest) returns (stream WriteContentResponse); // Abort cancels the ongoing write named in the request. Any resources // associated with the write will be collected. 
rpc Abort(AbortRequest) returns (google.protobuf.Empty); } message Info { // Digest is the hash identity of the blob. string digest = 1; // Size is the total number of bytes in the blob. int64 size = 2; // CreatedAt provides the time at which the blob was committed. google.protobuf.Timestamp created_at = 3; // UpdatedAt provides the time the info was last updated. google.protobuf.Timestamp updated_at = 4; // Labels are arbitrary data on snapshots. // // The combined size of a key/value pair cannot exceed 4096 bytes. map labels = 5; } message InfoRequest { string digest = 1; } message InfoResponse { Info info = 1; } message UpdateRequest { Info info = 1; // UpdateMask specifies which fields to perform the update on. If empty, // the operation applies to all fields. // // In info, Digest, Size, and CreatedAt are immutable, // other field may be updated using this mask. // If no mask is provided, all mutable field are updated. google.protobuf.FieldMask update_mask = 2; } message UpdateResponse { Info info = 1; } message ListContentRequest { // Filters contains one or more filters using the syntax defined in the // containerd filter package. // // The returned result will be those that match any of the provided // filters. Expanded, containers that match the following will be // returned: // // filters[0] or filters[1] or ... or filters[n-1] or filters[n] // // If filters is zero-length or nil, all items will be returned. repeated string filters = 1; } message ListContentResponse { repeated Info info = 1; } message DeleteContentRequest { // Digest specifies which content to delete. string digest = 1; } // ReadContentRequest defines the fields that make up a request to read a portion of // data from a stored object. message ReadContentRequest { // Digest is the hash identity to read. string digest = 1; // Offset specifies the number of bytes from the start at which to begin // the read. If zero or less, the read will be from the start. 
This uses // standard zero-indexed semantics. int64 offset = 2; // size is the total size of the read. If zero, the entire blob will be // returned by the service. int64 size = 3; } // ReadContentResponse carries byte data for a read request. message ReadContentResponse { int64 offset = 1; // offset of the returned data bytes data = 2; // actual data } message Status { google.protobuf.Timestamp started_at = 1; google.protobuf.Timestamp updated_at = 2; string ref = 3; int64 offset = 4; int64 total = 5; string expected = 6; } message StatusRequest { string ref = 1; } message StatusResponse { Status status = 1; } message ListStatusesRequest { repeated string filters = 1; } message ListStatusesResponse { repeated Status statuses = 1; } // WriteAction defines the behavior of a WriteRequest. enum WriteAction { // WriteActionStat instructs the writer to return the current status while // holding the lock on the write. STAT = 0; // WriteActionWrite sets the action for the write request to write data. // // Any data included will be written at the provided offset. The // transaction will be left open for further writes. // // This is the default. WRITE = 1; // WriteActionCommit will write any outstanding data in the message and // commit the write, storing it under the digest. // // This can be used in a single message to send the data, verify it and // commit it. // // This action will always terminate the write. COMMIT = 2; } // WriteContentRequest writes data to the request ref at offset. message WriteContentRequest { // Action sets the behavior of the write. // // When this is a write and the ref is not yet allocated, the ref will be // allocated and the data will be written at offset. // // If the action is write and the ref is allocated, it will accept data to // an offset that has not yet been written. // // If the action is write and there is no data, the current write status // will be returned. This works differently from status because the stream // holds a lock. 
WriteAction action = 1; // Ref identifies the pre-commit object to write to. string ref = 2; // Total can be set to have the service validate the total size of the // committed content. // // The latest value before or with the commit action message will be use to // validate the content. If the offset overflows total, the service may // report an error. It is only required on one message for the write. // // If the value is zero or less, no validation of the final content will be // performed. int64 total = 3; // Expected can be set to have the service validate the final content against // the provided digest. // // If the digest is already present in the object store, an AlreadyExists // error will be returned. // // Only the latest version will be used to check the content against the // digest. It is only required to include it on a single message, before or // with the commit action message. string expected = 4; // Offset specifies the number of bytes from the start at which to begin // the write. For most implementations, this means from the start of the // file. This uses standard, zero-indexed semantics. // // If the action is write, the remote may remove all previously written // data after the offset. Implementations may support arbitrary offsets but // MUST support reseting this value to zero with a write. If an // implementation does not support a write at a particular offset, an // OutOfRange error must be returned. int64 offset = 5; // Data is the actual bytes to be written. // // If this is empty and the message is not a commit, a response will be // returned with the current write state. bytes data = 6; // Labels are arbitrary data on snapshots. // // The combined size of a key/value pair cannot exceed 4096 bytes. map labels = 7; } // WriteContentResponse is returned on the culmination of a write call. message WriteContentResponse { // Action contains the action for the final message of the stream. 
A writer // should confirm that they match the intended result. WriteAction action = 1; // StartedAt provides the time at which the write began. // // This must be set for stat and commit write actions. All other write // actions may omit this. google.protobuf.Timestamp started_at = 2; // UpdatedAt provides the last time of a successful write. // // This must be set for stat and commit write actions. All other write // actions may omit this. google.protobuf.Timestamp updated_at = 3; // Offset is the current committed size for the write. int64 offset = 4; // Total provides the current, expected total size of the write. // // We include this to provide consistency with the Status structure on the // client writer. // // This is only valid on the Stat and Commit response. int64 total = 5; // Digest, if present, includes the digest up to the currently committed // bytes. If action is commit, this field will be set. It is implementation // defined if this is set for other actions. string digest = 6; } message AbortRequest { string ref = 1; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ syntax = "proto3"; package containerd.services.diff.v1; import "google/protobuf/any.proto"; import "google/protobuf/timestamp.proto"; import "types/descriptor.proto"; import "types/mount.proto"; option go_package = "github.com/containerd/containerd/api/services/diff/v1;diff"; // Diff service creates and applies diffs service Diff { // Apply applies the content associated with the provided digests onto // the provided mounts. Archive content will be extracted and // decompressed if necessary. rpc Apply(ApplyRequest) returns (ApplyResponse); // Diff creates a diff between the given mounts and uploads the result // to the content store. rpc Diff(DiffRequest) returns (DiffResponse); } message ApplyRequest { // Diff is the descriptor of the diff to be extracted containerd.types.Descriptor diff = 1; repeated containerd.types.Mount mounts = 2; map payloads = 3; // SyncFs is to synchronize the underlying filesystem containing files. bool sync_fs = 4; } message ApplyResponse { // Applied is the descriptor for the object which was applied. // If the input was a compressed blob then the result will be // the descriptor for the uncompressed blob. containerd.types.Descriptor applied = 1; } message DiffRequest { // Left are the mounts which represent the older copy // in which is the base of the computed changes. repeated containerd.types.Mount left = 1; // Right are the mounts which represents the newer copy // in which changes from the left were made into. repeated containerd.types.Mount right = 2; // MediaType is the media type descriptor for the created diff // object string media_type = 3; // Ref identifies the pre-commit content store object. This // reference can be used to get the status from the content store. string ref = 4; // Labels are the labels to apply to the generated content // on content store commit. map labels = 5; // SourceDateEpoch specifies the timestamp used to provide control for reproducibility. 
// See also https://reproducible-builds.org/docs/source-date-epoch/ .
    //
    // Since containerd v2.0, the whiteout timestamps are set to zero (1970-01-01),
    // not to the source date epoch.
    google.protobuf.Timestamp source_date_epoch = 6;
}

message DiffResponse {
    // Diff is the descriptor of the diff which can be applied
    containerd.types.Descriptor diff = 3;
}


================================================
FILE: crates/client/vendor/github.com/containerd/containerd/api/services/events/v1/events.proto
================================================
/*
    Copyright The containerd Authors.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

syntax = "proto3";

package containerd.services.events.v1;

import "google/protobuf/any.proto";
import "google/protobuf/empty.proto";
import "types/event.proto";

option go_package = "github.com/containerd/containerd/api/services/events/v1;events";

service Events {
    // Publish an event to a topic.
    //
    // The event will be packed into a timestamp envelope with the namespace
    // introspected from the context. The envelope will then be dispatched.
    rpc Publish(PublishRequest) returns (google.protobuf.Empty);

    // Forward sends an event that has already been packaged into an envelope
    // with a timestamp and namespace.
    //
    // This is useful if earlier timestamping is required or when forwarding on
    // behalf of another component, namespace or publisher.
rpc Forward(ForwardRequest) returns (google.protobuf.Empty);

    // Subscribe to a stream of events, possibly returning only that match any
    // of the provided filters.
    //
    // Unlike many other methods in containerd, subscribers will get messages
    // from all namespaces unless otherwise specified. If this is not desired,
    // a filter can be provided in the format 'namespace==<namespace>' to
    // restrict the received events.
    rpc Subscribe(SubscribeRequest) returns (stream containerd.types.Envelope);
}

message PublishRequest {
    string topic = 1;
    google.protobuf.Any event = 2;
}

message ForwardRequest {
    containerd.types.Envelope envelope = 1;
}

message SubscribeRequest {
    repeated string filters = 1;
}


================================================
FILE: crates/client/vendor/github.com/containerd/containerd/api/services/images/v1/images.proto
================================================
/*
    Copyright The containerd Authors.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

syntax = "proto3";

package containerd.services.images.v1;

import "google/protobuf/empty.proto";
import "google/protobuf/field_mask.proto";
import "google/protobuf/timestamp.proto";
import "types/descriptor.proto";

option go_package = "github.com/containerd/containerd/api/services/images/v1;images";

// Images is a service that allows one to register images with containerd.
//
// In containerd, an image is merely the mapping of a name to a content root,
// described by a descriptor. The behavior and state of image is purely
// dictated by the type of the descriptor.
//
// From the perspective of this service, these references are mostly shallow,
// in that the existence of the required content won't be validated until
// required by consuming services.
//
// As such, this can really be considered a "metadata service".
service Images {
    // Get returns an image by name.
    rpc Get(GetImageRequest) returns (GetImageResponse);

    // List returns a list of all images known to containerd.
    rpc List(ListImagesRequest) returns (ListImagesResponse);

    // Create an image record in the metadata store.
    //
    // The name of the image must be unique.
    rpc Create(CreateImageRequest) returns (CreateImageResponse);

    // Update assigns the name to a given target image based on the provided
    // image.
    rpc Update(UpdateImageRequest) returns (UpdateImageResponse);

    // Delete deletes the image by name.
    rpc Delete(DeleteImageRequest) returns (google.protobuf.Empty);
}

message Image {
    // Name provides a unique name for the image.
    //
    // Containerd treats this as the primary identifier.
    string name = 1;

    // Labels provides free form labels for the image. These are runtime only
    // and do not get inherited into the package image in any way.
    //
    // Labels may be updated using the field mask.
    // The combined size of a key/value pair cannot exceed 4096 bytes.
    map<string, string> labels = 2;

    // Target describes the content entry point of the image.
    containerd.types.Descriptor target = 3;

    // CreatedAt is the time the image was first created.
    google.protobuf.Timestamp created_at = 7;

    // UpdatedAt is the last time the image was mutated.
    google.protobuf.Timestamp updated_at = 8;
}

message GetImageRequest {
    string name = 1;
}

message GetImageResponse {
    Image image = 1;
}

message CreateImageRequest {
    Image image = 1;
    google.protobuf.Timestamp source_date_epoch = 2;
}

message CreateImageResponse {
    Image image = 1;
}

message UpdateImageRequest {
    // Image provides a full or partial image for update.
//
    // The name field must be set or an error will be returned.
    Image image = 1;

    // UpdateMask specifies which fields to perform the update on. If empty,
    // the operation applies to all fields.
    google.protobuf.FieldMask update_mask = 2;

    google.protobuf.Timestamp source_date_epoch = 3;
}

message UpdateImageResponse {
    Image image = 1;
}

message ListImagesRequest {
    // Filters contains one or more filters using the syntax defined in the
    // containerd filter package.
    //
    // The returned result will be those that match any of the provided
    // filters. Expanded, images that match the following will be
    // returned:
    //
    //   filters[0] or filters[1] or ... or filters[n-1] or filters[n]
    //
    // If filters is zero-length or nil, all items will be returned.
    repeated string filters = 1;
}

message ListImagesResponse {
    repeated Image images = 1;
}

message DeleteImageRequest {
    string name = 1;

    // Sync indicates that the delete and cleanup should be done
    // synchronously before returning to the caller
    //
    // Default is false
    bool sync = 2;

    // Target value for image to be deleted
    //
    // If image descriptor does not match the same digest,
    // the delete operation will return "not found" error.
    optional containerd.types.Descriptor target = 3;
}


================================================
FILE: crates/client/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto
================================================
/*
    Copyright The containerd Authors.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

syntax = "proto3";

package containerd.services.introspection.v1;

import "google/protobuf/any.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/timestamp.proto";
import "google/rpc/status.proto";
import "types/introspection.proto";
import "types/platform.proto";

option go_package = "github.com/containerd/containerd/api/services/introspection/v1;introspection";

service Introspection {
    // Plugins returns a list of plugins in containerd.
    //
    // Clients can use this to detect features and capabilities when using
    // containerd.
    rpc Plugins(PluginsRequest) returns (PluginsResponse);

    // Server returns information about the containerd server
    rpc Server(google.protobuf.Empty) returns (ServerResponse);

    // PluginInfo returns information directly from a plugin if the plugin supports it
    rpc PluginInfo(PluginInfoRequest) returns (PluginInfoResponse);
}

message Plugin {
    // Type defines the type of plugin.
    //
    // See package plugin for a list of possible values. Non core plugins may
    // define their own values during registration.
    string type = 1;

    // ID identifies the plugin uniquely in the system.
    string id = 2;

    // Requires lists the plugin types required by this plugin.
    repeated string requires = 3;

    // Platforms enumerates the platforms this plugin will support.
    //
    // If values are provided here, the plugin will only be operable under the
    // provided platforms.
    //
    // If this is empty, the plugin will work across all platforms.
    //
    // If the plugin prefers certain platforms over others, they should be
    // listed from most to least preferred.
    repeated types.Platform platforms = 4;

    // Exports allows plugins to provide values about state or configuration to
    // interested parties.
    //
    // One example is exposing the configured path of a snapshotter plugin.
    map<string, string> exports = 5;

    // Capabilities allows plugins to communicate feature switches to allow
    // clients to detect features that may not be on be default or may be
    // different from version to version.
//
    // Use this sparingly.
    repeated string capabilities = 6;

    // InitErr will be set if the plugin fails initialization.
    //
    // This means the plugin may have been registered but a non-terminal error
    // was encountered during initialization.
    //
    // Plugins that have this value set cannot be used.
    google.rpc.Status init_err = 7;
}

message PluginsRequest {
    // Filters contains one or more filters using the syntax defined in the
    // containerd filter package.
    //
    // The returned result will be those that match any of the provided
    // filters. Expanded, plugins that match the following will be
    // returned:
    //
    //   filters[0] or filters[1] or ... or filters[n-1] or filters[n]
    //
    // If filters is zero-length or nil, all items will be returned.
    repeated string filters = 1;
}

message PluginsResponse {
    repeated Plugin plugins = 1;
}

message ServerResponse {
    string uuid = 1;
    uint64 pid = 2;
    uint64 pidns = 3; // PID namespace, such as 4026531836
    repeated DeprecationWarning deprecations = 4;
}

message DeprecationWarning {
    string id = 1;
    string message = 2;
    google.protobuf.Timestamp last_occurrence = 3;
}

message PluginInfoRequest {
    string type = 1;
    string id = 2;

    // Options may be used to request extra dynamic information from
    // a plugin.
    // This object is determined by the plugin and the plugin may return
    // NotImplemented or InvalidArgument if it is not supported
    google.protobuf.Any options = 3;
}

message PluginInfoResponse {
    Plugin plugin = 1;
    google.protobuf.Any extra = 2;
}


================================================
FILE: crates/client/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto
================================================
/*
    Copyright The containerd Authors.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

syntax = "proto3";

package containerd.services.leases.v1;

import "google/protobuf/empty.proto";
import "google/protobuf/timestamp.proto";

option go_package = "github.com/containerd/containerd/api/services/leases/v1;leases";

// Leases service manages resources leases within the metadata store.
service Leases {
    // Create creates a new lease for managing changes to metadata. A lease
    // can be used to protect objects from being removed.
    rpc Create(CreateRequest) returns (CreateResponse);

    // Delete deletes the lease and makes any unreferenced objects created
    // during the lease eligible for garbage collection if not referenced
    // or retained by other resources during the lease.
    rpc Delete(DeleteRequest) returns (google.protobuf.Empty);

    // List lists all active leases, returning the full list of
    // leases and optionally including the referenced resources.
    rpc List(ListRequest) returns (ListResponse);

    // AddResource references the resource by the provided lease.
    rpc AddResource(AddResourceRequest) returns (google.protobuf.Empty);

    // DeleteResource dereferences the resource by the provided lease.
    rpc DeleteResource(DeleteResourceRequest) returns (google.protobuf.Empty);

    // ListResources lists all the resources referenced by the lease.
    rpc ListResources(ListResourcesRequest) returns (ListResourcesResponse);
}

// Lease is an object which retains resources while it exists.
message Lease {
    string id = 1;
    google.protobuf.Timestamp created_at = 2;
    map<string, string> labels = 3;
}

message CreateRequest {
    // ID is used to identity the lease, when the id is not set the service
    // generates a random identifier for the lease.
    string id = 1;
    map<string, string> labels = 3;
}

message CreateResponse {
    Lease lease = 1;
}

message DeleteRequest {
    string id = 1;

    // Sync indicates that the delete and cleanup should be done
    // synchronously before returning to the caller
    //
    // Default is false
    bool sync = 2;
}

message ListRequest {
    repeated string filters = 1;
}

message ListResponse {
    repeated Lease leases = 1;
}

message Resource {
    string id = 1;

    // For snapshotter resource, there are many snapshotter types here, like
    // overlayfs, devmapper etc. The type will be formatted with type,
    // like "snapshotter/overlayfs".
    string type = 2;
}

message AddResourceRequest {
    string id = 1;
    Resource resource = 2;
}

message DeleteResourceRequest {
    string id = 1;
    Resource resource = 2;
}

message ListResourcesRequest {
    string id = 1;
}

message ListResourcesResponse {
    repeated Resource resources = 1;
}


================================================
FILE: crates/client/vendor/github.com/containerd/containerd/api/services/mounts/v1/mounts.proto
================================================
/*
    Copyright The containerd Authors.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

syntax = "proto3";

package containerd.services.mounts.v1;

import "google/protobuf/empty.proto";
import "google/protobuf/field_mask.proto";
import "types/mount.proto";

option go_package = "github.com/containerd/containerd/api/services/mounts/v1;mounts";

// Mounts service manages mounts
service Mounts {
    rpc Activate(ActivateRequest) returns (ActivateResponse);
    rpc Deactivate(DeactivateRequest) returns (google.protobuf.Empty);
    rpc Info(InfoRequest) returns (InfoResponse);
    rpc Update(UpdateRequest) returns (UpdateResponse);
    rpc List(ListRequest) returns (stream ListMessage);
}

message ActivateRequest {
    string name = 1;
    repeated containerd.types.Mount mounts = 2;
    map<string, string> labels = 3;
    bool temporary = 4;
}

message ActivateResponse {
    containerd.types.ActivationInfo info = 1;
}

message DeactivateRequest {
    string name = 1;
}

message InfoRequest {
    string name = 1;
}

message InfoResponse {
    containerd.types.ActivationInfo info = 1;
}

message UpdateRequest {
    containerd.types.ActivationInfo info = 1;
    google.protobuf.FieldMask update_mask = 2;
}

message UpdateResponse {
    containerd.types.ActivationInfo info = 1;
}

message ListRequest {
    repeated string filters = 1;
}

message ListMessage {
    containerd.types.ActivationInfo info = 1;
}


================================================
FILE: crates/client/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto
================================================
/*
    Copyright The containerd Authors.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

syntax = "proto3";

package containerd.services.namespaces.v1;

import "google/protobuf/empty.proto";
import "google/protobuf/field_mask.proto";

option go_package = "github.com/containerd/containerd/api/services/namespaces/v1;namespaces";

// Namespaces provides the ability to manipulate containerd namespaces.
//
// All objects in the system are required to be a member of a namespace. If a
// namespace is deleted, all objects, including containers, images and
// snapshots, will be deleted, as well.
//
// Unless otherwise noted, operations in containerd apply only to the namespace
// supplied per request.
//
// I hope this goes without saying, but namespaces are themselves NOT
// namespaced.
service Namespaces {
    rpc Get(GetNamespaceRequest) returns (GetNamespaceResponse);
    rpc List(ListNamespacesRequest) returns (ListNamespacesResponse);
    rpc Create(CreateNamespaceRequest) returns (CreateNamespaceResponse);
    rpc Update(UpdateNamespaceRequest) returns (UpdateNamespaceResponse);
    rpc Delete(DeleteNamespaceRequest) returns (google.protobuf.Empty);
}

message Namespace {
    string name = 1;

    // Labels provides an area to include arbitrary data on namespaces.
    //
    // The combined size of a key/value pair cannot exceed 4096 bytes.
    //
    // Note that to add a new value to this field, read the existing set and
    // include the entire result in the update call.
    map<string, string> labels = 2;
}

message GetNamespaceRequest {
    string name = 1;
}

message GetNamespaceResponse {
    Namespace namespace = 1;
}

message ListNamespacesRequest {
    string filter = 1;
}

message ListNamespacesResponse {
    repeated Namespace namespaces = 1;
}

message CreateNamespaceRequest {
    Namespace namespace = 1;
}

message CreateNamespaceResponse {
    Namespace namespace = 1;
}

// UpdateNamespaceRequest updates the metadata for a namespace.
//
// The operation should follow semantics described in
// https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-mask,
// unless otherwise qualified.
message UpdateNamespaceRequest {
    // Namespace provides the target value, as declared by the mask, for the update.
    //
    // The namespace field must be set.
    Namespace namespace = 1;

    // UpdateMask specifies which fields to perform the update on. If empty,
    // the operation applies to all fields.
    //
    // For the most part, this applies only to selectively updating labels on
    // the namespace. While field masks are typically limited to ascii alphas
    // and digits, we just take everything after the "labels." as the map key.
    google.protobuf.FieldMask update_mask = 2;
}

message UpdateNamespaceResponse {
    Namespace namespace = 1;
}

message DeleteNamespaceRequest {
    string name = 1;
}


================================================
FILE: crates/client/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox.proto
================================================
/*
    Copyright The containerd Authors.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

syntax = "proto3";

// Sandbox is a v2 runtime extension that allows more complex execution environments for containers.
// This adds a notion of groups of containers that share same lifecycle and/or resources.
// A few good fits for sandbox can be:
// - A "pause" container in k8s, that acts as a parent process for child containers to hold network namespace.
// - (micro)VMs that launch a VM process and executes containers inside guest OS.
// containerd in this case remains implementation agnostic and delegates sandbox handling to runtimes.
// See proposal and discussion here: https://github.com/containerd/containerd/issues/4131
package containerd.services.sandbox.v1;

import "google/protobuf/any.proto";
import "google/protobuf/timestamp.proto";
import "types/metrics.proto";
import "types/mount.proto";
import "types/platform.proto";
import "types/sandbox.proto";

option go_package = "github.com/containerd/containerd/api/services/sandbox/v1;sandbox";

// Store provides a metadata storage interface for sandboxes. Similarly to `Containers`,
// sandbox object includes info required to start a new instance, but no runtime state.
// When running a new sandbox instance, store objects are used as base type to create from.
service Store {
    rpc Create(StoreCreateRequest) returns (StoreCreateResponse);
    rpc Update(StoreUpdateRequest) returns (StoreUpdateResponse);
    rpc Delete(StoreDeleteRequest) returns (StoreDeleteResponse);
    rpc List(StoreListRequest) returns (StoreListResponse);
    rpc Get(StoreGetRequest) returns (StoreGetResponse);
}

message StoreCreateRequest {
    containerd.types.Sandbox sandbox = 1;
}

message StoreCreateResponse {
    containerd.types.Sandbox sandbox = 1;
}

message StoreUpdateRequest {
    containerd.types.Sandbox sandbox = 1;
    repeated string fields = 2;
}

message StoreUpdateResponse {
    containerd.types.Sandbox sandbox = 1;
}

message StoreDeleteRequest {
    string sandbox_id = 1;
}

message StoreDeleteResponse {}

message StoreListRequest {
    repeated string filters = 1;
}

message StoreListResponse {
    repeated containerd.types.Sandbox list = 1;
}

message StoreGetRequest {
    string sandbox_id = 1;
}

message StoreGetResponse {
    containerd.types.Sandbox sandbox = 1;
}

// Controller is an interface to manage runtime sandbox instances.
service Controller {
    rpc Create(ControllerCreateRequest) returns (ControllerCreateResponse);
    rpc Start(ControllerStartRequest) returns (ControllerStartResponse);
    rpc Platform(ControllerPlatformRequest) returns (ControllerPlatformResponse);
    rpc Stop(ControllerStopRequest) returns (ControllerStopResponse);
    rpc Wait(ControllerWaitRequest) returns (ControllerWaitResponse);
    rpc Status(ControllerStatusRequest) returns (ControllerStatusResponse);
    rpc Shutdown(ControllerShutdownRequest) returns (ControllerShutdownResponse);
    rpc Metrics(ControllerMetricsRequest) returns (ControllerMetricsResponse);
    rpc Update(ControllerUpdateRequest) returns (ControllerUpdateResponse);
}

message ControllerCreateRequest {
    string sandbox_id = 1;
    repeated containerd.types.Mount rootfs = 2;
    google.protobuf.Any options = 3;
    string netns_path = 4;
    map<string, string> annotations = 5;
    containerd.types.Sandbox sandbox = 6;
    string sandboxer = 10;
}

message ControllerCreateResponse {
    string sandbox_id = 1;
}

message ControllerStartRequest {
    string sandbox_id = 1;
    string sandboxer = 10;
}

message ControllerStartResponse {
    string sandbox_id = 1;
    uint32 pid = 2;
    google.protobuf.Timestamp created_at = 3;
    map<string, string> labels = 4;
    // Address of the sandbox for containerd to connect,
    // for calling Task or other APIs serving in the sandbox.
    // it is in the form of ttrpc+unix://path/to/uds or grpc+vsock://<cid>:<port>.
string address = 5;
    uint32 version = 6;
    google.protobuf.Any spec = 7;
}

message ControllerPlatformRequest {
    string sandbox_id = 1;
    string sandboxer = 10;
}

message ControllerPlatformResponse {
    containerd.types.Platform platform = 1;
}

message ControllerStopRequest {
    string sandbox_id = 1;
    uint32 timeout_secs = 2;
    string sandboxer = 10;
}

message ControllerStopResponse {}

message ControllerWaitRequest {
    string sandbox_id = 1;
    string sandboxer = 10;
}

message ControllerWaitResponse {
    uint32 exit_status = 1;
    google.protobuf.Timestamp exited_at = 2;
}

message ControllerStatusRequest {
    string sandbox_id = 1;
    bool verbose = 2;
    string sandboxer = 10;
}

message ControllerStatusResponse {
    string sandbox_id = 1;
    uint32 pid = 2;
    string state = 3;
    map<string, string> info = 4;
    google.protobuf.Timestamp created_at = 5;
    google.protobuf.Timestamp exited_at = 6;
    google.protobuf.Any extra = 7;
    // Address of the sandbox for containerd to connect,
    // for calling Task or other APIs serving in the sandbox.
    // it is in the form of ttrpc+unix://path/to/uds or grpc+vsock://<cid>:<port>.
    string address = 8;
    uint32 version = 9;
}

message ControllerShutdownRequest {
    string sandbox_id = 1;
    string sandboxer = 10;
}

message ControllerShutdownResponse {}

message ControllerMetricsRequest {
    string sandbox_id = 1;
    string sandboxer = 10;
}

message ControllerMetricsResponse {
    types.Metric metrics = 1;
}

message ControllerUpdateRequest {
    string sandbox_id = 1;
    string sandboxer = 2;
    containerd.types.Sandbox sandbox = 3;
    repeated string fields = 4;
}

message ControllerUpdateResponse {}


================================================
FILE: crates/client/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto
================================================
/*
    Copyright The containerd Authors.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

syntax = "proto3";

package containerd.services.snapshots.v1;

import "google/protobuf/empty.proto";
import "google/protobuf/field_mask.proto";
import "google/protobuf/timestamp.proto";
import "types/mount.proto";

option go_package = "github.com/containerd/containerd/api/services/snapshots/v1;snapshots";

// Snapshot service manages snapshots
service Snapshots {
    rpc Prepare(PrepareSnapshotRequest) returns (PrepareSnapshotResponse);
    rpc View(ViewSnapshotRequest) returns (ViewSnapshotResponse);
    rpc Mounts(MountsRequest) returns (MountsResponse);
    rpc Commit(CommitSnapshotRequest) returns (google.protobuf.Empty);
    rpc Remove(RemoveSnapshotRequest) returns (google.protobuf.Empty);
    rpc Stat(StatSnapshotRequest) returns (StatSnapshotResponse);
    rpc Update(UpdateSnapshotRequest) returns (UpdateSnapshotResponse);
    rpc List(ListSnapshotsRequest) returns (stream ListSnapshotsResponse);
    rpc Usage(UsageRequest) returns (UsageResponse);
    rpc Cleanup(CleanupRequest) returns (google.protobuf.Empty);
}

message PrepareSnapshotRequest {
    string snapshotter = 1;
    string key = 2;
    string parent = 3;

    // Labels are arbitrary data on snapshots.
    //
    // The combined size of a key/value pair cannot exceed 4096 bytes.
    map<string, string> labels = 4;
}

message PrepareSnapshotResponse {
    repeated containerd.types.Mount mounts = 1;
}

message ViewSnapshotRequest {
    string snapshotter = 1;
    string key = 2;
    string parent = 3;

    // Labels are arbitrary data on snapshots.
    //
    // The combined size of a key/value pair cannot exceed 4096 bytes.
map<string, string> labels = 4;
}

message ViewSnapshotResponse {
    repeated containerd.types.Mount mounts = 1;
}

message MountsRequest {
    string snapshotter = 1;
    string key = 2;
}

message MountsResponse {
    repeated containerd.types.Mount mounts = 1;
}

message RemoveSnapshotRequest {
    string snapshotter = 1;
    string key = 2;
}

message CommitSnapshotRequest {
    string snapshotter = 1;
    string name = 2;
    string key = 3;

    // Labels are arbitrary data on snapshots.
    //
    // The combined size of a key/value pair cannot exceed 4096 bytes.
    map<string, string> labels = 4;
    string parent = 5;
}

message StatSnapshotRequest {
    string snapshotter = 1;
    string key = 2;
}

enum Kind {
    UNKNOWN = 0;
    VIEW = 1;
    ACTIVE = 2;
    COMMITTED = 3;
}

message Info {
    string name = 1;
    string parent = 2;
    Kind kind = 3;

    // CreatedAt provides the time at which the snapshot was created.
    google.protobuf.Timestamp created_at = 4;

    // UpdatedAt provides the time the info was last updated.
    google.protobuf.Timestamp updated_at = 5;

    // Labels are arbitrary data on snapshots.
    //
    // The combined size of a key/value pair cannot exceed 4096 bytes.
    map<string, string> labels = 6;
}

message StatSnapshotResponse {
    Info info = 1;
}

message UpdateSnapshotRequest {
    string snapshotter = 1;
    Info info = 2;

    // UpdateMask specifies which fields to perform the update on. If empty,
    // the operation applies to all fields.
    //
    // In info, Name, Parent, Kind, Created are immutable,
    // other field may be updated using this mask.
    // If no mask is provided, all mutable field are updated.
    google.protobuf.FieldMask update_mask = 3;
}

message UpdateSnapshotResponse {
    Info info = 1;
}

message ListSnapshotsRequest {
    string snapshotter = 1;

    // Filters contains one or more filters using the syntax defined in the
    // containerd filter package.
    //
    // The returned result will be those that match any of the provided
    // filters. Expanded, images that match the following will be
    // returned:
    //
    //   filters[0] or filters[1] or ... or filters[n-1] or filters[n]
    //
    // If filters is zero-length or nil, all items will be returned.
    repeated string filters = 2;
}

message ListSnapshotsResponse {
    repeated Info info = 1;
}

message UsageRequest {
    string snapshotter = 1;
    string key = 2;
}

message UsageResponse {
    int64 size = 1;
    int64 inodes = 2;
}

message CleanupRequest {
    string snapshotter = 1;
}


================================================
FILE: crates/client/vendor/github.com/containerd/containerd/api/services/streaming/v1/streaming.proto
================================================
/*
    Copyright The containerd Authors.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

syntax = "proto3";

package containerd.services.streaming.v1;

import "google/protobuf/any.proto";

option go_package = "github.com/containerd/containerd/api/services/streaming/v1;streaming";

service Streaming {
    rpc Stream(stream google.protobuf.Any) returns (stream google.protobuf.Any);
}

message StreamInit {
    string id = 1;
}


================================================
FILE: crates/client/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto
================================================
/*
    Copyright The containerd Authors.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

syntax = "proto3";

package containerd.services.tasks.v1;

import "google/protobuf/any.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/timestamp.proto";
import "types/descriptor.proto";
import "types/metrics.proto";
import "types/mount.proto";
import "types/task/task.proto";

option go_package = "github.com/containerd/containerd/api/services/tasks/v1;tasks";

service Tasks {
    // Create a task.
    rpc Create(CreateTaskRequest) returns (CreateTaskResponse);

    // Start a process.
    rpc Start(StartRequest) returns (StartResponse);

    // Delete a task and on disk state.
    rpc Delete(DeleteTaskRequest) returns (DeleteResponse);

    rpc DeleteProcess(DeleteProcessRequest) returns (DeleteResponse);

    rpc Get(GetRequest) returns (GetResponse);

    rpc List(ListTasksRequest) returns (ListTasksResponse);

    // Kill a task or process.
rpc Kill(KillRequest) returns (google.protobuf.Empty); rpc Exec(ExecProcessRequest) returns (google.protobuf.Empty); rpc ResizePty(ResizePtyRequest) returns (google.protobuf.Empty); rpc CloseIO(CloseIORequest) returns (google.protobuf.Empty); rpc Pause(PauseTaskRequest) returns (google.protobuf.Empty); rpc Resume(ResumeTaskRequest) returns (google.protobuf.Empty); rpc ListPids(ListPidsRequest) returns (ListPidsResponse); rpc Checkpoint(CheckpointTaskRequest) returns (CheckpointTaskResponse); rpc Update(UpdateTaskRequest) returns (google.protobuf.Empty); rpc Metrics(MetricsRequest) returns (MetricsResponse); rpc Wait(WaitRequest) returns (WaitResponse); } message CreateTaskRequest { string container_id = 1; // RootFS provides the pre-chroot mounts to perform in the shim before // executing the container task. // // These are for mounts that cannot be performed in the user namespace. // Typically, these mounts should be resolved from snapshots specified on // the container object. repeated containerd.types.Mount rootfs = 3; string stdin = 4; string stdout = 5; string stderr = 6; bool terminal = 7; containerd.types.Descriptor checkpoint = 8; google.protobuf.Any options = 9; string runtime_path = 10; } message CreateTaskResponse { string container_id = 1; uint32 pid = 2; } message StartRequest { string container_id = 1; string exec_id = 2; } message StartResponse { uint32 pid = 1; } message DeleteTaskRequest { string container_id = 1; } message DeleteResponse { string id = 1; uint32 pid = 2; uint32 exit_status = 3; google.protobuf.Timestamp exited_at = 4; } message DeleteProcessRequest { string container_id = 1; string exec_id = 2; } message GetRequest { string container_id = 1; string exec_id = 2; } message GetResponse { containerd.v1.types.Process process = 1; } message ListTasksRequest { string filter = 1; } message ListTasksResponse { repeated containerd.v1.types.Process tasks = 1; } message KillRequest { string container_id = 1; string exec_id = 2; uint32 signal = 
3;
	bool all = 4;
}

message ExecProcessRequest {
	string container_id = 1;
	string stdin = 2;
	string stdout = 3;
	string stderr = 4;
	bool terminal = 5;

	// Spec for starting a process in the target container.
	//
	// For runc, this is a process spec, for example.
	google.protobuf.Any spec = 6;
	// id of the exec process
	string exec_id = 7;
}

message ExecProcessResponse {}

message ResizePtyRequest {
	string container_id = 1;
	string exec_id = 2;
	uint32 width = 3;
	uint32 height = 4;
}

message CloseIORequest {
	string container_id = 1;
	string exec_id = 2;
	bool stdin = 3;
}

message PauseTaskRequest {
	string container_id = 1;
}

message ResumeTaskRequest {
	string container_id = 1;
}

message ListPidsRequest {
	string container_id = 1;
}

message ListPidsResponse {
	// Processes includes the process ID and additional process information
	repeated containerd.v1.types.ProcessInfo processes = 1;
}

message CheckpointTaskRequest {
	string container_id = 1;
	string parent_checkpoint = 2;
	google.protobuf.Any options = 3;
}

message CheckpointTaskResponse {
	repeated containerd.types.Descriptor descriptors = 1;
}

message UpdateTaskRequest {
	string container_id = 1;
	google.protobuf.Any resources = 2;
	// NOTE(review): restored "map<string, string>" — the generic parameters were
	// stripped by extraction; upstream containerd declares this field as such.
	map<string, string> annotations = 3;
}

message MetricsRequest {
	repeated string filters = 1;
}

message MetricsResponse {
	repeated types.Metric metrics = 1;
}

message WaitRequest {
	string container_id = 1;
	string exec_id = 2;
}

message WaitResponse {
	uint32 exit_status = 1;
	google.protobuf.Timestamp exited_at = 2;
}

================================================
FILE: crates/client/vendor/github.com/containerd/containerd/api/services/transfer/v1/transfer.proto
================================================
/*
	Copyright The containerd Authors.

	Licensed under the Apache License, Version 2.0 (the "License");
	you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.services.transfer.v1; import "google/protobuf/any.proto"; import "google/protobuf/empty.proto"; option go_package = "github.com/containerd/containerd/api/services/transfer/v1;transfer"; service Transfer { rpc Transfer(TransferRequest) returns (google.protobuf.Empty); } message TransferRequest { google.protobuf.Any source = 1; google.protobuf.Any destination = 2; TransferOptions options = 3; } message TransferOptions { string progress_stream = 1; // Progress min interval } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ syntax = "proto3"; package containerd.services.events.ttrpc.v1; import "google/protobuf/empty.proto"; import "types/event.proto"; option go_package = "github.com/containerd/containerd/api/services/ttrpc/events/v1;events"; service Events { // Forward sends an event that has already been packaged into an envelope // with a timestamp and namespace. // // This is useful if earlier timestamping is required or when forwarding on // behalf of another component, namespace or publisher. rpc Forward(ForwardRequest) returns (google.protobuf.Empty); } message ForwardRequest { containerd.types.Envelope envelope = 1; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/api/services/version/v1/version.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.services.version.v1; import "google/protobuf/empty.proto"; // TODO(stevvooe): Should version service actually be versioned? option go_package = "github.com/containerd/containerd/api/services/version/v1;version"; service Version { rpc Version(google.protobuf.Empty) returns (VersionResponse); } message VersionResponse { string version = 1; string revision = 2; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/api/types/descriptor.proto ================================================ /* Copyright The containerd Authors. 
Licensed under the Apache License, Version 2.0 (the "License");
	you may not use this file except in compliance with the License.
	You may obtain a copy of the License at

		http://www.apache.org/licenses/LICENSE-2.0

	Unless required by applicable law or agreed to in writing, software
	distributed under the License is distributed on an "AS IS" BASIS,
	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
	See the License for the specific language governing permissions and
	limitations under the License.
*/

syntax = "proto3";

package containerd.types;

option go_package = "github.com/containerd/containerd/api/types;types";

// Descriptor describes a blob in a content store.
//
// This descriptor can be used to reference content from an
// oci descriptor found in a manifest.
// See https://godoc.org/github.com/opencontainers/image-spec/specs-go/v1#Descriptor
message Descriptor {
	string media_type = 1;
	string digest = 2;
	int64 size = 3;
	// NOTE(review): restored "map<string, string>" — the generic parameters were
	// stripped by extraction; upstream containerd declares this field as such.
	map<string, string> annotations = 5;
}

================================================
FILE: crates/client/vendor/github.com/containerd/containerd/api/types/event.proto
================================================
/*
	Copyright The containerd Authors.

	Licensed under the Apache License, Version 2.0 (the "License");
	you may not use this file except in compliance with the License.
	You may obtain a copy of the License at

		http://www.apache.org/licenses/LICENSE-2.0

	Unless required by applicable law or agreed to in writing, software
	distributed under the License is distributed on an "AS IS" BASIS,
	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
	See the License for the specific language governing permissions and
	limitations under the License.
*/ syntax = "proto3"; package containerd.types; import "google/protobuf/any.proto"; import "google/protobuf/timestamp.proto"; import "types/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/types;types"; message Envelope { option (containerd.types.fieldpath) = true; google.protobuf.Timestamp timestamp = 1; string namespace = 2; string topic = 3; google.protobuf.Any event = 4; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/api/types/fieldpath.proto ================================================ // Protocol Buffers for Go with Gadgets // // Copyright (c) 2013, The GoGo Authors. All rights reserved. // http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. syntax = "proto3"; package containerd.types; import "google/protobuf/descriptor.proto"; option go_package = "github.com/containerd/containerd/api/types;types"; extend google.protobuf.FileOptions { optional bool fieldpath_all = 63300; } extend google.protobuf.MessageOptions { optional bool fieldpath = 64400; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/api/types/introspection.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.types; import "google/protobuf/any.proto"; option go_package = "github.com/containerd/containerd/api/types;types"; message RuntimeRequest { string runtime_path = 1; // Options correspond to CreateTaskRequest.options. // This is needed to pass the runc binary path, etc. 
google.protobuf.Any options = 2;
}

message RuntimeVersion {
	string version = 1;
	string revision = 2;
}

message RuntimeInfo {
	string name = 1;
	RuntimeVersion version = 2;
	// Options correspond to RuntimeInfoRequest.Options (contains runc binary path, etc.)
	google.protobuf.Any options = 3;
	// OCI-compatible runtimes should use https://github.com/opencontainers/runtime-spec/blob/main/features.md
	google.protobuf.Any features = 4;
	// Annotations of the shim. Irrelevant to features.Annotations.
	// NOTE(review): restored "map<string, string>" — the generic parameters were
	// stripped by extraction; upstream containerd declares this field as such.
	map<string, string> annotations = 5;
}

================================================
FILE: crates/client/vendor/github.com/containerd/containerd/api/types/metrics.proto
================================================
/*
	Copyright The containerd Authors.

	Licensed under the Apache License, Version 2.0 (the "License");
	you may not use this file except in compliance with the License.
	You may obtain a copy of the License at

		http://www.apache.org/licenses/LICENSE-2.0

	Unless required by applicable law or agreed to in writing, software
	distributed under the License is distributed on an "AS IS" BASIS,
	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
	See the License for the specific language governing permissions and
	limitations under the License.
*/

syntax = "proto3";

package containerd.types;

import "google/protobuf/any.proto";
import "google/protobuf/timestamp.proto";

option go_package = "github.com/containerd/containerd/api/types;types";

message Metric {
	google.protobuf.Timestamp timestamp = 1;
	string id = 2;
	google.protobuf.Any data = 3;
}

================================================
FILE: crates/client/vendor/github.com/containerd/containerd/api/types/mount.proto
================================================
/*
	Copyright The containerd Authors.

	Licensed under the Apache License, Version 2.0 (the "License");
	you may not use this file except in compliance with the License.
You may obtain a copy of the License at

		http://www.apache.org/licenses/LICENSE-2.0

	Unless required by applicable law or agreed to in writing, software
	distributed under the License is distributed on an "AS IS" BASIS,
	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
	See the License for the specific language governing permissions and
	limitations under the License.
*/

syntax = "proto3";

package containerd.types;

import "google/protobuf/timestamp.proto";

option go_package = "github.com/containerd/containerd/api/types;types";

// Mount describes mounts for a container.
//
// This type is the lingua franca of ContainerD. All services provide mounts
// to be used with the container at creation time.
//
// The Mount type follows the structure of the mount syscall, including a type,
// source, target and options.
message Mount {
	// Type defines the nature of the mount.
	string type = 1;

	// Source specifies the name of the mount. Depending on mount type, this
	// may be a volume name or a host path, or even ignored.
	string source = 2;

	// Target path in container
	string target = 3;

	// Options specifies zero or more fstab style mount options.
	repeated string options = 4;
}

message ActiveMount {
	Mount mount = 1;
	google.protobuf.Timestamp mounted_at = 2;
	string mount_point = 3;
	// NOTE(review): restored "map<string, string>" — the generic parameters were
	// stripped by extraction; upstream containerd declares this field as such.
	map<string, string> data = 4;
}

message ActivationInfo {
	string name = 1;
	repeated ActiveMount active = 2;
	repeated Mount system = 3;
	// NOTE(review): restored "map<string, string>" (stripped by extraction).
	map<string, string> labels = 4;
}

================================================
FILE: crates/client/vendor/github.com/containerd/containerd/api/types/platform.proto
================================================
/*
	Copyright The containerd Authors.

	Licensed under the Apache License, Version 2.0 (the "License");
	you may not use this file except in compliance with the License.
You may obtain a copy of the License at

		http://www.apache.org/licenses/LICENSE-2.0

	Unless required by applicable law or agreed to in writing, software
	distributed under the License is distributed on an "AS IS" BASIS,
	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
	See the License for the specific language governing permissions and
	limitations under the License.
*/

syntax = "proto3";

package containerd.types;

option go_package = "github.com/containerd/containerd/api/types;types";

// Platform follows the structure of the OCI platform specification, from
// descriptors.
message Platform {
	string os = 1;
	string architecture = 2;
	string variant = 3;
	string os_version = 4;
	repeated string os_features = 5;
}

================================================
FILE: crates/client/vendor/github.com/containerd/containerd/api/types/runc/options/oci.proto
================================================
syntax = "proto3";

package containerd.runc.v1;

option go_package = "github.com/containerd/containerd/api/types/runc/options;options";

message Options {
	// disable pivot root when creating a container
	bool no_pivot_root = 1;
	// create a new keyring for the container
	bool no_new_keyring = 2;
	// place the shim in a cgroup
	string shim_cgroup = 3;
	// set the I/O's pipes uid
	uint32 io_uid = 4;
	// set the I/O's pipes gid
	uint32 io_gid = 5;
	// binary name of the runc binary
	string binary_name = 6;
	// runc root directory
	string root = 7;
	// criu binary path.
	//
	// Removed in containerd v2.0: string criu_path = 8;
	reserved 8;
	// enable systemd cgroups
	bool systemd_cgroup = 9;
	// criu image path
	string criu_image_path = 10;
	// criu work path
	string criu_work_path = 11;
	// task api address, can be a unix domain socket, or vsock address.
	// it is in the form of ttrpc+unix://path/to/uds or grpc+vsock://<cid>:<port>.
	// NOTE(review): the "<cid>:<port>" placeholders above were stripped by
	// extraction and have been restored from upstream containerd.
	string task_api_address = 12;
	// task api version, currently supported value is 2 and 3.
uint32 task_api_version = 13; } message CheckpointOptions { // exit the container after a checkpoint bool exit = 1; // checkpoint open tcp connections bool open_tcp = 2; // checkpoint external unix sockets bool external_unix_sockets = 3; // checkpoint terminals (ptys) bool terminal = 4; // allow checkpointing of file locks bool file_locks = 5; // restore provided namespaces as empty namespaces repeated string empty_namespaces = 6; // set the cgroups mode, soft, full, strict string cgroups_mode = 7; // checkpoint image path string image_path = 8; // checkpoint work path string work_path = 9; } message ProcessDetails { // exec process id if the process is managed by a shim string exec_id = 1; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/api/types/runtimeoptions/v1/api.proto ================================================ // To regenerate api.pb.go run `make protos` syntax = "proto3"; package runtimeoptions.v1; option go_package = "github.com/containerd/containerd/api/types/runtimeoptions/v1;runtimeoptions"; message Options { // TypeUrl specifies the type of the content inside the config file. string type_url = 1; // ConfigPath specifies the filesystem location of the config file // used by the runtime. string config_path = 2; // Blob specifies an in-memory TOML blob passed from containerd's configuration section // for this runtime. This will be used if config_path is not specified. bytes config_body = 3; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/api/types/sandbox.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at

		http://www.apache.org/licenses/LICENSE-2.0

	Unless required by applicable law or agreed to in writing, software
	distributed under the License is distributed on an "AS IS" BASIS,
	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
	See the License for the specific language governing permissions and
	limitations under the License.
*/

syntax = "proto3";

package containerd.types;

import "google/protobuf/any.proto";
import "google/protobuf/timestamp.proto";

option go_package = "github.com/containerd/containerd/api/types;types";

// Sandbox represents a sandbox metadata object that keeps all info required by controller to
// work with a particular instance.
message Sandbox {
	// SandboxID is a unique instance identifier within namespace
	string sandbox_id = 1;
	message Runtime {
		// Name is the name of the runtime.
		string name = 1;
		// Options specify additional runtime initialization options for the shim (this data will be available in StartShim).
		// Typically this data expected to be runtime shim implementation specific.
		google.protobuf.Any options = 2;
	}
	// Runtime specifies which runtime to use for executing this container.
	Runtime runtime = 2;
	// Spec is sandbox configuration (kin of OCI runtime spec), spec's data will be written to a config.json file in the
	// bundle directory (similary to OCI spec).
	google.protobuf.Any spec = 3;
	// Labels provides an area to include arbitrary data on containers.
	// NOTE(review): restored "map<string, string>" — the generic parameters were
	// stripped by extraction; upstream containerd declares this field as such.
	map<string, string> labels = 4;
	// CreatedAt is the time the container was first created.
	google.protobuf.Timestamp created_at = 5;
	// UpdatedAt is the last time the container was mutated.
	google.protobuf.Timestamp updated_at = 6;
	// Extensions allow clients to provide optional blobs that can be handled by runtime.
	// NOTE(review): restored "map<string, google.protobuf.Any>" (stripped by extraction).
	map<string, google.protobuf.Any> extensions = 7;
	// Sandboxer is the name of the sandbox controller who manages the sandbox.
string sandboxer = 10; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/api/types/task/task.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.v1.types; import "google/protobuf/any.proto"; import "google/protobuf/timestamp.proto"; option go_package = "github.com/containerd/containerd/api/types/task"; enum Status { UNKNOWN = 0; CREATED = 1; RUNNING = 2; STOPPED = 3; PAUSED = 4; PAUSING = 5; } message Process { string container_id = 1; string id = 2; uint32 pid = 3; Status status = 4; string stdin = 5; string stdout = 6; string stderr = 7; bool terminal = 8; uint32 exit_status = 9; google.protobuf.Timestamp exited_at = 10; } message ProcessInfo { // PID is the process ID. uint32 pid = 1; // Info contains additional process information. // // Info varies by platform. google.protobuf.Any info = 2; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/api/types/transfer/container.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.types.transfer; option go_package = "github.com/containerd/containerd/api/types/transfer"; // ContainerPath represents a path within an active container's // filesystem. It acts as either a source or destination in a transfer // operation, identifying the container and path for archive operations. message ContainerPath { string container_id = 1; string path = 2; // When true and path is a directory, return only the directory entry // itself without walking into its contents. This is useful for // stat-like operations where only the directory's metadata is needed. bool no_walk = 3; // When true, preserve the UID/GID from tar headers when extracting // files. When false, extracted files are owned by the extracting // process. bool preserve_ownership = 4; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/api/types/transfer/imagestore.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/

syntax = "proto3";

package containerd.types.transfer;

import "types/platform.proto";

option go_package = "github.com/containerd/containerd/api/types/transfer";

message ImageStore {
	string name = 1;
	// NOTE(review): restored "map<string, string>" — the generic parameters were
	// stripped by extraction; upstream containerd declares this field as such.
	map<string, string> labels = 2;

	// Content filters

	repeated types.Platform platforms = 3;
	bool all_metadata = 4;
	uint32 manifest_limit = 5;

	// Import naming

	// extra_references are used to set image names on imports of sub-images from the index
	repeated ImageReference extra_references = 6;

	// Unpack Configuration, multiple allowed

	repeated UnpackConfiguration unpacks = 10;
}

message UnpackConfiguration {
	// platform is the platform to unpack for, used for resolving manifest and snapshotter
	// if not provided
	types.Platform platform = 1;

	// snapshotter to unpack to, if not provided default for platform should be used
	string snapshotter = 2;
}

// ImageReference is used to create or find a reference for an image
message ImageReference {
	string name = 1;

	// is_prefix determines whether the Name should be considered
	// a prefix (without tag or digest).
	// For lookup, this may allow matching multiple tags.
	// For store, this must have a tag or digest added.
	bool is_prefix = 2;

	// allow_overwrite allows overwriting or ignoring the name if
	// another reference is provided (such as through an annotation).
	// Only used if IsPrefix is true.
	bool allow_overwrite = 3;

	// add_digest adds the manifest digest to the reference.
	// For lookup, this allows matching tags with any digest.
	// For store, this allows adding the digest to the name.
	// Only used if IsPrefix is true.
	bool add_digest = 4;

	// skip_named_digest only considers digest references which do not
	// have a non-digested named reference.
	// For lookup, this will deduplicate digest references when there is a named match.
	// For store, this only adds this digest reference when there is no matching full
	// name reference from the prefix.
	// Only used if IsPrefix is true.
bool skip_named_digest = 5; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/api/types/transfer/importexport.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.types.transfer; import "types/platform.proto"; option go_package = "github.com/containerd/containerd/api/types/transfer"; message ImageImportStream { // Stream is used to identify the binary input stream for the import operation. // The stream uses the transfer binary stream protocol with the client as the sender. // The binary data is expected to be a raw tar stream. string stream = 1; string media_type = 2; bool force_compress = 3; } message ImageExportStream { // Stream is used to identify the binary output stream for the export operation. // The stream uses the transfer binary stream protocol with the server as the sender. // The binary data is expected to be a raw tar stream. string stream = 1; string media_type = 2; // The specified platforms repeated types.Platform platforms = 3; // Whether to include all platforms bool all_platforms = 4; // Skips the creation of the Docker compatible manifest.json file bool skip_compatibility_manifest = 5; // Excludes non-distributable blobs such as Windows base layers. 
bool skip_non_distributable = 6; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/api/types/transfer/progress.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.types.transfer; import "types/descriptor.proto"; option go_package = "github.com/containerd/containerd/api/types/transfer"; message Progress { string event = 1; string name = 2; repeated string parents = 3; int64 progress = 4; int64 total = 5; containerd.types.Descriptor desc = 6; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/api/types/transfer/registry.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ syntax = "proto3"; package containerd.types.transfer; import "google/protobuf/timestamp.proto"; option go_package = "github.com/containerd/containerd/api/types/transfer"; message OCIRegistry { string reference = 1; RegistryResolver resolver = 2; } enum HTTPDebug { DISABLED = 0; // Enable HTTP debugging DEBUG = 1; // Enable HTTP requests tracing TRACE = 2; // Enable both HTTP debugging and requests tracing BOTH = 3; } message RegistryResolver { // auth_stream is used to refer to a stream which auth callbacks may be // made on. string auth_stream = 1; // Headers map headers = 2; string host_dir = 3; string default_scheme = 4; // Force skip verify // CA callback? Client TLS callback? // Whether to debug/trace HTTP requests to OCI registry. HTTPDebug http_debug = 5; // Stream ID to use for HTTP logs (when logs are streamed to client). // When empty, logs are written to containerd logs. string logs_stream = 6; } // AuthRequest is sent as a callback on a stream message AuthRequest { // host is the registry host string host = 1; // reference is the namespace and repository name requested from the registry string reference = 2; // wwwauthenticate is the HTTP WWW-Authenticate header values returned from the registry repeated string wwwauthenticate = 3; } enum AuthType { NONE = 0; // CREDENTIALS is used to exchange username/password for access token // using an oauth or "Docker Registry Token" server CREDENTIALS = 1; // REFRESH is used to exchange secret for access token using an oauth // or "Docker Registry Token" server REFRESH = 2; // HEADER is used to set the HTTP Authorization header to secret // directly for the registry. 
// Value should be `<auth-scheme> <authorization-parameters>` HEADER = 3; } message AuthResponse { AuthType authType = 1; string secret = 2; string username = 3; google.protobuf.Timestamp expire_at = 4; // TODO: Stream error } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/api/types/transfer/streaming.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.types.transfer; option go_package = "github.com/containerd/containerd/api/types/transfer"; message Data { bytes data = 1; } message WindowUpdate { int32 update = 1; } // ReadStream carries data from the client to the server (import // direction). The client sends data through the stream and the // server reads it. message ReadStream { string stream = 1; string media_type = 2; } // WriteStream carries data from the server to the client (export // direction). The server writes data into the stream and the // client receives it. message WriteStream { string stream = 1; string media_type = 2; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/events/container.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.events; import "google/protobuf/any.proto"; import "types/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.types.fieldpath_all) = true; message ContainerCreate { string id = 1; string image = 2; message Runtime { string name = 1; google.protobuf.Any options = 2; } Runtime runtime = 3; } message ContainerUpdate { string id = 1; string image = 2; map<string, string> labels = 3; string snapshot_key = 4; } message ContainerDelete { string id = 1; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/events/content.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/ syntax = "proto3"; package containerd.events; import "types/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.types.fieldpath_all) = true; message ContentCreate { string digest = 1; int64 size = 2; } message ContentDelete { string digest = 1; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/events/image.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.services.images.v1; import "types/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.types.fieldpath_all) = true; message ImageCreate { string name = 1; map<string, string> labels = 2; } message ImageUpdate { string name = 1; map<string, string> labels = 2; } message ImageDelete { string name = 1; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/events/namespace.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.events; import "types/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.types.fieldpath_all) = true; message NamespaceCreate { string name = 1; map<string, string> labels = 2; } message NamespaceUpdate { string name = 1; map<string, string> labels = 2; } message NamespaceDelete { string name = 1; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/events/sandbox.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/ syntax = "proto3"; package containerd.events; import "google/protobuf/timestamp.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; message SandboxCreate { string sandbox_id = 1; } message SandboxStart { string sandbox_id = 1; } message SandboxExit { string sandbox_id = 1; uint32 exit_status = 2; google.protobuf.Timestamp exited_at = 3; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/events/snapshot.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.events; import "types/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.types.fieldpath_all) = true; message SnapshotPrepare { string key = 1; string parent = 2; string snapshotter = 5; } message SnapshotCommit { string key = 1; string name = 2; string snapshotter = 5; } message SnapshotRemove { string key = 1; string snapshotter = 5; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/events/task.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.events; import "google/protobuf/timestamp.proto"; import "types/fieldpath.proto"; import "types/mount.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.types.fieldpath_all) = true; message TaskCreate { string container_id = 1; string bundle = 2; repeated containerd.types.Mount rootfs = 3; TaskIO io = 4; string checkpoint = 5; uint32 pid = 6; } message TaskStart { string container_id = 1; uint32 pid = 2; } message TaskDelete { string container_id = 1; uint32 pid = 2; uint32 exit_status = 3; google.protobuf.Timestamp exited_at = 4; // id is the specific exec. By default if omitted will be `""` thus matches // the init exec of the task matching `container_id`. 
string id = 5; } message TaskIO { string stdin = 1; string stdout = 2; string stderr = 3; bool terminal = 4; } message TaskExit { string container_id = 1; string id = 2; uint32 pid = 3; uint32 exit_status = 4; google.protobuf.Timestamp exited_at = 5; } message TaskOOM { string container_id = 1; } message TaskExecAdded { string container_id = 1; string exec_id = 2; } message TaskExecStarted { string container_id = 1; string exec_id = 2; uint32 pid = 3; } message TaskPaused { string container_id = 1; } message TaskResumed { string container_id = 1; } message TaskCheckpointed { string container_id = 1; string checkpoint = 2; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.services.containers.v1; import "google/protobuf/any.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/field_mask.proto"; import "google/protobuf/timestamp.proto"; option go_package = "github.com/containerd/containerd/api/services/containers/v1;containers"; // Containers provides metadata storage for containers used in the execution // service. // // The objects here provide an state-independent view of containers for use in // management and resource pinning. 
From that perspective, containers do not // have a "state" but rather this is the set of resources that will be // considered in use by the container. // // From the perspective of the execution service, these objects represent the // base parameters for creating a container process. // // In general, when looking to add fields for this type, first ask yourself // whether or not the function of the field has to do with runtime execution or // is invariant of the runtime state of the container. If it has to do with // runtime, or changes as the "container" is started and stops, it probably // doesn't belong on this object. service Containers { rpc Get(GetContainerRequest) returns (GetContainerResponse); rpc List(ListContainersRequest) returns (ListContainersResponse); rpc ListStream(ListContainersRequest) returns (stream ListContainerMessage); rpc Create(CreateContainerRequest) returns (CreateContainerResponse); rpc Update(UpdateContainerRequest) returns (UpdateContainerResponse); rpc Delete(DeleteContainerRequest) returns (google.protobuf.Empty); } message Container { // ID is the user-specified identifier. // // This field may not be updated. string id = 1; // Labels provides an area to include arbitrary data on containers. // // The combined size of a key/value pair cannot exceed 4096 bytes. // // Note that to add a new value to this field, read the existing set and // include the entire result in the update call. map<string, string> labels = 2; // Image contains the reference of the image used to build the // specification and snapshots for running this container. // // If this field is updated, the spec and rootfs needed to updated, as well. string image = 3; message Runtime { // Name is the name of the runtime. string name = 1; // Options specify additional runtime initialization options. google.protobuf.Any options = 2; } // Runtime specifies which runtime to use for executing this container. Runtime runtime = 4; // Spec to be used when creating the container.
This is runtime specific. google.protobuf.Any spec = 5; // Snapshotter specifies the snapshotter name used for rootfs string snapshotter = 6; // SnapshotKey specifies the snapshot key to use for the container's root // filesystem. When starting a task from this container, a caller should // look up the mounts from the snapshot service and include those on the // task create request. // // Snapshots referenced in this field will not be garbage collected. // // This field is set to empty when the rootfs is not a snapshot. // // This field may be updated. string snapshot_key = 7; // CreatedAt is the time the container was first created. google.protobuf.Timestamp created_at = 8; // UpdatedAt is the last time the container was mutated. google.protobuf.Timestamp updated_at = 9; // Extensions allow clients to provide zero or more blobs that are directly // associated with the container. One may provide protobuf, json, or other // encoding formats. The primary use of this is to further decorate the // container object with fields that may be specific to a client integration. // // The key portion of this map should identify a "name" for the extension // that should be unique against other extensions. When updating extension // data, one should only update the specified extension using field paths // to select a specific map key. map<string, google.protobuf.Any> extensions = 10; // Sandbox ID this container belongs to. string sandbox = 11; } message GetContainerRequest { string id = 1; } message GetContainerResponse { Container container = 1; } message ListContainersRequest { // Filters contains one or more filters using the syntax defined in the // containerd filter package. // // The returned result will be those that match any of the provided // filters. Expanded, containers that match the following will be // returned: // // filters[0] or filters[1] or ... or filters[n-1] or filters[n] // // If filters is zero-length or nil, all items will be returned.
repeated string filters = 1; } message ListContainersResponse { repeated Container containers = 1; } message CreateContainerRequest { Container container = 1; } message CreateContainerResponse { Container container = 1; } // UpdateContainerRequest updates the metadata on one or more container. // // The operation should follow semantics described in // https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-mask, // unless otherwise qualified. message UpdateContainerRequest { // Container provides the target values, as declared by the mask, for the update. // // The ID field must be set. Container container = 1; // UpdateMask specifies which fields to perform the update on. If empty, // the operation applies to all fields. google.protobuf.FieldMask update_mask = 2; } message UpdateContainerResponse { Container container = 1; } message DeleteContainerRequest { string id = 1; } message ListContainerMessage { Container container = 1; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ syntax = "proto3"; package containerd.services.content.v1; import "google/protobuf/empty.proto"; import "google/protobuf/field_mask.proto"; import "google/protobuf/timestamp.proto"; option go_package = "github.com/containerd/containerd/api/services/content/v1;content"; // Content provides access to a content addressable storage system. service Content { // Info returns information about a committed object. // // This call can be used for getting the size of content and checking for // existence. rpc Info(InfoRequest) returns (InfoResponse); // Update updates content metadata. // // This call can be used to manage the mutable content labels. The // immutable metadata such as digest, size, and committed at cannot // be updated. rpc Update(UpdateRequest) returns (UpdateResponse); // List streams the entire set of content as Info objects and closes the // stream. // // Typically, this will yield a large response, chunked into messages. // Clients should make provisions to ensure they can handle the entire data // set. rpc List(ListContentRequest) returns (stream ListContentResponse); // Delete will delete the referenced object. rpc Delete(DeleteContentRequest) returns (google.protobuf.Empty); // Read allows one to read an object based on the offset into the content. // // The requested data may be returned in one or more messages. rpc Read(ReadContentRequest) returns (stream ReadContentResponse); // Status returns the status for a single reference. rpc Status(StatusRequest) returns (StatusResponse); // ListStatuses returns the status of ongoing object ingestions, started via // Write. // // Only those matching the regular expression will be provided in the // response. If the provided regular expression is empty, all ingestions // will be provided. rpc ListStatuses(ListStatusesRequest) returns (ListStatusesResponse); // Write begins or resumes writes to a resource identified by a unique ref. // Only one active stream may exist at a time for each ref. 
// // Once a write stream has started, it may only write to a single ref, thus // once a stream is started, the ref may be omitted on subsequent writes. // // For any write transaction represented by a ref, only a single write may // be made to a given offset. If overlapping writes occur, it is an error. // Writes should be sequential and implementations may throw an error if // this is required. // // If expected_digest is set and already part of the content store, the // write will fail. // // When completed, the commit flag should be set to true. If expected size // or digest is set, the content will be validated against those values. rpc Write(stream WriteContentRequest) returns (stream WriteContentResponse); // Abort cancels the ongoing write named in the request. Any resources // associated with the write will be collected. rpc Abort(AbortRequest) returns (google.protobuf.Empty); } message Info { // Digest is the hash identity of the blob. string digest = 1; // Size is the total number of bytes in the blob. int64 size = 2; // CreatedAt provides the time at which the blob was committed. google.protobuf.Timestamp created_at = 3; // UpdatedAt provides the time the info was last updated. google.protobuf.Timestamp updated_at = 4; // Labels are arbitrary data on snapshots. // // The combined size of a key/value pair cannot exceed 4096 bytes. map<string, string> labels = 5; } message InfoRequest { string digest = 1; } message InfoResponse { Info info = 1; } message UpdateRequest { Info info = 1; // UpdateMask specifies which fields to perform the update on. If empty, // the operation applies to all fields. // // In info, Digest, Size, and CreatedAt are immutable, // other field may be updated using this mask. // If no mask is provided, all mutable field are updated. google.protobuf.FieldMask update_mask = 2; } message UpdateResponse { Info info = 1; } message ListContentRequest { // Filters contains one or more filters using the syntax defined in the // containerd filter package.
// // The returned result will be those that match any of the provided // filters. Expanded, containers that match the following will be // returned: // // filters[0] or filters[1] or ... or filters[n-1] or filters[n] // // If filters is zero-length or nil, all items will be returned. repeated string filters = 1; } message ListContentResponse { repeated Info info = 1; } message DeleteContentRequest { // Digest specifies which content to delete. string digest = 1; } // ReadContentRequest defines the fields that make up a request to read a portion of // data from a stored object. message ReadContentRequest { // Digest is the hash identity to read. string digest = 1; // Offset specifies the number of bytes from the start at which to begin // the read. If zero or less, the read will be from the start. This uses // standard zero-indexed semantics. int64 offset = 2; // size is the total size of the read. If zero, the entire blob will be // returned by the service. int64 size = 3; } // ReadContentResponse carries byte data for a read request. message ReadContentResponse { int64 offset = 1; // offset of the returned data bytes data = 2; // actual data } message Status { google.protobuf.Timestamp started_at = 1; google.protobuf.Timestamp updated_at = 2; string ref = 3; int64 offset = 4; int64 total = 5; string expected = 6; } message StatusRequest { string ref = 1; } message StatusResponse { Status status = 1; } message ListStatusesRequest { repeated string filters = 1; } message ListStatusesResponse { repeated Status statuses = 1; } // WriteAction defines the behavior of a WriteRequest. enum WriteAction { // WriteActionStat instructs the writer to return the current status while // holding the lock on the write. STAT = 0; // WriteActionWrite sets the action for the write request to write data. // // Any data included will be written at the provided offset. The // transaction will be left open for further writes. // // This is the default. 
WRITE = 1; // WriteActionCommit will write any outstanding data in the message and // commit the write, storing it under the digest. // // This can be used in a single message to send the data, verify it and // commit it. // // This action will always terminate the write. COMMIT = 2; } // WriteContentRequest writes data to the request ref at offset. message WriteContentRequest { // Action sets the behavior of the write. // // When this is a write and the ref is not yet allocated, the ref will be // allocated and the data will be written at offset. // // If the action is write and the ref is allocated, it will accept data to // an offset that has not yet been written. // // If the action is write and there is no data, the current write status // will be returned. This works differently from status because the stream // holds a lock. WriteAction action = 1; // Ref identifies the pre-commit object to write to. string ref = 2; // Total can be set to have the service validate the total size of the // committed content. // // The latest value before or with the commit action message will be use to // validate the content. If the offset overflows total, the service may // report an error. It is only required on one message for the write. // // If the value is zero or less, no validation of the final content will be // performed. int64 total = 3; // Expected can be set to have the service validate the final content against // the provided digest. // // If the digest is already present in the object store, an AlreadyExists // error will be returned. // // Only the latest version will be used to check the content against the // digest. It is only required to include it on a single message, before or // with the commit action message. string expected = 4; // Offset specifies the number of bytes from the start at which to begin // the write. For most implementations, this means from the start of the // file. This uses standard, zero-indexed semantics. 
// // If the action is write, the remote may remove all previously written // data after the offset. Implementations may support arbitrary offsets but // MUST support reseting this value to zero with a write. If an // implementation does not support a write at a particular offset, an // OutOfRange error must be returned. int64 offset = 5; // Data is the actual bytes to be written. // // If this is empty and the message is not a commit, a response will be // returned with the current write state. bytes data = 6; // Labels are arbitrary data on snapshots. // // The combined size of a key/value pair cannot exceed 4096 bytes. map<string, string> labels = 7; } // WriteContentResponse is returned on the culmination of a write call. message WriteContentResponse { // Action contains the action for the final message of the stream. A writer // should confirm that they match the intended result. WriteAction action = 1; // StartedAt provides the time at which the write began. // // This must be set for stat and commit write actions. All other write // actions may omit this. google.protobuf.Timestamp started_at = 2; // UpdatedAt provides the last time of a successful write. // // This must be set for stat and commit write actions. All other write // actions may omit this. google.protobuf.Timestamp updated_at = 3; // Offset is the current committed size for the write. int64 offset = 4; // Total provides the current, expected total size of the write. // // We include this to provide consistency with the Status structure on the // client writer. // // This is only valid on the Stat and Commit response. int64 total = 5; // Digest, if present, includes the digest up to the currently committed // bytes. If action is commit, this field will be set. It is implementation // defined if this is set for other actions.
string digest = 6; } message AbortRequest { string ref = 1; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.services.diff.v1; import "google/protobuf/any.proto"; import "google/protobuf/timestamp.proto"; import "types/descriptor.proto"; import "types/mount.proto"; option go_package = "github.com/containerd/containerd/api/services/diff/v1;diff"; // Diff service creates and applies diffs service Diff { // Apply applies the content associated with the provided digests onto // the provided mounts. Archive content will be extracted and // decompressed if necessary. rpc Apply(ApplyRequest) returns (ApplyResponse); // Diff creates a diff between the given mounts and uploads the result // to the content store. rpc Diff(DiffRequest) returns (DiffResponse); } message ApplyRequest { // Diff is the descriptor of the diff to be extracted containerd.types.Descriptor diff = 1; repeated containerd.types.Mount mounts = 2; map<string, google.protobuf.Any> payloads = 3; // SyncFs is to synchronize the underlying filesystem containing files. bool sync_fs = 4; } message ApplyResponse { // Applied is the descriptor for the object which was applied.
// If the input was a compressed blob then the result will be // the descriptor for the uncompressed blob. containerd.types.Descriptor applied = 1; } message DiffRequest { // Left are the mounts which represent the older copy // in which is the base of the computed changes. repeated containerd.types.Mount left = 1; // Right are the mounts which represents the newer copy // in which changes from the left were made into. repeated containerd.types.Mount right = 2; // MediaType is the media type descriptor for the created diff // object string media_type = 3; // Ref identifies the pre-commit content store object. This // reference can be used to get the status from the content store. string ref = 4; // Labels are the labels to apply to the generated content // on content store commit. map labels = 5; // SourceDateEpoch specifies the timestamp used to provide control for reproducibility. // See also https://reproducible-builds.org/docs/source-date-epoch/ . // // Since containerd v2.0, the whiteout timestamps are set to zero (1970-01-01), // not to the source date epoch. google.protobuf.Timestamp source_date_epoch = 6; } message DiffResponse { // Diff is the descriptor of the diff which can be applied containerd.types.Descriptor diff = 3; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/events/v1/events.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.services.events.v1; import "google/protobuf/any.proto"; import "google/protobuf/empty.proto"; import "types/event.proto"; option go_package = "github.com/containerd/containerd/api/services/events/v1;events"; service Events { // Publish an event to a topic. // // The event will be packed into a timestamp envelope with the namespace // introspected from the context. The envelope will then be dispatched. rpc Publish(PublishRequest) returns (google.protobuf.Empty); // Forward sends an event that has already been packaged into an envelope // with a timestamp and namespace. // // This is useful if earlier timestamping is required or when forwarding on // behalf of another component, namespace or publisher. rpc Forward(ForwardRequest) returns (google.protobuf.Empty); // Subscribe to a stream of events, possibly returning only that match any // of the provided filters. // // Unlike many other methods in containerd, subscribers will get messages // from all namespaces unless otherwise specified. If this is not desired, // a filter can be provided in the format 'namespace==' to // restrict the received events. rpc Subscribe(SubscribeRequest) returns (stream containerd.types.Envelope); } message PublishRequest { string topic = 1; google.protobuf.Any event = 2; } message ForwardRequest { containerd.types.Envelope envelope = 1; } message SubscribeRequest { repeated string filters = 1; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/images/v1/images.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.services.images.v1; import "google/protobuf/empty.proto"; import "google/protobuf/field_mask.proto"; import "google/protobuf/timestamp.proto"; import "types/descriptor.proto"; option go_package = "github.com/containerd/containerd/api/services/images/v1;images"; // Images is a service that allows one to register images with containerd. // // In containerd, an image is merely the mapping of a name to a content root, // described by a descriptor. The behavior and state of image is purely // dictated by the type of the descriptor. // // From the perspective of this service, these references are mostly shallow, // in that the existence of the required content won't be validated until // required by consuming services. // // As such, this can really be considered a "metadata service". service Images { // Get returns an image by name. rpc Get(GetImageRequest) returns (GetImageResponse); // List returns a list of all images known to containerd. rpc List(ListImagesRequest) returns (ListImagesResponse); // Create an image record in the metadata store. // // The name of the image must be unique. rpc Create(CreateImageRequest) returns (CreateImageResponse); // Update assigns the name to a given target image based on the provided // image. rpc Update(UpdateImageRequest) returns (UpdateImageResponse); // Delete deletes the image by name. rpc Delete(DeleteImageRequest) returns (google.protobuf.Empty); } message Image { // Name provides a unique name for the image. // // Containerd treats this as the primary identifier. 
string name = 1; // Labels provides free form labels for the image. These are runtime only // and do not get inherited into the package image in any way. // // Labels may be updated using the field mask. // The combined size of a key/value pair cannot exceed 4096 bytes. map labels = 2; // Target describes the content entry point of the image. containerd.types.Descriptor target = 3; // CreatedAt is the time the image was first created. google.protobuf.Timestamp created_at = 7; // UpdatedAt is the last time the image was mutated. google.protobuf.Timestamp updated_at = 8; } message GetImageRequest { string name = 1; } message GetImageResponse { Image image = 1; } message CreateImageRequest { Image image = 1; google.protobuf.Timestamp source_date_epoch = 2; } message CreateImageResponse { Image image = 1; } message UpdateImageRequest { // Image provides a full or partial image for update. // // The name field must be set or an error will be returned. Image image = 1; // UpdateMask specifies which fields to perform the update on. If empty, // the operation applies to all fields. google.protobuf.FieldMask update_mask = 2; google.protobuf.Timestamp source_date_epoch = 3; } message UpdateImageResponse { Image image = 1; } message ListImagesRequest { // Filters contains one or more filters using the syntax defined in the // containerd filter package. // // The returned result will be those that match any of the provided // filters. Expanded, images that match the following will be // returned: // // filters[0] or filters[1] or ... or filters[n-1] or filters[n] // // If filters is zero-length or nil, all items will be returned. 
repeated string filters = 1; } message ListImagesResponse { repeated Image images = 1; } message DeleteImageRequest { string name = 1; // Sync indicates that the delete and cleanup should be done // synchronously before returning to the caller // // Default is false bool sync = 2; // Target value for image to be deleted // // If image descriptor does not match the same digest, // the delete operation will return "not found" error. optional containerd.types.Descriptor target = 3; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.services.introspection.v1; import "google/protobuf/any.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/timestamp.proto"; import "google/rpc/status.proto"; import "types/introspection.proto"; import "types/platform.proto"; option go_package = "github.com/containerd/containerd/api/services/introspection/v1;introspection"; service Introspection { // Plugins returns a list of plugins in containerd. // // Clients can use this to detect features and capabilities when using // containerd. 
rpc Plugins(PluginsRequest) returns (PluginsResponse); // Server returns information about the containerd server rpc Server(google.protobuf.Empty) returns (ServerResponse); // PluginInfo returns information directly from a plugin if the plugin supports it rpc PluginInfo(PluginInfoRequest) returns (PluginInfoResponse); } message Plugin { // Type defines the type of plugin. // // See package plugin for a list of possible values. Non core plugins may // define their own values during registration. string type = 1; // ID identifies the plugin uniquely in the system. string id = 2; // Requires lists the plugin types required by this plugin. repeated string requires = 3; // Platforms enumerates the platforms this plugin will support. // // If values are provided here, the plugin will only be operable under the // provided platforms. // // If this is empty, the plugin will work across all platforms. // // If the plugin prefers certain platforms over others, they should be // listed from most to least preferred. repeated types.Platform platforms = 4; // Exports allows plugins to provide values about state or configuration to // interested parties. // // One example is exposing the configured path of a snapshotter plugin. map exports = 5; // Capabilities allows plugins to communicate feature switches to allow // clients to detect features that may not be on be default or may be // different from version to version. // // Use this sparingly. repeated string capabilities = 6; // InitErr will be set if the plugin fails initialization. // // This means the plugin may have been registered but a non-terminal error // was encountered during initialization. // // Plugins that have this value set cannot be used. google.rpc.Status init_err = 7; } message PluginsRequest { // Filters contains one or more filters using the syntax defined in the // containerd filter package. // // The returned result will be those that match any of the provided // filters. 
Expanded, plugins that match the following will be // returned: // // filters[0] or filters[1] or ... or filters[n-1] or filters[n] // // If filters is zero-length or nil, all items will be returned. repeated string filters = 1; } message PluginsResponse { repeated Plugin plugins = 1; } message ServerResponse { string uuid = 1; uint64 pid = 2; uint64 pidns = 3; // PID namespace, such as 4026531836 repeated DeprecationWarning deprecations = 4; } message DeprecationWarning { string id = 1; string message = 2; google.protobuf.Timestamp last_occurrence = 3; } message PluginInfoRequest { string type = 1; string id = 2; // Options may be used to request extra dynamic information from // a plugin. // This object is determined by the plugin and the plugin may return // NotImplemented or InvalidArgument if it is not supported google.protobuf.Any options = 3; } message PluginInfoResponse { Plugin plugin = 1; google.protobuf.Any extra = 2; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/

syntax = "proto3";

package containerd.services.leases.v1;

import "google/protobuf/empty.proto";
import "google/protobuf/timestamp.proto";

option go_package = "github.com/containerd/containerd/api/services/leases/v1;leases";

// Leases service manages resources leases within the metadata store.
service Leases {
  // Create creates a new lease for managing changes to metadata. A lease
  // can be used to protect objects from being removed.
  rpc Create(CreateRequest) returns (CreateResponse);

  // Delete deletes the lease and makes any unreferenced objects created
  // during the lease eligible for garbage collection if not referenced
  // or retained by other resources during the lease.
  rpc Delete(DeleteRequest) returns (google.protobuf.Empty);

  // List lists all active leases, returning the full list of
  // leases and optionally including the referenced resources.
  rpc List(ListRequest) returns (ListResponse);

  // AddResource references the resource by the provided lease.
  rpc AddResource(AddResourceRequest) returns (google.protobuf.Empty);

  // DeleteResource dereferences the resource by the provided lease.
  rpc DeleteResource(DeleteResourceRequest) returns (google.protobuf.Empty);

  // ListResources lists all the resources referenced by the lease.
  rpc ListResources(ListResourcesRequest) returns (ListResourcesResponse);
}

// Lease is an object which retains resources while it exists.
message Lease {
  string id = 1;
  google.protobuf.Timestamp created_at = 2;
  map<string, string> labels = 3;
}

message CreateRequest {
  // ID is used to identity the lease, when the id is not set the service
  // generates a random identifier for the lease.
  string id = 1;
  map<string, string> labels = 3;
}

message CreateResponse {
  Lease lease = 1;
}

message DeleteRequest {
  string id = 1;

  // Sync indicates that the delete and cleanup should be done
  // synchronously before returning to the caller
  //
  // Default is false
  bool sync = 2;
}

message ListRequest {
  repeated string filters = 1;
}

message ListResponse {
  repeated Lease leases = 1;
}

message Resource {
  string id = 1;

  // For snapshotter resource, there are many snapshotter types here, like
  // overlayfs, devmapper etc. The type will be formatted with type,
  // like "snapshotter/overlayfs".
  string type = 2;
}

message AddResourceRequest {
  string id = 1;
  Resource resource = 2;
}

message DeleteResourceRequest {
  string id = 1;
  Resource resource = 2;
}

message ListResourcesRequest {
  string id = 1;
}

message ListResourcesResponse {
  repeated Resource resources = 1;
}

================================================
FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/mounts/v1/mounts.proto
================================================
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

syntax = "proto3";

package containerd.services.mounts.v1;

import "google/protobuf/empty.proto";
import "google/protobuf/field_mask.proto";
import "types/mount.proto";

option go_package = "github.com/containerd/containerd/api/services/mounts/v1;mounts";

// Mounts service manages mounts
service Mounts {
  rpc Activate(ActivateRequest) returns (ActivateResponse);
  rpc Deactivate(DeactivateRequest) returns (google.protobuf.Empty);
  rpc Info(InfoRequest) returns (InfoResponse);
  rpc Update(UpdateRequest) returns (UpdateResponse);
  rpc List(ListRequest) returns (stream ListMessage);
}

message ActivateRequest {
  string name = 1;
  repeated containerd.types.Mount mounts = 2;
  map<string, string> labels = 3;
  bool temporary = 4;
}

message ActivateResponse {
  containerd.types.ActivationInfo info = 1;
}

message DeactivateRequest {
  string name = 1;
}

message InfoRequest {
  string name = 1;
}

message InfoResponse {
  containerd.types.ActivationInfo info = 1;
}

message UpdateRequest {
  containerd.types.ActivationInfo info = 1;
  google.protobuf.FieldMask update_mask = 2;
}

message UpdateResponse {
  containerd.types.ActivationInfo info = 1;
}

message ListRequest {
  repeated string filters = 1;
}

message ListMessage {
  containerd.types.ActivationInfo info = 1;
}

================================================
FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto
================================================
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

syntax = "proto3";

package containerd.services.namespaces.v1;

import "google/protobuf/empty.proto";
import "google/protobuf/field_mask.proto";

option go_package = "github.com/containerd/containerd/api/services/namespaces/v1;namespaces";

// Namespaces provides the ability to manipulate containerd namespaces.
//
// All objects in the system are required to be a member of a namespace. If a
// namespace is deleted, all objects, including containers, images and
// snapshots, will be deleted, as well.
//
// Unless otherwise noted, operations in containerd apply only to the namespace
// supplied per request.
//
// I hope this goes without saying, but namespaces are themselves NOT
// namespaced.
service Namespaces {
  rpc Get(GetNamespaceRequest) returns (GetNamespaceResponse);
  rpc List(ListNamespacesRequest) returns (ListNamespacesResponse);
  rpc Create(CreateNamespaceRequest) returns (CreateNamespaceResponse);
  rpc Update(UpdateNamespaceRequest) returns (UpdateNamespaceResponse);
  rpc Delete(DeleteNamespaceRequest) returns (google.protobuf.Empty);
}

message Namespace {
  string name = 1;

  // Labels provides an area to include arbitrary data on namespaces.
  //
  // The combined size of a key/value pair cannot exceed 4096 bytes.
  //
  // Note that to add a new value to this field, read the existing set and
  // include the entire result in the update call.
  map<string, string> labels = 2;
}

message GetNamespaceRequest {
  string name = 1;
}

message GetNamespaceResponse {
  Namespace namespace = 1;
}

message ListNamespacesRequest {
  string filter = 1;
}

message ListNamespacesResponse {
  repeated Namespace namespaces = 1;
}

message CreateNamespaceRequest {
  Namespace namespace = 1;
}

message CreateNamespaceResponse {
  Namespace namespace = 1;
}

// UpdateNamespaceRequest updates the metadata for a namespace.
//
// The operation should follow semantics described in
// https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-mask,
// unless otherwise qualified.
message UpdateNamespaceRequest {
  // Namespace provides the target value, as declared by the mask, for the update.
  //
  // The namespace field must be set.
  Namespace namespace = 1;

  // UpdateMask specifies which fields to perform the update on. If empty,
  // the operation applies to all fields.
  //
  // For the most part, this applies only to selectively updating labels on
  // the namespace. While field masks are typically limited to ascii alphas
  // and digits, we just take everything after the "labels." as the map key.
  google.protobuf.FieldMask update_mask = 2;
}

message UpdateNamespaceResponse {
  Namespace namespace = 1;
}

message DeleteNamespaceRequest {
  string name = 1;
}

================================================
FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox.proto
================================================
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

syntax = "proto3";

// Sandbox is a v2 runtime extension that allows more complex execution environments for containers.
// This adds a notion of groups of containers that share same lifecycle and/or resources.
// A few good fits for sandbox can be:
// - A "pause" container in k8s, that acts as a parent process for child containers to hold network namespace.
// - (micro)VMs that launch a VM process and executes containers inside guest OS.
// containerd in this case remains implementation agnostic and delegates sandbox handling to runtimes.
// See proposal and discussion here: https://github.com/containerd/containerd/issues/4131
package containerd.services.sandbox.v1;

import "google/protobuf/any.proto";
import "google/protobuf/timestamp.proto";

import "types/metrics.proto";
import "types/mount.proto";
import "types/platform.proto";
import "types/sandbox.proto";

option go_package = "github.com/containerd/containerd/api/services/sandbox/v1;sandbox";

// Store provides a metadata storage interface for sandboxes. Similarly to `Containers`,
// sandbox object includes info required to start a new instance, but no runtime state.
// When running a new sandbox instance, store objects are used as base type to create from.
service Store {
  rpc Create(StoreCreateRequest) returns (StoreCreateResponse);
  rpc Update(StoreUpdateRequest) returns (StoreUpdateResponse);
  rpc Delete(StoreDeleteRequest) returns (StoreDeleteResponse);
  rpc List(StoreListRequest) returns (StoreListResponse);
  rpc Get(StoreGetRequest) returns (StoreGetResponse);
}

message StoreCreateRequest {
  containerd.types.Sandbox sandbox = 1;
}

message StoreCreateResponse {
  containerd.types.Sandbox sandbox = 1;
}

message StoreUpdateRequest {
  containerd.types.Sandbox sandbox = 1;
  repeated string fields = 2;
}

message StoreUpdateResponse {
  containerd.types.Sandbox sandbox = 1;
}

message StoreDeleteRequest {
  string sandbox_id = 1;
}

message StoreDeleteResponse {}

message StoreListRequest {
  repeated string filters = 1;
}

message StoreListResponse {
  repeated containerd.types.Sandbox list = 1;
}

message StoreGetRequest {
  string sandbox_id = 1;
}

message StoreGetResponse {
  containerd.types.Sandbox sandbox = 1;
}

// Controller is an interface to manage runtime sandbox instances.
service Controller {
  rpc Create(ControllerCreateRequest) returns (ControllerCreateResponse);
  rpc Start(ControllerStartRequest) returns (ControllerStartResponse);
  rpc Platform(ControllerPlatformRequest) returns (ControllerPlatformResponse);
  rpc Stop(ControllerStopRequest) returns (ControllerStopResponse);
  rpc Wait(ControllerWaitRequest) returns (ControllerWaitResponse);
  rpc Status(ControllerStatusRequest) returns (ControllerStatusResponse);
  rpc Shutdown(ControllerShutdownRequest) returns (ControllerShutdownResponse);
  rpc Metrics(ControllerMetricsRequest) returns (ControllerMetricsResponse);
  rpc Update(ControllerUpdateRequest) returns (ControllerUpdateResponse);
}

message ControllerCreateRequest {
  string sandbox_id = 1;
  repeated containerd.types.Mount rootfs = 2;
  google.protobuf.Any options = 3;
  string netns_path = 4;
  map<string, string> annotations = 5;
  containerd.types.Sandbox sandbox = 6;
  string sandboxer = 10;
}

message ControllerCreateResponse {
  string sandbox_id = 1;
}

message ControllerStartRequest {
  string sandbox_id = 1;
  string sandboxer = 10;
}

message ControllerStartResponse {
  string sandbox_id = 1;
  uint32 pid = 2;
  google.protobuf.Timestamp created_at = 3;
  map<string, string> labels = 4;
  // Address of the sandbox for containerd to connect,
  // for calling Task or other APIs serving in the sandbox.
  // it is in the form of ttrpc+unix://path/to/uds or grpc+vsock://<cid>:<port>.
  string address = 5;
  uint32 version = 6;
  google.protobuf.Any spec = 7;
}

message ControllerPlatformRequest {
  string sandbox_id = 1;
  string sandboxer = 10;
}

message ControllerPlatformResponse {
  containerd.types.Platform platform = 1;
}

message ControllerStopRequest {
  string sandbox_id = 1;
  uint32 timeout_secs = 2;
  string sandboxer = 10;
}

message ControllerStopResponse {}

message ControllerWaitRequest {
  string sandbox_id = 1;
  string sandboxer = 10;
}

message ControllerWaitResponse {
  uint32 exit_status = 1;
  google.protobuf.Timestamp exited_at = 2;
}

message ControllerStatusRequest {
  string sandbox_id = 1;
  bool verbose = 2;
  string sandboxer = 10;
}

message ControllerStatusResponse {
  string sandbox_id = 1;
  uint32 pid = 2;
  string state = 3;
  map<string, string> info = 4;
  google.protobuf.Timestamp created_at = 5;
  google.protobuf.Timestamp exited_at = 6;
  google.protobuf.Any extra = 7;
  // Address of the sandbox for containerd to connect,
  // for calling Task or other APIs serving in the sandbox.
  // it is in the form of ttrpc+unix://path/to/uds or grpc+vsock://<cid>:<port>.
  string address = 8;
  uint32 version = 9;
}

message ControllerShutdownRequest {
  string sandbox_id = 1;
  string sandboxer = 10;
}

message ControllerShutdownResponse {}

message ControllerMetricsRequest {
  string sandbox_id = 1;
  string sandboxer = 10;
}

message ControllerMetricsResponse {
  types.Metric metrics = 1;
}

message ControllerUpdateRequest {
  string sandbox_id = 1;
  string sandboxer = 2;
  containerd.types.Sandbox sandbox = 3;
  repeated string fields = 4;
}

message ControllerUpdateResponse {}

================================================
FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto
================================================
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.services.snapshots.v1; import "google/protobuf/empty.proto"; import "google/protobuf/field_mask.proto"; import "google/protobuf/timestamp.proto"; import "types/mount.proto"; option go_package = "github.com/containerd/containerd/api/services/snapshots/v1;snapshots"; // Snapshot service manages snapshots service Snapshots { rpc Prepare(PrepareSnapshotRequest) returns (PrepareSnapshotResponse); rpc View(ViewSnapshotRequest) returns (ViewSnapshotResponse); rpc Mounts(MountsRequest) returns (MountsResponse); rpc Commit(CommitSnapshotRequest) returns (google.protobuf.Empty); rpc Remove(RemoveSnapshotRequest) returns (google.protobuf.Empty); rpc Stat(StatSnapshotRequest) returns (StatSnapshotResponse); rpc Update(UpdateSnapshotRequest) returns (UpdateSnapshotResponse); rpc List(ListSnapshotsRequest) returns (stream ListSnapshotsResponse); rpc Usage(UsageRequest) returns (UsageResponse); rpc Cleanup(CleanupRequest) returns (google.protobuf.Empty); } message PrepareSnapshotRequest { string snapshotter = 1; string key = 2; string parent = 3; // Labels are arbitrary data on snapshots. // // The combined size of a key/value pair cannot exceed 4096 bytes. map labels = 4; } message PrepareSnapshotResponse { repeated containerd.types.Mount mounts = 1; } message ViewSnapshotRequest { string snapshotter = 1; string key = 2; string parent = 3; // Labels are arbitrary data on snapshots. // // The combined size of a key/value pair cannot exceed 4096 bytes. 
map labels = 4; } message ViewSnapshotResponse { repeated containerd.types.Mount mounts = 1; } message MountsRequest { string snapshotter = 1; string key = 2; } message MountsResponse { repeated containerd.types.Mount mounts = 1; } message RemoveSnapshotRequest { string snapshotter = 1; string key = 2; } message CommitSnapshotRequest { string snapshotter = 1; string name = 2; string key = 3; // Labels are arbitrary data on snapshots. // // The combined size of a key/value pair cannot exceed 4096 bytes. map labels = 4; string parent = 5; } message StatSnapshotRequest { string snapshotter = 1; string key = 2; } enum Kind { UNKNOWN = 0; VIEW = 1; ACTIVE = 2; COMMITTED = 3; } message Info { string name = 1; string parent = 2; Kind kind = 3; // CreatedAt provides the time at which the snapshot was created. google.protobuf.Timestamp created_at = 4; // UpdatedAt provides the time the info was last updated. google.protobuf.Timestamp updated_at = 5; // Labels are arbitrary data on snapshots. // // The combined size of a key/value pair cannot exceed 4096 bytes. map labels = 6; } message StatSnapshotResponse { Info info = 1; } message UpdateSnapshotRequest { string snapshotter = 1; Info info = 2; // UpdateMask specifies which fields to perform the update on. If empty, // the operation applies to all fields. // // In info, Name, Parent, Kind, Created are immutable, // other field may be updated using this mask. // If no mask is provided, all mutable field are updated. google.protobuf.FieldMask update_mask = 3; } message UpdateSnapshotResponse { Info info = 1; } message ListSnapshotsRequest { string snapshotter = 1; // Filters contains one or more filters using the syntax defined in the // containerd filter package. // // The returned result will be those that match any of the provided // filters. Expanded, images that match the following will be // returned: // // filters[0] or filters[1] or ... 
or filters[n-1] or filters[n] // // If filters is zero-length or nil, all items will be returned. repeated string filters = 2; } message ListSnapshotsResponse { repeated Info info = 1; } message UsageRequest { string snapshotter = 1; string key = 2; } message UsageResponse { int64 size = 1; int64 inodes = 2; } message CleanupRequest { string snapshotter = 1; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/streaming/v1/streaming.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.services.streaming.v1; import "google/protobuf/any.proto"; option go_package = "github.com/containerd/containerd/api/services/streaming/v1;streaming"; service Streaming { rpc Stream(stream google.protobuf.Any) returns (stream google.protobuf.Any); } message StreamInit { string id = 1; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.services.tasks.v1; import "google/protobuf/any.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/timestamp.proto"; import "types/descriptor.proto"; import "types/metrics.proto"; import "types/mount.proto"; import "types/task/task.proto"; option go_package = "github.com/containerd/containerd/api/services/tasks/v1;tasks"; service Tasks { // Create a task. rpc Create(CreateTaskRequest) returns (CreateTaskResponse); // Start a process. rpc Start(StartRequest) returns (StartResponse); // Delete a task and on disk state. rpc Delete(DeleteTaskRequest) returns (DeleteResponse); rpc DeleteProcess(DeleteProcessRequest) returns (DeleteResponse); rpc Get(GetRequest) returns (GetResponse); rpc List(ListTasksRequest) returns (ListTasksResponse); // Kill a task or process. 
rpc Kill(KillRequest) returns (google.protobuf.Empty); rpc Exec(ExecProcessRequest) returns (google.protobuf.Empty); rpc ResizePty(ResizePtyRequest) returns (google.protobuf.Empty); rpc CloseIO(CloseIORequest) returns (google.protobuf.Empty); rpc Pause(PauseTaskRequest) returns (google.protobuf.Empty); rpc Resume(ResumeTaskRequest) returns (google.protobuf.Empty); rpc ListPids(ListPidsRequest) returns (ListPidsResponse); rpc Checkpoint(CheckpointTaskRequest) returns (CheckpointTaskResponse); rpc Update(UpdateTaskRequest) returns (google.protobuf.Empty); rpc Metrics(MetricsRequest) returns (MetricsResponse); rpc Wait(WaitRequest) returns (WaitResponse); } message CreateTaskRequest { string container_id = 1; // RootFS provides the pre-chroot mounts to perform in the shim before // executing the container task. // // These are for mounts that cannot be performed in the user namespace. // Typically, these mounts should be resolved from snapshots specified on // the container object. repeated containerd.types.Mount rootfs = 3; string stdin = 4; string stdout = 5; string stderr = 6; bool terminal = 7; containerd.types.Descriptor checkpoint = 8; google.protobuf.Any options = 9; string runtime_path = 10; } message CreateTaskResponse { string container_id = 1; uint32 pid = 2; } message StartRequest { string container_id = 1; string exec_id = 2; } message StartResponse { uint32 pid = 1; } message DeleteTaskRequest { string container_id = 1; } message DeleteResponse { string id = 1; uint32 pid = 2; uint32 exit_status = 3; google.protobuf.Timestamp exited_at = 4; } message DeleteProcessRequest { string container_id = 1; string exec_id = 2; } message GetRequest { string container_id = 1; string exec_id = 2; } message GetResponse { containerd.v1.types.Process process = 1; } message ListTasksRequest { string filter = 1; } message ListTasksResponse { repeated containerd.v1.types.Process tasks = 1; } message KillRequest { string container_id = 1; string exec_id = 2; uint32 signal = 
3; bool all = 4; } message ExecProcessRequest { string container_id = 1; string stdin = 2; string stdout = 3; string stderr = 4; bool terminal = 5; // Spec for starting a process in the target container. // // For runc, this is a process spec, for example. google.protobuf.Any spec = 6; // id of the exec process string exec_id = 7; } message ExecProcessResponse {} message ResizePtyRequest { string container_id = 1; string exec_id = 2; uint32 width = 3; uint32 height = 4; } message CloseIORequest { string container_id = 1; string exec_id = 2; bool stdin = 3; } message PauseTaskRequest { string container_id = 1; } message ResumeTaskRequest { string container_id = 1; } message ListPidsRequest { string container_id = 1; } message ListPidsResponse { // Processes includes the process ID and additional process information repeated containerd.v1.types.ProcessInfo processes = 1; } message CheckpointTaskRequest { string container_id = 1; string parent_checkpoint = 2; google.protobuf.Any options = 3; } message CheckpointTaskResponse { repeated containerd.types.Descriptor descriptors = 1; } message UpdateTaskRequest { string container_id = 1; google.protobuf.Any resources = 2; map<string, string> annotations = 3; } message MetricsRequest { repeated string filters = 1; } message MetricsResponse { repeated types.Metric metrics = 1; } message WaitRequest { string container_id = 1; string exec_id = 2; } message WaitResponse { uint32 exit_status = 1; google.protobuf.Timestamp exited_at = 2; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/transfer/v1/transfer.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.services.transfer.v1; import "google/protobuf/any.proto"; import "google/protobuf/empty.proto"; option go_package = "github.com/containerd/containerd/api/services/transfer/v1;transfer"; service Transfer { rpc Transfer(TransferRequest) returns (google.protobuf.Empty); } message TransferRequest { google.protobuf.Any source = 1; google.protobuf.Any destination = 2; TransferOptions options = 3; } message TransferOptions { string progress_stream = 1; // Progress min interval } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ syntax = "proto3"; package containerd.services.events.ttrpc.v1; import "google/protobuf/empty.proto"; import "types/event.proto"; option go_package = "github.com/containerd/containerd/api/services/ttrpc/events/v1;events"; service Events { // Forward sends an event that has already been packaged into an envelope // with a timestamp and namespace. // // This is useful if earlier timestamping is required or when forwarding on // behalf of another component, namespace or publisher. rpc Forward(ForwardRequest) returns (google.protobuf.Empty); } message ForwardRequest { containerd.types.Envelope envelope = 1; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/version/v1/version.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.services.version.v1; import "google/protobuf/empty.proto"; // TODO(stevvooe): Should version service actually be versioned? 
option go_package = "github.com/containerd/containerd/api/services/version/v1;version"; service Version { rpc Version(google.protobuf.Empty) returns (VersionResponse); } message VersionResponse { string version = 1; string revision = 2; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/descriptor.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.types; option go_package = "github.com/containerd/containerd/api/types;types"; // Descriptor describes a blob in a content store. // // This descriptor can be used to reference content from an // oci descriptor found in a manifest. // See https://godoc.org/github.com/opencontainers/image-spec/specs-go/v1#Descriptor message Descriptor { string media_type = 1; string digest = 2; int64 size = 3; map<string, string> annotations = 5; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/event.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.types; import "google/protobuf/any.proto"; import "google/protobuf/timestamp.proto"; import "types/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/types;types"; message Envelope { option (containerd.types.fieldpath) = true; google.protobuf.Timestamp timestamp = 1; string namespace = 2; string topic = 3; google.protobuf.Any event = 4; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/fieldpath.proto ================================================ // Protocol Buffers for Go with Gadgets // // Copyright (c) 2013, The GoGo Authors. All rights reserved. // http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. syntax = "proto3"; package containerd.types; import "google/protobuf/descriptor.proto"; option go_package = "github.com/containerd/containerd/api/types;types"; extend google.protobuf.FileOptions { optional bool fieldpath_all = 63300; } extend google.protobuf.MessageOptions { optional bool fieldpath = 64400; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/introspection.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.types; import "google/protobuf/any.proto"; option go_package = "github.com/containerd/containerd/api/types;types"; message RuntimeRequest { string runtime_path = 1; // Options correspond to CreateTaskRequest.options. // This is needed to pass the runc binary path, etc. 
google.protobuf.Any options = 2; } message RuntimeVersion { string version = 1; string revision = 2; } message RuntimeInfo { string name = 1; RuntimeVersion version = 2; // Options correspond to RuntimeInfoRequest.Options (contains runc binary path, etc.) google.protobuf.Any options = 3; // OCI-compatible runtimes should use https://github.com/opencontainers/runtime-spec/blob/main/features.md google.protobuf.Any features = 4; // Annotations of the shim. Irrelevant to features.Annotations. map<string, string> annotations = 5; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/metrics.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.types; import "google/protobuf/any.proto"; import "google/protobuf/timestamp.proto"; option go_package = "github.com/containerd/containerd/api/types;types"; message Metric { google.protobuf.Timestamp timestamp = 1; string id = 2; google.protobuf.Any data = 3; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/mount.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.types; import "google/protobuf/timestamp.proto"; option go_package = "github.com/containerd/containerd/api/types;types"; // Mount describes mounts for a container. // // This type is the lingua franca of ContainerD. All services provide mounts // to be used with the container at creation time. // // The Mount type follows the structure of the mount syscall, including a type, // source, target and options. message Mount { // Type defines the nature of the mount. string type = 1; // Source specifies the name of the mount. Depending on mount type, this // may be a volume name or a host path, or even ignored. string source = 2; // Target path in container string target = 3; // Options specifies zero or more fstab style mount options. repeated string options = 4; } message ActiveMount { Mount mount = 1; google.protobuf.Timestamp mounted_at = 2; string mount_point = 3; map<string, string> data = 4; } message ActivationInfo { string name = 1; repeated ActiveMount active = 2; repeated Mount system = 3; map<string, string> labels = 4; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/platform.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.types; option go_package = "github.com/containerd/containerd/api/types;types"; // Platform follows the structure of the OCI platform specification, from // descriptors. message Platform { string os = 1; string architecture = 2; string variant = 3; string os_version = 4; repeated string os_features = 5; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/runc/options/oci.proto ================================================ syntax = "proto3"; package containerd.runc.v1; option go_package = "github.com/containerd/containerd/api/types/runc/options;options"; message Options { // disable pivot root when creating a container bool no_pivot_root = 1; // create a new keyring for the container bool no_new_keyring = 2; // place the shim in a cgroup string shim_cgroup = 3; // set the I/O's pipes uid uint32 io_uid = 4; // set the I/O's pipes gid uint32 io_gid = 5; // binary name of the runc binary string binary_name = 6; // runc root directory string root = 7; // criu binary path. // // Removed in containerd v2.0: string criu_path = 8; reserved 8; // enable systemd cgroups bool systemd_cgroup = 9; // criu image path string criu_image_path = 10; // criu work path string criu_work_path = 11; // task api address, can be a unix domain socket, or vsock address. // it is in the form of ttrpc+unix://path/to/uds or grpc+vsock://<cid>:<port>. string task_api_address = 12; // task api version, currently supported value is 2 and 3. 
uint32 task_api_version = 13; } message CheckpointOptions { // exit the container after a checkpoint bool exit = 1; // checkpoint open tcp connections bool open_tcp = 2; // checkpoint external unix sockets bool external_unix_sockets = 3; // checkpoint terminals (ptys) bool terminal = 4; // allow checkpointing of file locks bool file_locks = 5; // restore provided namespaces as empty namespaces repeated string empty_namespaces = 6; // set the cgroups mode, soft, full, strict string cgroups_mode = 7; // checkpoint image path string image_path = 8; // checkpoint work path string work_path = 9; } message ProcessDetails { // exec process id if the process is managed by a shim string exec_id = 1; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/runtimeoptions/v1/api.proto ================================================ // To regenerate api.pb.go run `make protos` syntax = "proto3"; package runtimeoptions.v1; option go_package = "github.com/containerd/containerd/api/types/runtimeoptions/v1;runtimeoptions"; message Options { // TypeUrl specifies the type of the content inside the config file. string type_url = 1; // ConfigPath specifies the filesystem location of the config file // used by the runtime. string config_path = 2; // Blob specifies an in-memory TOML blob passed from containerd's configuration section // for this runtime. This will be used if config_path is not specified. bytes config_body = 3; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/sandbox.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.types; import "google/protobuf/any.proto"; import "google/protobuf/timestamp.proto"; option go_package = "github.com/containerd/containerd/api/types;types"; // Sandbox represents a sandbox metadata object that keeps all info required by controller to // work with a particular instance. message Sandbox { // SandboxID is a unique instance identifier within namespace string sandbox_id = 1; message Runtime { // Name is the name of the runtime. string name = 1; // Options specify additional runtime initialization options for the shim (this data will be available in StartShim). // Typically this data expected to be runtime shim implementation specific. google.protobuf.Any options = 2; } // Runtime specifies which runtime to use for executing this container. Runtime runtime = 2; // Spec is sandbox configuration (kin of OCI runtime spec), spec's data will be written to a config.json file in the // bundle directory (similary to OCI spec). google.protobuf.Any spec = 3; // Labels provides an area to include arbitrary data on containers. map<string, string> labels = 4; // CreatedAt is the time the container was first created. google.protobuf.Timestamp created_at = 5; // UpdatedAt is the last time the container was mutated. google.protobuf.Timestamp updated_at = 6; // Extensions allow clients to provide optional blobs that can be handled by runtime. map<string, google.protobuf.Any> extensions = 7; // Sandboxer is the name of the sandbox controller who manages the sandbox. 
string sandboxer = 10; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/task/task.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.v1.types; import "google/protobuf/any.proto"; import "google/protobuf/timestamp.proto"; option go_package = "github.com/containerd/containerd/api/types/task"; enum Status { UNKNOWN = 0; CREATED = 1; RUNNING = 2; STOPPED = 3; PAUSED = 4; PAUSING = 5; } message Process { string container_id = 1; string id = 2; uint32 pid = 3; Status status = 4; string stdin = 5; string stdout = 6; string stderr = 7; bool terminal = 8; uint32 exit_status = 9; google.protobuf.Timestamp exited_at = 10; } message ProcessInfo { // PID is the process ID. uint32 pid = 1; // Info contains additional process information. // // Info varies by platform. google.protobuf.Any info = 2; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/transfer/container.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.types.transfer; option go_package = "github.com/containerd/containerd/api/types/transfer"; // ContainerPath represents a path within an active container's // filesystem. It acts as either a source or destination in a transfer // operation, identifying the container and path for archive operations. message ContainerPath { string container_id = 1; string path = 2; // When true and path is a directory, return only the directory entry // itself without walking into its contents. This is useful for // stat-like operations where only the directory's metadata is needed. bool no_walk = 3; // When true, preserve the UID/GID from tar headers when extracting // files. When false, extracted files are owned by the extracting // process. bool preserve_ownership = 4; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/transfer/imagestore.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ syntax = "proto3"; package containerd.types.transfer; import "types/platform.proto"; option go_package = "github.com/containerd/containerd/api/types/transfer"; message ImageStore { string name = 1; map<string, string> labels = 2; // Content filters repeated types.Platform platforms = 3; bool all_metadata = 4; uint32 manifest_limit = 5; // Import naming // extra_references are used to set image names on imports of sub-images from the index repeated ImageReference extra_references = 6; // Unpack Configuration, multiple allowed repeated UnpackConfiguration unpacks = 10; } message UnpackConfiguration { // platform is the platform to unpack for, used for resolving manifest and snapshotter // if not provided types.Platform platform = 1; // snapshotter to unpack to, if not provided default for platform shoudl be used string snapshotter = 2; } // ImageReference is used to create or find a reference for an image message ImageReference { string name = 1; // is_prefix determines whether the Name should be considered // a prefix (without tag or digest). // For lookup, this may allow matching multiple tags. // For store, this must have a tag or digest added. bool is_prefix = 2; // allow_overwrite allows overwriting or ignoring the name if // another reference is provided (such as through an annotation). // Only used if IsPrefix is true. bool allow_overwrite = 3; // add_digest adds the manifest digest to the reference. // For lookup, this allows matching tags with any digest. // For store, this allows adding the digest to the name. // Only used if IsPrefix is true. bool add_digest = 4; // skip_named_digest only considers digest references which do not // have a non-digested named reference. // For lookup, this will deduplicate digest references when there is a named match. // For store, this only adds this digest reference when there is no matching full // name reference from the prefix. // Only used if IsPrefix is true. 
bool skip_named_digest = 5; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/transfer/importexport.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.types.transfer; import "types/platform.proto"; option go_package = "github.com/containerd/containerd/api/types/transfer"; message ImageImportStream { // Stream is used to identify the binary input stream for the import operation. // The stream uses the transfer binary stream protocol with the client as the sender. // The binary data is expected to be a raw tar stream. string stream = 1; string media_type = 2; bool force_compress = 3; } message ImageExportStream { // Stream is used to identify the binary output stream for the export operation. // The stream uses the transfer binary stream protocol with the server as the sender. // The binary data is expected to be a raw tar stream. string stream = 1; string media_type = 2; // The specified platforms repeated types.Platform platforms = 3; // Whether to include all platforms bool all_platforms = 4; // Skips the creation of the Docker compatible manifest.json file bool skip_compatibility_manifest = 5; // Excludes non-distributable blobs such as Windows base layers. 
bool skip_non_distributable = 6; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/transfer/progress.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.types.transfer; import "types/descriptor.proto"; option go_package = "github.com/containerd/containerd/api/types/transfer"; message Progress { string event = 1; string name = 2; repeated string parents = 3; int64 progress = 4; int64 total = 5; containerd.types.Descriptor desc = 6; } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/transfer/registry.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ syntax = "proto3"; package containerd.types.transfer; import "google/protobuf/timestamp.proto"; option go_package = "github.com/containerd/containerd/api/types/transfer"; message OCIRegistry { string reference = 1; RegistryResolver resolver = 2; } enum HTTPDebug { DISABLED = 0; // Enable HTTP debugging DEBUG = 1; // Enable HTTP requests tracing TRACE = 2; // Enable both HTTP debugging and requests tracing BOTH = 3; } message RegistryResolver { // auth_stream is used to refer to a stream which auth callbacks may be // made on. string auth_stream = 1; // Headers map<string, string> headers = 2; string host_dir = 3; string default_scheme = 4; // Force skip verify // CA callback? Client TLS callback? // Whether to debug/trace HTTP requests to OCI registry. HTTPDebug http_debug = 5; // Stream ID to use for HTTP logs (when logs are streamed to client). // When empty, logs are written to containerd logs. string logs_stream = 6; } // AuthRequest is sent as a callback on a stream message AuthRequest { // host is the registry host string host = 1; // reference is the namespace and repository name requested from the registry string reference = 2; // wwwauthenticate is the HTTP WWW-Authenticate header values returned from the registry repeated string wwwauthenticate = 3; } enum AuthType { NONE = 0; // CREDENTIALS is used to exchange username/password for access token // using an oauth or "Docker Registry Token" server CREDENTIALS = 1; // REFRESH is used to exchange secret for access token using an oauth // or "Docker Registry Token" server REFRESH = 2; // HEADER is used to set the HTTP Authorization header to secret // directly for the registry. 
// Value should be ` ` HEADER = 3; } message AuthResponse { AuthType authType = 1; string secret = 2; string username = 3; google.protobuf.Timestamp expire_at = 4; // TODO: Stream error } ================================================ FILE: crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/transfer/streaming.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.types.transfer; option go_package = "github.com/containerd/containerd/api/types/transfer"; message Data { bytes data = 1; } message WindowUpdate { int32 update = 1; } // ReadStream carries data from the client to the server (import // direction). The client sends data through the stream and the // server reads it. message ReadStream { string stream = 1; string media_type = 2; } // WriteStream carries data from the server to the client (export // direction). The server writes data into the stream and the // client receives it. message WriteStream { string stream = 1; string media_type = 2; } ================================================ FILE: crates/client/vendor/google/protobuf/any.proto ================================================ // Protocol Buffers - Google's data interchange format // Copyright 2008 Google Inc. All rights reserved. 
// https://developers.google.com/protocol-buffers/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
syntax = "proto3"; package google.protobuf; option csharp_namespace = "Google.Protobuf.WellKnownTypes"; option go_package = "google.golang.org/protobuf/types/known/anypb"; option java_package = "com.google.protobuf"; option java_outer_classname = "AnyProto"; option java_multiple_files = true; option objc_class_prefix = "GPB"; // `Any` contains an arbitrary serialized protocol buffer message along with a // URL that describes the type of the serialized message. // // Protobuf library provides support to pack/unpack Any values in the form // of utility functions or additional generated methods of the Any type. // // Example 1: Pack and unpack a message in C++. // // Foo foo = ...; // Any any; // any.PackFrom(foo); // ... // if (any.UnpackTo(&foo)) { // ... // } // // Example 2: Pack and unpack a message in Java. // // Foo foo = ...; // Any any = Any.pack(foo); // ... // if (any.is(Foo.class)) { // foo = any.unpack(Foo.class); // } // // Example 3: Pack and unpack a message in Python. // // foo = Foo(...) // any = Any() // any.Pack(foo) // ... // if any.Is(Foo.DESCRIPTOR): // any.Unpack(foo) // ... // // Example 4: Pack and unpack a message in Go // // foo := &pb.Foo{...} // any, err := anypb.New(foo) // if err != nil { // ... // } // ... // foo := &pb.Foo{} // if err := any.UnmarshalTo(foo); err != nil { // ... // } // // The pack methods provided by protobuf library will by default use // 'type.googleapis.com/full.type.name' as the type URL and the unpack // methods only use the fully qualified type name after the last '/' // in the type URL, for example "foo.bar.com/x/y.z" will yield type // name "y.z". // // // JSON // ==== // The JSON representation of an `Any` value uses the regular // representation of the deserialized, embedded message, with an // additional field `@type` which contains the type URL. 
Example: // // package google.profile; // message Person { // string first_name = 1; // string last_name = 2; // } // // { // "@type": "type.googleapis.com/google.profile.Person", // "firstName": <string>, // "lastName": <string> // } // // If the embedded message type is well-known and has a custom JSON // representation, that representation will be embedded adding a field // `value` which holds the custom JSON in addition to the `@type` // field. Example (for message [google.protobuf.Duration][]): // // { // "@type": "type.googleapis.com/google.protobuf.Duration", // "value": "1.212s" // } // message Any { // A URL/resource name that uniquely identifies the type of the serialized // protocol buffer message. This string must contain at least // one "/" character. The last segment of the URL's path must represent // the fully qualified name of the type (as in // `path/google.protobuf.Duration`). The name should be in a canonical form // (e.g., leading "." is not accepted). // // In practice, teams usually precompile into the binary all types that they // expect it to use in the context of Any. However, for URLs which use the // scheme `http`, `https`, or no scheme, one can optionally set up a type // server that maps type URLs to message definitions as follows: // // * If no scheme is provided, `https` is assumed. // * An HTTP GET on the URL must yield a [google.protobuf.Type][] // value in binary format, or produce an error. // * Applications are allowed to cache lookup results based on the // URL, or have them precompiled into a binary to avoid any // lookup. Therefore, binary compatibility needs to be preserved // on changes to types. (Use versioned type names to manage // breaking changes.) // // Note: this functionality is not currently available in the official // protobuf release, and it is not used for type URLs beginning with // type.googleapis.com. // // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. 
// string type_url = 1; // Must be a valid serialized protocol buffer of the above specified type. bytes value = 2; } ================================================ FILE: crates/client/vendor/google/protobuf/descriptor.proto ================================================ // Protocol Buffers - Google's data interchange format // Copyright 2008 Google Inc. All rights reserved. // https://developers.google.com/protocol-buffers/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// Author: kenton@google.com (Kenton Varda) // Based on original Protocol Buffers design by // Sanjay Ghemawat, Jeff Dean, and others. // // The messages in this file describe the definitions found in .proto files. // A valid .proto file can be translated directly to a FileDescriptorProto // without any other information (e.g. without reading its imports). syntax = "proto2"; package google.protobuf; option go_package = "google.golang.org/protobuf/types/descriptorpb"; option java_package = "com.google.protobuf"; option java_outer_classname = "DescriptorProtos"; option csharp_namespace = "Google.Protobuf.Reflection"; option objc_class_prefix = "GPB"; option cc_enable_arenas = true; // descriptor.proto must be optimized for speed because reflection-based // algorithms don't work during bootstrapping. option optimize_for = SPEED; // The protocol compiler can output a FileDescriptorSet containing the .proto // files it parses. message FileDescriptorSet { repeated FileDescriptorProto file = 1; } // Describes a complete .proto file. message FileDescriptorProto { optional string name = 1; // file name, relative to root of source tree optional string package = 2; // e.g. "foo", "foo.bar", etc. // Names of files imported by this file. repeated string dependency = 3; // Indexes of the public imported files in the dependency list above. repeated int32 public_dependency = 10; // Indexes of the weak imported files in the dependency list. // For Google-internal migration only. Do not use. repeated int32 weak_dependency = 11; // All top-level definitions in this file. repeated DescriptorProto message_type = 4; repeated EnumDescriptorProto enum_type = 5; repeated ServiceDescriptorProto service = 6; repeated FieldDescriptorProto extension = 7; optional FileOptions options = 8; // This field contains optional information about the original source code. 
// You may safely remove this entire field without harming runtime // functionality of the descriptors -- the information is needed only by // development tools. optional SourceCodeInfo source_code_info = 9; // The syntax of the proto file. // The supported values are "proto2" and "proto3". optional string syntax = 12; } // Describes a message type. message DescriptorProto { optional string name = 1; repeated FieldDescriptorProto field = 2; repeated FieldDescriptorProto extension = 6; repeated DescriptorProto nested_type = 3; repeated EnumDescriptorProto enum_type = 4; message ExtensionRange { optional int32 start = 1; // Inclusive. optional int32 end = 2; // Exclusive. optional ExtensionRangeOptions options = 3; } repeated ExtensionRange extension_range = 5; repeated OneofDescriptorProto oneof_decl = 8; optional MessageOptions options = 7; // Range of reserved tag numbers. Reserved tag numbers may not be used by // fields or extension ranges in the same message. Reserved ranges may // not overlap. message ReservedRange { optional int32 start = 1; // Inclusive. optional int32 end = 2; // Exclusive. } repeated ReservedRange reserved_range = 9; // Reserved field names, which may not be used by fields in the same message. // A given name may only be reserved once. repeated string reserved_name = 10; } message ExtensionRangeOptions { // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; // Clients can define custom options in extensions of this message. See above. extensions 1000 to max; } // Describes a field within a message. message FieldDescriptorProto { enum Type { // 0 is reserved for errors. // Order is weird for historical reasons. TYPE_DOUBLE = 1; TYPE_FLOAT = 2; // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if // negative values are likely. TYPE_INT64 = 3; TYPE_UINT64 = 4; // Not ZigZag encoded. Negative numbers take 10 bytes. 
Use TYPE_SINT32 if // negative values are likely. TYPE_INT32 = 5; TYPE_FIXED64 = 6; TYPE_FIXED32 = 7; TYPE_BOOL = 8; TYPE_STRING = 9; // Tag-delimited aggregate. // Group type is deprecated and not supported in proto3. However, Proto3 // implementations should still be able to parse the group wire format and // treat group fields as unknown fields. TYPE_GROUP = 10; TYPE_MESSAGE = 11; // Length-delimited aggregate. // New in version 2. TYPE_BYTES = 12; TYPE_UINT32 = 13; TYPE_ENUM = 14; TYPE_SFIXED32 = 15; TYPE_SFIXED64 = 16; TYPE_SINT32 = 17; // Uses ZigZag encoding. TYPE_SINT64 = 18; // Uses ZigZag encoding. } enum Label { // 0 is reserved for errors LABEL_OPTIONAL = 1; LABEL_REQUIRED = 2; LABEL_REPEATED = 3; } optional string name = 1; optional int32 number = 3; optional Label label = 4; // If type_name is set, this need not be set. If both this and type_name // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. optional Type type = 5; // For message and enum types, this is the name of the type. If the name // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping // rules are used to find the type (i.e. first the nested types within this // message are searched, then within the parent, on up to the root // namespace). optional string type_name = 6; // For extensions, this is the name of the type being extended. It is // resolved in the same manner as type_name. optional string extendee = 2; // For numeric types, contains the original text representation of the value. // For booleans, "true" or "false". // For strings, contains the default text contents (not escaped in any way). // For bytes, contains the C escaped value. All bytes >= 128 are escaped. // TODO(kenton): Base-64 encode? optional string default_value = 7; // If set, gives the index of a oneof in the containing type's oneof_decl // list. This field is a member of that oneof. optional int32 oneof_index = 9; // JSON name of this field. 
The value is set by protocol compiler. If the // user has set a "json_name" option on this field, that option's value // will be used. Otherwise, it's deduced from the field's name by converting // it to camelCase. optional string json_name = 10; optional FieldOptions options = 8; // If true, this is a proto3 "optional". When a proto3 field is optional, it // tracks presence regardless of field type. // // When proto3_optional is true, this field must be belong to a oneof to // signal to old proto3 clients that presence is tracked for this field. This // oneof is known as a "synthetic" oneof, and this field must be its sole // member (each proto3 optional field gets its own synthetic oneof). Synthetic // oneofs exist in the descriptor only, and do not generate any API. Synthetic // oneofs must be ordered after all "real" oneofs. // // For message fields, proto3_optional doesn't create any semantic change, // since non-repeated message fields always track presence. However it still // indicates the semantic detail of whether the user wrote "optional" or not. // This can be useful for round-tripping the .proto file. For consistency we // give message fields a synthetic oneof also, even though it is not required // to track presence. This is especially important because the parser can't // tell if a field is a message or an enum, so it must always create a // synthetic oneof. // // Proto2 optional fields do not set this flag, because they already indicate // optional with `LABEL_OPTIONAL`. optional bool proto3_optional = 17; } // Describes a oneof. message OneofDescriptorProto { optional string name = 1; optional OneofOptions options = 2; } // Describes an enum type. message EnumDescriptorProto { optional string name = 1; repeated EnumValueDescriptorProto value = 2; optional EnumOptions options = 3; // Range of reserved numeric values. Reserved values may not be used by // entries in the same enum. Reserved ranges may not overlap. 
// // Note that this is distinct from DescriptorProto.ReservedRange in that it // is inclusive such that it can appropriately represent the entire int32 // domain. message EnumReservedRange { optional int32 start = 1; // Inclusive. optional int32 end = 2; // Inclusive. } // Range of reserved numeric values. Reserved numeric values may not be used // by enum values in the same enum declaration. Reserved ranges may not // overlap. repeated EnumReservedRange reserved_range = 4; // Reserved enum value names, which may not be reused. A given name may only // be reserved once. repeated string reserved_name = 5; } // Describes a value within an enum. message EnumValueDescriptorProto { optional string name = 1; optional int32 number = 2; optional EnumValueOptions options = 3; } // Describes a service. message ServiceDescriptorProto { optional string name = 1; repeated MethodDescriptorProto method = 2; optional ServiceOptions options = 3; } // Describes a method of a service. message MethodDescriptorProto { optional string name = 1; // Input and output type names. These are resolved in the same way as // FieldDescriptorProto.type_name, but must refer to a message type. optional string input_type = 2; optional string output_type = 3; optional MethodOptions options = 4; // Identifies if client streams multiple client messages optional bool client_streaming = 5 [default = false]; // Identifies if server streams multiple server messages optional bool server_streaming = 6 [default = false]; } // =================================================================== // Options // Each of the definitions above may have "options" attached. These are // just annotations which may cause code to be generated slightly differently // or may contain hints for code that manipulates protocol messages. // // Clients may define custom options as extensions of the *Options messages. // These extensions may not yet be known at parsing time, so the parser cannot // store the values in them. 
Instead it stores them in a field in the *Options // message called uninterpreted_option. This field must have the same name // across all *Options messages. We then use this field to populate the // extensions when we build a descriptor, at which point all protos have been // parsed and so all extensions are known. // // Extension numbers for custom options may be chosen as follows: // * For options which will only be used within a single application or // organization, or for experimental options, use field numbers 50000 // through 99999. It is up to you to ensure that you do not use the // same number for multiple options. // * For options which will be published and used publicly by multiple // independent entities, e-mail protobuf-global-extension-registry@google.com // to reserve extension numbers. Simply provide your project name (e.g. // Objective-C plugin) and your project website (if available) -- there's no // need to explain how you intend to use them. Usually you only need one // extension number. You can declare multiple options with only one extension // number by putting them in a sub-message. See the Custom Options section of // the docs for examples: // https://developers.google.com/protocol-buffers/docs/proto#options // If this turns out to be popular, a web service will be set up // to automatically assign option numbers. message FileOptions { // Sets the Java package where classes generated from this .proto will be // placed. By default, the proto package is used, but this is often // inappropriate because proto packages do not normally start with backwards // domain names. optional string java_package = 1; // Controls the name of the wrapper Java class generated for the .proto file. // That class will always contain the .proto file's getDescriptor() method as // well as any top-level extensions defined in the .proto file. 
// If java_multiple_files is disabled, then all the other classes from the // .proto file will be nested inside the single wrapper outer class. optional string java_outer_classname = 8; // If enabled, then the Java code generator will generate a separate .java // file for each top-level message, enum, and service defined in the .proto // file. Thus, these types will *not* be nested inside the wrapper class // named by java_outer_classname. However, the wrapper class will still be // generated to contain the file's getDescriptor() method as well as any // top-level extensions defined in the file. optional bool java_multiple_files = 10 [default = false]; // This option does nothing. optional bool java_generate_equals_and_hash = 20 [deprecated=true]; // If set true, then the Java2 code generator will generate code that // throws an exception whenever an attempt is made to assign a non-UTF-8 // byte sequence to a string field. // Message reflection will do the same. // However, an extension field still accepts non-UTF-8 byte sequences. // This option has no effect on when used with the lite runtime. optional bool java_string_check_utf8 = 27 [default = false]; // Generated classes can be optimized for speed or code size. enum OptimizeMode { SPEED = 1; // Generate complete code for parsing, serialization, // etc. CODE_SIZE = 2; // Use ReflectionOps to implement these methods. LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. } optional OptimizeMode optimize_for = 9 [default = SPEED]; // Sets the Go package where structs generated from this .proto will be // placed. If omitted, the Go package will be derived from the following: // - The basename of the package import path, if provided. // - Otherwise, the package statement in the .proto file, if present. // - Otherwise, the basename of the .proto file, without extension. optional string go_package = 11; // Should generic services be generated in each language? 
"Generic" services // are not specific to any particular RPC system. They are generated by the // main code generators in each language (without additional plugins). // Generic services were the only kind of service generation supported by // early versions of google.protobuf. // // Generic services are now considered deprecated in favor of using plugins // that generate code specific to your particular RPC system. Therefore, // these default to false. Old code which depends on generic services should // explicitly set them to true. optional bool cc_generic_services = 16 [default = false]; optional bool java_generic_services = 17 [default = false]; optional bool py_generic_services = 18 [default = false]; optional bool php_generic_services = 42 [default = false]; // Is this file deprecated? // Depending on the target platform, this can emit Deprecated annotations // for everything in the file, or it will be completely ignored; in the very // least, this is a formalization for deprecating files. optional bool deprecated = 23 [default = false]; // Enables the use of arenas for the proto messages in this file. This applies // only to generated classes for C++. optional bool cc_enable_arenas = 31 [default = true]; // Sets the objective c class prefix which is prepended to all objective c // generated classes from this .proto. There is no default. optional string objc_class_prefix = 36; // Namespace for generated classes; defaults to the package. optional string csharp_namespace = 37; // By default Swift generators will take the proto package and CamelCase it // replacing '.' with underscore and use that to prefix the types/symbols // defined. When this options is provided, they will use this value instead // to prefix the types/symbols defined. optional string swift_prefix = 39; // Sets the php class prefix which is prepended to all php generated classes // from this .proto. Default is empty. 
optional string php_class_prefix = 40; // Use this option to change the namespace of php generated classes. Default // is empty. When this option is empty, the package name will be used for // determining the namespace. optional string php_namespace = 41; // Use this option to change the namespace of php generated metadata classes. // Default is empty. When this option is empty, the proto file name will be // used for determining the namespace. optional string php_metadata_namespace = 44; // Use this option to change the package of ruby generated classes. Default // is empty. When this option is not set, the package name will be used for // determining the ruby package. optional string ruby_package = 45; // The parser stores options it doesn't recognize here. // See the documentation for the "Options" section above. repeated UninterpretedOption uninterpreted_option = 999; // Clients can define custom options in extensions of this message. // See the documentation for the "Options" section above. extensions 1000 to max; reserved 38; } message MessageOptions { // Set true to use the old proto1 MessageSet wire format for extensions. // This is provided for backwards-compatibility with the MessageSet wire // format. You should not use this for any other reason: It's less // efficient, has fewer features, and is more complicated. // // The message must be defined exactly as follows: // message Foo { // option message_set_wire_format = true; // extensions 4 to max; // } // Note that the message cannot have any defined fields; MessageSets only // have extensions. // // All extensions of your type must be singular messages; e.g. they cannot // be int32s, enums, or repeated messages. // // Because this is an option, the above two restrictions are not enforced by // the protocol compiler. optional bool message_set_wire_format = 1 [default = false]; // Disables the generation of the standard "descriptor()" accessor, which can // conflict with a field of the same name. 
This is meant to make migration // from proto1 easier; new code should avoid fields named "descriptor". optional bool no_standard_descriptor_accessor = 2 [default = false]; // Is this message deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the message, or it will be completely ignored; in the very least, // this is a formalization for deprecating messages. optional bool deprecated = 3 [default = false]; reserved 4, 5, 6; // Whether the message is an automatically generated map entry type for the // maps field. // // For maps fields: // map<KeyType, ValueType> map_field = 1; // The parsed descriptor looks like: // message MapFieldEntry { // option map_entry = true; // optional KeyType key = 1; // optional ValueType value = 2; // } // repeated MapFieldEntry map_field = 1; // // Implementations may choose not to generate the map_entry=true message, but // use a native map in the target language to hold the keys and values. // The reflection APIs in such implementations still need to work as // if the field is a repeated message field. // // NOTE: Do not set the option in .proto files. Always use the maps syntax // instead. The option should only be implicitly set by the proto compiler // parser. optional bool map_entry = 7; reserved 8; // javalite_serializable reserved 9; // javanano_as_lite // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; // Clients can define custom options in extensions of this message. See above. extensions 1000 to max; } message FieldOptions { // The ctype option instructs the C++ code generator to use a different // representation of the field than it normally would. See the specific // options below. This option is not yet implemented in the open source // release -- sorry, we'll try to include it in a future version! optional CType ctype = 1 [default = STRING]; enum CType { // Default mode. 
STRING = 0; CORD = 1; STRING_PIECE = 2; } // The packed option can be enabled for repeated primitive fields to enable // a more efficient representation on the wire. Rather than repeatedly // writing the tag and type for each element, the entire array is encoded as // a single length-delimited blob. In proto3, only explicit setting it to // false will avoid using packed encoding. optional bool packed = 2; // The jstype option determines the JavaScript type used for values of the // field. The option is permitted only for 64 bit integral and fixed types // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING // is represented as JavaScript string, which avoids loss of precision that // can happen when a large value is converted to a floating point JavaScript. // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to // use the JavaScript "number" type. The behavior of the default option // JS_NORMAL is implementation dependent. // // This option is an enum to permit additional types to be added, e.g. // goog.math.Integer. optional JSType jstype = 6 [default = JS_NORMAL]; enum JSType { // Use the default type. JS_NORMAL = 0; // Use JavaScript strings. JS_STRING = 1; // Use JavaScript numbers. JS_NUMBER = 2; } // Should this field be parsed lazily? Lazy applies only to message-type // fields. It means that when the outer message is initially parsed, the // inner message's contents will not be parsed but instead stored in encoded // form. The inner message will actually be parsed when it is first accessed. // // This is only a hint. Implementations are free to choose whether to use // eager or lazy parsing regardless of the value of this option. However, // setting this option true suggests that the protocol author believes that // using lazy parsing on this field is worth the additional bookkeeping // overhead typically needed to implement it. 
// // This option does not affect the public interface of any generated code; // all method signatures remain the same. Furthermore, thread-safety of the // interface is not affected by this option; const methods remain safe to // call from multiple threads concurrently, while non-const methods continue // to require exclusive access. // // // Note that implementations may choose not to check required fields within // a lazy sub-message. That is, calling IsInitialized() on the outer message // may return true even if the inner message has missing required fields. // This is necessary because otherwise the inner message would have to be // parsed in order to perform the check, defeating the purpose of lazy // parsing. An implementation which chooses not to check required fields // must be consistent about it. That is, for any particular sub-message, the // implementation must either *always* check its required fields, or *never* // check its required fields, regardless of whether or not the message has // been parsed. optional bool lazy = 5 [default = false]; // Is this field deprecated? // Depending on the target platform, this can emit Deprecated annotations // for accessors, or it will be completely ignored; in the very least, this // is a formalization for deprecating fields. optional bool deprecated = 3 [default = false]; // For Google-internal migration only. Do not use. optional bool weak = 10 [default = false]; // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; // Clients can define custom options in extensions of this message. See above. extensions 1000 to max; reserved 4; // removed jtype } message OneofOptions { // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; // Clients can define custom options in extensions of this message. See above. 
extensions 1000 to max; } message EnumOptions { // Set this option to true to allow mapping different tag names to the same // value. optional bool allow_alias = 2; // Is this enum deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the enum, or it will be completely ignored; in the very least, this // is a formalization for deprecating enums. optional bool deprecated = 3 [default = false]; reserved 5; // javanano_as_lite // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; // Clients can define custom options in extensions of this message. See above. extensions 1000 to max; } message EnumValueOptions { // Is this enum value deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the enum value, or it will be completely ignored; in the very least, // this is a formalization for deprecating enum values. optional bool deprecated = 1 [default = false]; // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; // Clients can define custom options in extensions of this message. See above. extensions 1000 to max; } message ServiceOptions { // Note: Field numbers 1 through 32 are reserved for Google's internal RPC // framework. We apologize for hoarding these numbers to ourselves, but // we were already using them long before we decided to release Protocol // Buffers. // Is this service deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the service, or it will be completely ignored; in the very least, // this is a formalization for deprecating services. optional bool deprecated = 33 [default = false]; // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; // Clients can define custom options in extensions of this message. See above. 
extensions 1000 to max; } message MethodOptions { // Note: Field numbers 1 through 32 are reserved for Google's internal RPC // framework. We apologize for hoarding these numbers to ourselves, but // we were already using them long before we decided to release Protocol // Buffers. // Is this method deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the method, or it will be completely ignored; in the very least, // this is a formalization for deprecating methods. optional bool deprecated = 33 [default = false]; // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, // or neither? HTTP based RPC implementation may choose GET verb for safe // methods, and PUT verb for idempotent methods instead of the default POST. enum IdempotencyLevel { IDEMPOTENCY_UNKNOWN = 0; NO_SIDE_EFFECTS = 1; // implies idempotent IDEMPOTENT = 2; // idempotent, but may have side effects } optional IdempotencyLevel idempotency_level = 34 [default = IDEMPOTENCY_UNKNOWN]; // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; // Clients can define custom options in extensions of this message. See above. extensions 1000 to max; } // A message representing a option the parser does not recognize. This only // appears in options protos created by the compiler::Parser class. // DescriptorPool resolves these when building Descriptor objects. Therefore, // options protos in descriptor objects (e.g. returned by Descriptor::options(), // or produced by Descriptor::CopyTo()) will never have UninterpretedOptions // in them. message UninterpretedOption { // The name of the uninterpreted option. Each string represents a segment in // a dot-separated name. is_extension is true iff a segment represents an // extension (denoted with parentheses in options specs in .proto files). // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents // "foo.(bar.baz).qux". 
message NamePart { required string name_part = 1; required bool is_extension = 2; } repeated NamePart name = 2; // The value of the uninterpreted option, in whatever type the tokenizer // identified it as during parsing. Exactly one of these should be set. optional string identifier_value = 3; optional uint64 positive_int_value = 4; optional int64 negative_int_value = 5; optional double double_value = 6; optional bytes string_value = 7; optional string aggregate_value = 8; } // =================================================================== // Optional source code info // Encapsulates information about the original source file from which a // FileDescriptorProto was generated. message SourceCodeInfo { // A Location identifies a piece of source code in a .proto file which // corresponds to a particular definition. This information is intended // to be useful to IDEs, code indexers, documentation generators, and similar // tools. // // For example, say we have a file like: // message Foo { // optional string foo = 1; // } // Let's look at just the field definition: // optional string foo = 1; // ^ ^^ ^^ ^ ^^^ // a bc de f ghi // We have the following locations: // span path represents // [a,i) [ 4, 0, 2, 0 ] The whole field definition. // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). // // Notes: // - A location may refer to a repeated field itself (i.e. not to any // particular index within it). This is used whenever a set of elements are // logically enclosed in a single code segment. For example, an entire // extend block (possibly containing multiple extension definitions) will // have an outer location whose path refers to the "extensions" repeated // field without an index. // - Multiple locations may have the same path. This happens when a single // logical declaration is spread out across multiple places. 
The most // obvious example is the "extend" block again -- there may be multiple // extend blocks in the same scope, each of which will have the same path. // - A location's span is not always a subset of its parent's span. For // example, the "extendee" of an extension declaration appears at the // beginning of the "extend" block and is shared by all extensions within // the block. // - Just because a location's span is a subset of some other location's span // does not mean that it is a descendant. For example, a "group" defines // both a type and a field in a single declaration. Thus, the locations // corresponding to the type and field and their components will overlap. // - Code which tries to interpret locations should probably be designed to // ignore those that it doesn't understand, as more types of locations could // be recorded in the future. repeated Location location = 1; message Location { // Identifies which part of the FileDescriptorProto was defined at this // location. // // Each element is a field number or an index. They form a path from // the root FileDescriptorProto to the place where the definition. For // example, this path: // [ 4, 3, 2, 7, 1 ] // refers to: // file.message_type(3) // 4, 3 // .field(7) // 2, 7 // .name() // 1 // This is because FileDescriptorProto.message_type has field number 4: // repeated DescriptorProto message_type = 4; // and DescriptorProto.field has field number 2: // repeated FieldDescriptorProto field = 2; // and FieldDescriptorProto.name has field number 1: // optional string name = 1; // // Thus, the above path gives the location of a field name. If we removed // the last element: // [ 4, 3, 2, 7 ] // this path refers to the whole field declaration (from the beginning // of the label to the terminating semicolon). repeated int32 path = 1 [packed = true]; // Always has exactly three or four elements: start line, start column, // end line (optional, otherwise assumed same as start line), end column. 
// These are packed into a single field for efficiency. Note that line // and column numbers are zero-based -- typically you will want to add // 1 to each before displaying to a user. repeated int32 span = 2 [packed = true]; // If this SourceCodeInfo represents a complete declaration, these are any // comments appearing before and after the declaration which appear to be // attached to the declaration. // // A series of line comments appearing on consecutive lines, with no other // tokens appearing on those lines, will be treated as a single comment. // // leading_detached_comments will keep paragraphs of comments that appear // before (but not connected to) the current element. Each paragraph, // separated by empty lines, will be one comment element in the repeated // field. // // Only the comment content is provided; comment markers (e.g. //) are // stripped out. For block comments, leading whitespace and an asterisk // will be stripped from the beginning of each line other than the first. // Newlines are included in the output. // // Examples: // // optional int32 foo = 1; // Comment attached to foo. // // Comment attached to bar. // optional int32 bar = 2; // // optional string baz = 3; // // Comment attached to baz. // // Another line attached to baz. // // // Comment attached to qux. // // // // Another line attached to qux. // optional double qux = 4; // // // Detached comment for corge. This is not leading or trailing comments // // to qux or corge because there are blank lines separating it from // // both. // // // Detached comment for corge paragraph 2. // // optional string corge = 5; // /* Block comment attached // * to corge. Leading asterisks // * will be removed. */ // /* Block comment attached to // * grault. */ // optional int32 grault = 6; // // // ignored detached comments. 
optional string leading_comments = 3; optional string trailing_comments = 4; repeated string leading_detached_comments = 6; } } // Describes the relationship between generated code and its original source // file. A GeneratedCodeInfo message is associated with only one generated // source file, but may contain references to different source .proto files. message GeneratedCodeInfo { // An Annotation connects some span of text in generated code to an element // of its generating .proto file. repeated Annotation annotation = 1; message Annotation { // Identifies the element in the original source .proto file. This field // is formatted the same as SourceCodeInfo.Location.path. repeated int32 path = 1 [packed = true]; // Identifies the filesystem path to the original source .proto. optional string source_file = 2; // Identifies the starting offset in bytes in the generated code // that relates to the identified object. optional int32 begin = 3; // Identifies the ending offset in bytes in the generated code that // relates to the identified offset. The end offset should be one past // the last relevant byte (so the length of the text = end - begin). optional int32 end = 4; } } ================================================ FILE: crates/client/vendor/google/protobuf/empty.proto ================================================ // Protocol Buffers - Google's data interchange format // Copyright 2008 Google Inc. All rights reserved. // https://developers.google.com/protocol-buffers/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. 
// * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. syntax = "proto3"; package google.protobuf; option csharp_namespace = "Google.Protobuf.WellKnownTypes"; option go_package = "google.golang.org/protobuf/types/known/emptypb"; option java_package = "com.google.protobuf"; option java_outer_classname = "EmptyProto"; option java_multiple_files = true; option objc_class_prefix = "GPB"; option cc_enable_arenas = true; // A generic empty message that you can re-use to avoid defining duplicated // empty messages in your APIs. A typical example is to use it as the request // or the response type of an API method. For instance: // // service Foo { // rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); // } // // The JSON representation for `Empty` is empty JSON object `{}`. 
message Empty {} ================================================ FILE: crates/client/vendor/google/protobuf/field_mask.proto ================================================ // Protocol Buffers - Google's data interchange format // Copyright 2008 Google Inc. All rights reserved. // https://developers.google.com/protocol-buffers/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
syntax = "proto3"; package google.protobuf; option csharp_namespace = "Google.Protobuf.WellKnownTypes"; option java_package = "com.google.protobuf"; option java_outer_classname = "FieldMaskProto"; option java_multiple_files = true; option objc_class_prefix = "GPB"; option go_package = "google.golang.org/protobuf/types/known/fieldmaskpb"; option cc_enable_arenas = true; // `FieldMask` represents a set of symbolic field paths, for example: // // paths: "f.a" // paths: "f.b.d" // // Here `f` represents a field in some root message, `a` and `b` // fields in the message found in `f`, and `d` a field found in the // message in `f.b`. // // Field masks are used to specify a subset of fields that should be // returned by a get operation or modified by an update operation. // Field masks also have a custom JSON encoding (see below). // // # Field Masks in Projections // // When used in the context of a projection, a response message or // sub-message is filtered by the API to only contain those fields as // specified in the mask. For example, if the mask in the previous // example is applied to a response message as follows: // // f { // a : 22 // b { // d : 1 // x : 2 // } // y : 13 // } // z: 8 // // The result will not contain specific values for fields x,y and z // (their value will be set to the default, and omitted in proto text // output): // // // f { // a : 22 // b { // d : 1 // } // } // // A repeated field is not allowed except at the last position of a // paths string. // // If a FieldMask object is not present in a get operation, the // operation applies to all fields (as if a FieldMask of all fields // had been specified). // // Note that a field mask does not necessarily apply to the // top-level response message. In case of a REST get operation, the // field mask applies directly to the response, but in case of a REST // list operation, the mask instead applies to each individual message // in the returned resource list. 
In case of a REST custom method, // other definitions may be used. Where the mask applies will be // clearly documented together with its declaration in the API. In // any case, the effect on the returned resource/resources is required // behavior for APIs. // // # Field Masks in Update Operations // // A field mask in update operations specifies which fields of the // targeted resource are going to be updated. The API is required // to only change the values of the fields as specified in the mask // and leave the others untouched. If a resource is passed in to // describe the updated values, the API ignores the values of all // fields not covered by the mask. // // If a repeated field is specified for an update operation, new values will // be appended to the existing repeated field in the target resource. Note that // a repeated field is only allowed in the last position of a `paths` string. // // If a sub-message is specified in the last position of the field mask for an // update operation, then new value will be merged into the existing sub-message // in the target resource. // // For example, given the target message: // // f { // b { // d: 1 // x: 2 // } // c: [1] // } // // And an update message: // // f { // b { // d: 10 // } // c: [2] // } // // then if the field mask is: // // paths: ["f.b", "f.c"] // // then the result will be: // // f { // b { // d: 10 // x: 2 // } // c: [1, 2] // } // // An implementation may provide options to override this default behavior for // repeated and message fields. // // In order to reset a field's value to the default, the field must // be in the mask and set to the default value in the provided resource. // Hence, in order to reset all fields of a resource, provide a default // instance of the resource and set all fields in the mask, or do // not provide a mask as described below. // // If a field mask is not present on update, the operation applies to // all fields (as if a field mask of all fields has been specified). 
// Note that in the presence of schema evolution, this may mean that // fields the client does not know and has therefore not filled into // the request will be reset to their default. If this is unwanted // behavior, a specific service may require a client to always specify // a field mask, producing an error if not. // // As with get operations, the location of the resource which // describes the updated values in the request message depends on the // operation kind. In any case, the effect of the field mask is // required to be honored by the API. // // ## Considerations for HTTP REST // // The HTTP kind of an update operation which uses a field mask must // be set to PATCH instead of PUT in order to satisfy HTTP semantics // (PUT must only be used for full updates). // // # JSON Encoding of Field Masks // // In JSON, a field mask is encoded as a single string where paths are // separated by a comma. Fields name in each path are converted // to/from lower-camel naming conventions. // // As an example, consider the following message declarations: // // message Profile { // User user = 1; // Photo photo = 2; // } // message User { // string display_name = 1; // string address = 2; // } // // In proto a field mask for `Profile` may look as such: // // mask { // paths: "user.display_name" // paths: "photo" // } // // In JSON, the same mask is represented as below: // // { // mask: "user.displayName,photo" // } // // # Field Masks and Oneof Fields // // Field masks treat fields in oneofs just as regular fields. Consider the // following message: // // message SampleMessage { // oneof test_oneof { // string name = 4; // SubMessage sub_message = 9; // } // } // // The field mask can be: // // mask { // paths: "name" // } // // Or: // // mask { // paths: "sub_message" // } // // Note that oneof type names ("test_oneof" in this case) cannot be used in // paths. 
// // ## Field Mask Verification // // The implementation of any API method which has a FieldMask type field in the // request should verify the included field paths, and return an // `INVALID_ARGUMENT` error if any path is unmappable. message FieldMask { // The set of field mask paths. repeated string paths = 1; } ================================================ FILE: crates/client/vendor/google/protobuf/timestamp.proto ================================================ // Protocol Buffers - Google's data interchange format // Copyright 2008 Google Inc. All rights reserved. // https://developers.google.com/protocol-buffers/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. syntax = "proto3"; package google.protobuf; option csharp_namespace = "Google.Protobuf.WellKnownTypes"; option cc_enable_arenas = true; option go_package = "google.golang.org/protobuf/types/known/timestamppb"; option java_package = "com.google.protobuf"; option java_outer_classname = "TimestampProto"; option java_multiple_files = true; option objc_class_prefix = "GPB"; // A Timestamp represents a point in time independent of any time zone or local // calendar, encoded as a count of seconds and fractions of seconds at // nanosecond resolution. The count is relative to an epoch at UTC midnight on // January 1, 1970, in the proleptic Gregorian calendar which extends the // Gregorian calendar backwards to year one. // // All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap // second table is needed for interpretation, using a [24-hour linear // smear](https://developers.google.com/time/smear). // // The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By // restricting to that range, we ensure that we can convert to and from [RFC // 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. // // # Examples // // Example 1: Compute Timestamp from POSIX `time()`. // // Timestamp timestamp; // timestamp.set_seconds(time(NULL)); // timestamp.set_nanos(0); // // Example 2: Compute Timestamp from POSIX `gettimeofday()`. 
// // struct timeval tv; // gettimeofday(&tv, NULL); // // Timestamp timestamp; // timestamp.set_seconds(tv.tv_sec); // timestamp.set_nanos(tv.tv_usec * 1000); // // Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. // // FILETIME ft; // GetSystemTimeAsFileTime(&ft); // UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; // // // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z // // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. // Timestamp timestamp; // timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); // timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); // // Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. // // long millis = System.currentTimeMillis(); // // Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) // .setNanos((int) ((millis % 1000) * 1000000)).build(); // // // Example 5: Compute Timestamp from Java `Instant.now()`. // // Instant now = Instant.now(); // // Timestamp timestamp = // Timestamp.newBuilder().setSeconds(now.getEpochSecond()) // .setNanos(now.getNano()).build(); // // // Example 6: Compute Timestamp from current time in Python. // // timestamp = Timestamp() // timestamp.GetCurrentTime() // // # JSON Mapping // // In JSON format, the Timestamp type is encoded as a string in the // [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the // format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" // where {year} is always expressed using four digits while {month}, {day}, // {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional // seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), // are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone // is required. 
A proto3 JSON serializer should always use UTC (as indicated by // "Z") when printing the Timestamp type and a proto3 JSON parser should be // able to accept both UTC and other timezones (as indicated by an offset). // // For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past // 01:30 UTC on January 15, 2017. // // In JavaScript, one can convert a Date object to this format using the // standard // [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) // method. In Python, a standard `datetime.datetime` object can be converted // to this format using // [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with // the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use // the Joda Time's [`ISODateTimeFormat.dateTime()`]( // http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D // ) to obtain a formatter capable of generating timestamps in this format. // // message Timestamp { // Represents seconds of UTC time since Unix epoch // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. int64 seconds = 1; // Non-negative fractions of a second at nanosecond resolution. Negative // second values with fractions must still have non-negative nanos values // that count forward in time. Must be from 0 to 999,999,999 // inclusive. int32 nanos = 2; } ================================================ FILE: crates/client/vendor/google/rpc/status.proto ================================================ // Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; package google.rpc; import "google/protobuf/any.proto"; option cc_enable_arenas = true; option go_package = "google.golang.org/genproto/googleapis/rpc/status;status"; option java_multiple_files = true; option java_outer_classname = "StatusProto"; option java_package = "com.google.rpc"; option objc_class_prefix = "RPC"; // The `Status` type defines a logical error model that is suitable for // different programming environments, including REST APIs and RPC APIs. It is // used by [gRPC](https://github.com/grpc). Each `Status` message contains // three pieces of data: error code, error message, and error details. // // You can find out more about this error model and how to work with it in the // [API Design Guide](https://cloud.google.com/apis/design/errors). message Status { // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. int32 code = 1; // A developer-facing error message, which should be in English. Any // user-facing error message should be localized and sent in the // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. string message = 2; // A list of messages that carry the error details. There is a common set of // message types for APIs to use. 
repeated google.protobuf.Any details = 3; } ================================================ FILE: crates/logging/Cargo.toml ================================================ [package] name = "containerd-shim-logging" version = "0.1.1" authors = [ "Maksym Pavlenko ", "The containerd Authors", ] description = "Logger extension for containerd v2 runtime" keywords = ["containerd", "shim", "containers"] categories = ["api-bindings", "asynchronous"] edition.workspace = true license.workspace = true repository.workspace = true homepage.workspace = true [features] docs = [] [package.metadata.docs.rs] features = ["docs"] ================================================ FILE: crates/logging/README.md ================================================ # Shim logging binaries for containerd [![Crates.io](https://img.shields.io/crates/v/containerd-shim-logging)](https://crates.io/crates/containerd-shim-logging) [![docs.rs](https://img.shields.io/docsrs/containerd-shim-logging)](https://docs.rs/containerd-shim-logging/latest/containerd_shim_logging/) [![Crates.io](https://img.shields.io/crates/l/containerd-shim-logging)](https://github.com/containerd/rust-extensions/blob/main/LICENSE) [![CI](https://github.com/containerd/rust-extensions/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/containerd/rust-extensions/actions/workflows/ci.yml) Shim v2 runtime supports pluggable logging binaries via stdio URIs. This crate implement `logging::run` to easy custom logger implementations in Rust. 
[containerd Documentation](https://github.com/containerd/containerd/tree/master/core/runtime/v2#logging) ## Example There is a journal example available as reference (originally written in Go [here](https://github.com/containerd/containerd/tree/dbef1d56d7ebc05bc4553d72c419ed5ce025b05d/runtime/v2#logging)): ```bash # Build $ sudo yum install systemd-devel $ cargo build --example journal # Run $ ctr i pull docker.io/library/hello-world:latest $ ctr run --rm --log-uri=binary:////path/to/journal_binary docker.io/library/hello-world:latest hello $ journalctl -f _COMM=journal -- Logs begin at Thu 2021-05-20 15:47:51 PDT. -- Jul 22 11:53:35 dev journal[3233968]: Jul 22 11:53:35 dev journal[3233968]: To try something more ambitious, you can run an Ubuntu container with: Jul 22 11:53:35 dev journal[3233968]: $ docker run -it ubuntu bash Jul 22 11:53:35 dev journal[3233968]: Jul 22 11:53:35 dev journal[3233968]: Share images, automate workflows, and more with a free Docker ID: Jul 22 11:53:35 dev journal[3233968]: https://hub.docker.com/ Jul 22 11:53:35 dev journal[3233968]: Jul 22 11:53:35 dev journal[3233968]: For more examples and ideas, visit: Jul 22 11:53:35 dev journal[3233968]: https://docs.docker.com/get-started/ Jul 22 11:53:35 dev journal[3233968]: ``` ================================================ FILE: crates/logging/examples/journal.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ use std::{fs, io, io::BufRead, thread}; use containerd_shim_logging as logging; use logging::{Config, Driver}; fn pump(reader: fs::File) { io::BufReader::new(reader) .lines() .map_while(Result::ok) .for_each(|_str| { // Write log string to destination here. // For instance with journald: // systemd::journal::print(0, &str); }); } struct Journal { stdout_handle: thread::JoinHandle<()>, stderr_handle: thread::JoinHandle<()>, } impl Driver for Journal { type Error = String; fn new(config: Config) -> Result { let stdout = config.stdout; let stderr = config.stderr; Ok(Journal { stdout_handle: thread::spawn(|| pump(stdout)), stderr_handle: thread::spawn(|| pump(stderr)), }) } fn wait(self) -> Result<(), Self::Error> { self.stdout_handle .join() .map_err(|err| format!("{:?}", err))?; self.stderr_handle .join() .map_err(|err| format!("{:?}", err))?; Ok(()) } } fn main() { logging::run::() } ================================================ FILE: crates/logging/src/lib.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #![cfg_attr(feature = "docs", doc = include_str!("../README.md"))] use std::{env, fmt, fs, os::unix::io::FromRawFd, process}; /// Logging binary configuration received from containerd. #[derive(Debug)] pub struct Config { /// Container id. pub id: String, /// Container namespace. pub namespace: String, /// Stdout to forward logs from. pub stdout: fs::File, /// Stderr to forward logs from. 
pub stderr: fs::File, } impl Config { /// Creates a new configuration object. /// /// It'll query environment provided by containerd to fill up [Config] structure fields. /// /// # Panics /// Function call will panic if the environment is incorrect (note that this should be happen /// if launched from containerd). /// fn new() -> Config { let id = match env::var("CONTAINER_ID") { Ok(id) => id, Err(_) => handle_err("CONTAINER_ID env not found"), }; let namespace = match env::var("CONTAINER_NAMESPACE") { Ok(ns) => ns, Err(_) => handle_err("CONTAINER_NAMESPACE env not found"), }; let stdout = unsafe { fs::File::from_raw_fd(3) }; let stderr = unsafe { fs::File::from_raw_fd(4) }; Config { id, namespace, stdout, stderr, } } } /// Signal file wrapper. /// containerd uses a file with fd 5 as a signaling mechanism between the daemon and logger process. /// This is a wrapper for convenience. /// /// See [logging_unix.go] for details. /// /// [logging_unix.go]: https://github.com/containerd/containerd/blob/dbef1d56d7ebc05bc4553d72c419ed5ce025b05d/runtime/v2/logging/logging_unix.go#L44 struct Ready(fs::File); impl Ready { fn new() -> Ready { Ready(unsafe { fs::File::from_raw_fd(5) }) } /// Signal that we are ready and setup for the container to be started. fn signal(self) { drop(self.0) } } /// Driver is a trait to be implemented by v2 logging binaries. /// /// This trait is Rusty alternative to Go's `LoggerFunc`. /// /// # Example /// /// ```rust /// use containerd_shim_logging::{Config, Driver}; /// /// struct Logger; /// /// impl Driver for Logger { /// type Error = (); /// /// // Launch logger threads here. /// fn new(config: Config) -> Result { /// Ok(Logger {}) /// } /// /// // Wait for threads to finish. /// // In this example `Logger` will finish immediately. /// fn wait(self) -> Result<(), Self::Error> { /// Ok(()) /// } /// } /// ``` pub trait Driver: Sized { /// The error type to be returned from driver routines if something goes wrong. 
type Error: fmt::Debug; /// Create and run a new binary logger from the provided [Config]. /// /// Implementations are expected to start the logger driver (typically by spawning threads). /// Once returned, the crate will signal containerd that we're ready to log. fn new(config: Config) -> Result; /// Wait for the driver to finish. /// /// Once returned from this function, the binary logger process will shutdown. fn wait(self) -> Result<(), Self::Error>; } /// Entry point to run the logging driver. /// /// Typically `run` must be called from the `main` function to launch the driver. pub fn run() { let config = Config::new(); let ready = Ready::new(); // Initialize log driver let logger = match D::new(config) { Ok(driver) => driver, Err(err) => handle_err(err), }; // Signal ready to pump log data ready.signal(); // Run and block until exit if let Err(err) = logger.wait() { handle_err(err) } else { process::exit(0); } } #[inline] fn handle_err(err: impl fmt::Debug) -> ! { eprintln!("{:?}", err); process::exit(1); } ================================================ FILE: crates/runc/Cargo.toml ================================================ [package] name = "runc" version = "0.3.0" authors = ["Yuna Tomida ", "The containerd Authors"] description = "A crate for consuming the runc binary in your Rust applications" keywords = ["containerd", "containers", "runc"] categories = ["api-bindings", "asynchronous"] edition.workspace = true license.workspace = true repository.workspace = true homepage.workspace = true [features] async = ["tokio", "async-trait", "tokio-pipe"] docs = [] [dependencies] libc.workspace = true log.workspace = true nix = { workspace = true, features = ["user", "fs"] } oci-spec = { workspace = true, features = ["runtime"] } serde = { workspace = true, features = ["derive", "std"] } serde_json = { workspace = true, features = ["std"] } tempfile.workspace = true thiserror.workspace = true time = { workspace = true, features = ["serde", "std"] } uuid = { 
workspace = true, features = ["v4"] } # Async dependencies async-trait = { workspace = true, optional = true } tokio = { workspace = true, features = ["macros", "rt-multi-thread", "process", "sync", "fs", "io-util", "net", "time"], optional = true } tokio-pipe = { version = "0.2.12", default-features = false, optional = true } [package.metadata.docs.rs] features = ["docs"] ================================================ FILE: crates/runc/README.md ================================================ # Rust bindings for runc CLI [![Crates.io](https://img.shields.io/crates/v/runc)](https://crates.io/crates/runc) [![docs.rs](https://img.shields.io/docsrs/runc)](https://docs.rs/runc/latest/runc/) [![Crates.io](https://img.shields.io/crates/l/containerd-shim)](https://github.com/containerd/rust-extensions/blob/main/LICENSE) [![CI](https://github.com/containerd/rust-extensions/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/containerd/rust-extensions/actions/workflows/ci.yml) A crate for consuming the runc binary in your Rust applications, similar to [go-runc](https://github.com/containerd/go-runc) for Go. This crate is based on archived [rust-runc](https://github.com/pwFoo/rust-runc). ## Usage Both sync/async version is available. You can build runc client with `RuncConfig` in method chaining style. Call `build()` or `build_async()` to get client. Note that async client depends on [tokio](https://github.com/tokio-rs/tokio), then please use it on tokio runtime. 
```rust,ignore #[tokio::main] async fn main() { let config = runc::GlobalOpts::new() .root("./new_root") .debug(false) .log("/path/to/logfile.json") .log_format(runc::LogFormat::Json) .rootless(true); let client = config.build_async().unwrap(); let opts = runc::options::CreateOpts::new() .pid_file("/path/to/pid/file") .no_pivot(true); client.create("container-id", "path/to/bundle", Some(&opts)).unwrap(); } ``` ## Limitations - Supported commands are only: - create - start - state - kill - delete - Exec is **not** available in `RuncAsyncClient` now. - Console utilites are **not** available - see [Go version](https://github.com/containerd/go-runc/blob/main/console.go) ================================================ FILE: crates/runc/src/asynchronous/io.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ use std::{fmt::Debug, io::Result, process::Stdio}; use async_trait::async_trait; use nix::unistd::{Gid, Uid}; use tokio::fs::OpenOptions; pub use crate::Io; use crate::{Command, Pipe, PipedIo}; #[derive(Debug, Clone)] pub struct IOOption { pub open_stdin: bool, pub open_stdout: bool, pub open_stderr: bool, } impl Default for IOOption { fn default() -> Self { Self { open_stdin: true, open_stdout: true, open_stderr: true, } } } impl PipedIo { pub fn new(uid: u32, gid: u32, opts: &IOOption) -> std::io::Result { Ok(Self { stdin: if opts.open_stdin { Self::create_pipe(uid, gid, true)? 
} else { None }, stdout: if opts.open_stdout { Self::create_pipe(uid, gid, true)? } else { None }, stderr: if opts.open_stderr { Self::create_pipe(uid, gid, true)? } else { None }, }) } fn create_pipe(uid: u32, gid: u32, stdin: bool) -> std::io::Result> { let pipe = Pipe::new()?; let uid = Some(Uid::from_raw(uid)); let gid = Some(Gid::from_raw(gid)); if stdin { let rd = pipe.rd.try_clone()?; nix::unistd::fchown(rd, uid, gid)?; } else { let wr = pipe .try_clone_wr() .ok_or_else(|| std::io::Error::other("write end closed"))?; nix::unistd::fchown(wr, uid, gid)?; } Ok(Some(pipe)) } } /// IO driver to direct output/error messages to /dev/null. /// /// With this Io driver, all methods of [crate::Runc] can't capture the output/error messages. #[derive(Debug)] pub struct NullIo { dev_null: std::sync::Mutex>, } impl NullIo { pub fn new() -> std::io::Result { let f = std::fs::OpenOptions::new().read(true).open("/dev/null")?; let dev_null = std::sync::Mutex::new(Some(f)); Ok(Self { dev_null }) } } #[async_trait] impl Io for NullIo { async fn set(&self, cmd: &mut Command) -> std::io::Result<()> { if let Some(null) = self.dev_null.lock().unwrap().as_ref() { cmd.stdout(null.try_clone()?); cmd.stderr(null.try_clone()?); } Ok(()) } async fn close_after_start(&self) { let mut m = self.dev_null.lock().unwrap(); let _ = m.take(); } } /// Io driver based on Stdio::inherited(), to direct outputs/errors to stdio. /// /// With this Io driver, all methods of [crate::Runc] can't capture the output/error messages. #[derive(Debug)] pub struct InheritedStdIo {} impl InheritedStdIo { pub fn new() -> std::io::Result { Ok(InheritedStdIo {}) } } #[async_trait] impl Io for InheritedStdIo { async fn set(&self, cmd: &mut Command) -> std::io::Result<()> { cmd.stdin(Stdio::null()) .stdout(Stdio::inherit()) .stderr(Stdio::inherit()); Ok(()) } async fn close_after_start(&self) {} } /// Io driver based on Stdio::piped(), to capture outputs/errors from runC. 
/// /// With this Io driver, methods of [crate::Runc] may capture the output/error messages. #[derive(Debug)] pub struct PipedStdIo {} impl PipedStdIo { pub fn new() -> std::io::Result { Ok(PipedStdIo {}) } } #[async_trait] impl Io for PipedStdIo { async fn set(&self, cmd: &mut Command) -> std::io::Result<()> { cmd.stdin(Stdio::null()) .stdout(Stdio::piped()) .stderr(Stdio::piped()); Ok(()) } async fn close_after_start(&self) {} } /// FIFO for the scenario that set FIFO for command Io. #[derive(Debug)] pub struct FIFO { pub stdin: Option, pub stdout: Option, pub stderr: Option, } #[async_trait] impl Io for FIFO { async fn set(&self, cmd: &mut Command) -> Result<()> { if let Some(path) = self.stdin.as_ref() { let stdin = OpenOptions::new() .read(true) .custom_flags(libc::O_NONBLOCK) .open(path) .await?; cmd.stdin(stdin.into_std().await); } if let Some(path) = self.stdout.as_ref() { let stdout = OpenOptions::new().write(true).open(path).await?; cmd.stdout(stdout.into_std().await); } if let Some(path) = self.stderr.as_ref() { let stderr = OpenOptions::new().write(true).open(path).await?; cmd.stderr(stderr.into_std().await); } Ok(()) } async fn close_after_start(&self) {} } #[cfg(test)] mod tests { use super::*; #[cfg(not(target_os = "macos"))] #[test] fn test_io_option() { let opts = IOOption { open_stdin: false, open_stdout: false, open_stderr: false, }; let io = PipedIo::new(1000, 1000, &opts).unwrap(); assert!(io.stdin().is_none()); assert!(io.stdout().is_none()); assert!(io.stderr().is_none()); } #[tokio::test] async fn test_null_io() { let io = NullIo::new().unwrap(); assert!(io.stdin().is_none()); assert!(io.stdout().is_none()); assert!(io.stderr().is_none()); } } ================================================ FILE: crates/runc/src/asynchronous/mod.rs ================================================ /* Copyright The containerd Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ pub mod io; mod pipe; mod runc; use std::{fmt::Debug, io::Result, os::fd::AsRawFd}; use async_trait::async_trait; pub use pipe::Pipe; pub use runc::{DefaultExecutor, Spawner}; use tokio::io::{AsyncRead, AsyncWrite}; use crate::Command; #[async_trait] pub trait Io: Debug + Send + Sync { fn stdin(&self) -> Option> { None } fn stdout(&self) -> Option> { None } fn stderr(&self) -> Option> { None } /// Set IO for passed command. /// Read side of stdin, write side of stdout and write side of stderr should be provided to command. async fn set(&self, cmd: &mut Command) -> Result<()>; /// Only close write side (should be stdout/err "from" runc process) async fn close_after_start(&self); } #[derive(Debug)] pub struct PipedIo { pub stdin: Option, pub stdout: Option, pub stderr: Option, } #[async_trait] impl Io for PipedIo { fn stdin(&self) -> Option> { self.stdin.as_ref().and_then(|pipe| { pipe.wr_as_raw_fd().and_then(|fd| { tokio_pipe::PipeWrite::from_raw_fd_checked(fd) .map(|x| Box::new(x) as Box) .ok() }) }) } fn stdout(&self) -> Option> { self.stdout.as_ref().and_then(|pipe| { let fd = pipe.rd.as_raw_fd(); tokio_pipe::PipeRead::from_raw_fd_checked(fd) .map(|x| Box::new(x) as Box) .ok() }) } fn stderr(&self) -> Option> { self.stderr.as_ref().and_then(|pipe| { let fd = pipe.rd.as_raw_fd(); tokio_pipe::PipeRead::from_raw_fd_checked(fd) .map(|x| Box::new(x) as Box) .ok() }) } // Note that this internally use [`std::fs::File`]'s `try_clone()`. 
// Thus, the files passed to commands will be not closed after command exit. async fn set(&self, cmd: &mut Command) -> std::io::Result<()> { if let Some(p) = self.stdin.as_ref() { let pr = p.rd.try_clone()?; cmd.stdin(pr); } if let Some(p) = self.stdout.as_ref() { let pw = p .try_clone_wr() .ok_or_else(|| std::io::Error::other("write end closed"))?; cmd.stdout(pw); } if let Some(p) = self.stderr.as_ref() { let pw = p .try_clone_wr() .ok_or_else(|| std::io::Error::other("write end closed"))?; cmd.stdout(pw); } Ok(()) } async fn close_after_start(&self) { if let Some(p) = self.stdout.as_ref() { p.close_wr(); } if let Some(p) = self.stderr.as_ref() { p.close_wr(); } } } ================================================ FILE: crates/runc/src/asynchronous/pipe.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ use std::{ os::unix::io::{AsRawFd, OwnedFd, RawFd}, sync::Mutex, }; /// Struct to represent a pipe that can be used to transfer stdio inputs and outputs. /// /// With this Io driver, methods of [crate::Runc] may capture the output/error messages. /// When one side of the pipe is closed, the state will be represented with [`None`]. #[derive(Debug)] pub struct Pipe { pub rd: OwnedFd, wr: Mutex>, } impl Pipe { pub fn new() -> std::io::Result { let (rd, wr) = std::io::pipe()?; Ok(Self { rd: OwnedFd::from(rd), wr: Mutex::new(Some(OwnedFd::from(wr))), }) } /// Return the raw fd of the write end. Returns `None` if closed. 
pub fn wr_as_raw_fd(&self) -> Option { self.wr.lock().unwrap().as_ref().map(|w| w.as_raw_fd()) } /// Clone the write end. Returns `None` if closed. pub fn try_clone_wr(&self) -> Option { self.wr .lock() .unwrap() .as_ref() .and_then(|w| w.try_clone().ok()) } /// Close the write end by dropping it. No-op if already closed. pub fn close_wr(&self) { let _ = self.wr.lock().unwrap().take(); } /// Take ownership of the write end. Returns `None` if already closed. pub fn take_wr(&self) -> Option { self.wr.lock().unwrap().take() } } #[cfg(test)] mod tests { use std::os::fd::IntoRawFd; use tokio::{ io::{AsyncReadExt, AsyncWriteExt}, net::unix::pipe, }; use super::*; #[tokio::test] async fn test_pipe_creation() { let pipe = Pipe::new().expect("Failed to create pipe"); let wr = pipe.take_wr().unwrap(); assert!( pipe.rd.into_raw_fd() >= 0, "Read file descriptor is invalid" ); assert!(wr.into_raw_fd() >= 0, "Write file descriptor is invalid"); } #[tokio::test] async fn test_pipe_write_read() { let pipe = Pipe::new().expect("Failed to create pipe"); let mut write_end = pipe::Sender::from_owned_fd(pipe.take_wr().unwrap()).unwrap(); let mut read_end = pipe::Receiver::from_owned_fd(pipe.rd).unwrap(); let write_data = b"hello"; write_end .write_all(write_data) .await .expect("Failed to write to pipe"); let mut read_data = vec![0; write_data.len()]; read_end .read_exact(&mut read_data) .await .expect("Failed to read from pipe"); assert_eq!( read_data, write_data, "Data read from pipe does not match data written" ); } #[tokio::test] async fn test_pipe_async_write_read() { let pipe = Pipe::new().expect("Failed to create pipe"); let mut write_end = pipe::Sender::from_owned_fd(pipe.take_wr().unwrap()).unwrap(); let mut read_end = pipe::Receiver::from_owned_fd(pipe.rd).unwrap(); let write_data = b"hello"; tokio::spawn(async move { write_end .write_all(write_data) .await .expect("Failed to write to pipe"); }); let mut read_data = vec![0; write_data.len()]; read_end .read_exact(&mut 
read_data) .await .expect("Failed to read from pipe"); assert_eq!( &read_data, write_data, "Data read from pipe does not match data written" ); } } ================================================ FILE: crates/runc/src/asynchronous/runc.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ use std::{fmt::Debug, path::Path, process::ExitStatus}; use async_trait::async_trait; use log::debug; use oci_spec::runtime::{LinuxResources, Process}; use crate::{ container::Container, error::Error, events, options::*, utils::{self, write_value_to_temp_file}, Command, Response, Result, Runc, }; // a macro tool to cleanup the file with name $filename, // there is no async drop in async rust, so we have to call remove_file everytime // after a temp file created, before return of a function. // with this macro we don't have to write the match case codes everytime. macro_rules! tc { ($b:expr, $filename: expr) => { match $b { Ok(r) => r, Err(e) => { let _ = tokio::fs::remove_file($filename).await; return Err(e); } } }; } /// Async implementation for [Runc]. /// /// Note that you MUST use this client on tokio runtime, as this client internally use [`tokio::process::Command`] /// and some other utilities. 
impl Runc { pub(crate) async fn launch(&self, mut cmd: Command, combined_output: bool) -> Result { debug!("Execute command {:?}", cmd); unsafe { cmd.pre_exec(move || { #[cfg(target_os = "linux")] if let Ok(thp) = std::env::var("THP_DISABLED") { if let Ok(thp_disabled) = thp.parse::() { let ret = libc::prctl( libc::PR_SET_THP_DISABLE, if thp_disabled { 1u64 } else { 0u64 }, 0, 0, 0, ); if ret < 0 { debug!("set_thp_disable err: {}", std::io::Error::last_os_error()); } } } Ok(()) }); } let (status, pid, stdout, stderr) = self.spawner.execute(cmd).await?; if status.success() { let output = if combined_output { stdout + stderr.as_str() } else { stdout }; Ok(Response { pid, status, output, }) } else { Err(Error::CommandFailed { status, stdout, stderr, }) } } /// Create a new container pub async fn create

( &self, id: &str, bundle: P, opts: Option<&CreateOpts>, ) -> Result where P: AsRef, { let mut args = vec![ "create".to_string(), "--bundle".to_string(), utils::abs_string(bundle)?, ]; if let Some(opts) = opts { args.append(&mut opts.args()?); } args.push(id.to_string()); let mut cmd = self.command(&args)?; match opts { Some(CreateOpts { io: Some(io), .. }) => { io.set(&mut cmd).await.map_err(Error::UnavailableIO)?; let res = self.launch(cmd, true).await?; io.close_after_start().await; Ok(res) } _ => self.launch(cmd, true).await, } } /// Delete a container pub async fn delete(&self, id: &str, opts: Option<&DeleteOpts>) -> Result<()> { let mut args = vec!["delete".to_string()]; if let Some(opts) = opts { args.append(&mut opts.args()); } args.push(id.to_string()); let _ = self.launch(self.command(&args)?, true).await?; Ok(()) } /// Return an event stream of container notifications pub async fn events(&self, _id: &str, _interval: &std::time::Duration) -> Result<()> { Err(Error::Unimplemented("events".to_string())) } /// Execute an additional process inside the container pub async fn exec(&self, id: &str, spec: &Process, opts: Option<&ExecOpts>) -> Result<()> { let f = write_value_to_temp_file(spec).await?; let mut args = vec!["exec".to_string(), "--process".to_string(), f.clone()]; if let Some(opts) = opts { args.append(&mut tc!(opts.args(), &f)); } args.push(id.to_string()); let mut cmd = self.command(&args)?; match opts { Some(ExecOpts { io: Some(io), .. 
}) => { tc!( io.set(&mut cmd) .await .map_err(|e| Error::IoSet(e.to_string())), &f ); tc!(self.launch(cmd, true).await, &f); io.close_after_start().await; } _ => { tc!(self.launch(cmd, true).await, &f); } } let _ = tokio::fs::remove_file(&f).await; Ok(()) } /// Send the specified signal to processes inside the container pub async fn kill(&self, id: &str, sig: u32, opts: Option<&KillOpts>) -> Result<()> { let mut args = vec!["kill".to_string()]; if let Some(opts) = opts { args.append(&mut opts.args()); } args.push(id.to_string()); args.push(sig.to_string()); let _ = self.launch(self.command(&args)?, true).await?; Ok(()) } /// List all containers associated with this runc instance pub async fn list(&self) -> Result> { let args = ["list".to_string(), "--format=json".to_string()]; let res = self.launch(self.command(&args)?, true).await?; let output = res.output.trim(); // Ugly hack to work around golang Ok(if output == "null" { Vec::new() } else { serde_json::from_str(output).map_err(Error::JsonDeserializationFailed)? 
}) } /// Pause a container pub async fn pause(&self, id: &str) -> Result<()> { let args = ["pause".to_string(), id.to_string()]; let _ = self.launch(self.command(&args)?, true).await?; Ok(()) } /// Resume a container pub async fn resume(&self, id: &str) -> Result<()> { let args = ["resume".to_string(), id.to_string()]; let _ = self.launch(self.command(&args)?, true).await?; Ok(()) } pub async fn checkpoint(&self) -> Result<()> { Err(Error::Unimplemented("checkpoint".to_string())) } pub async fn restore(&self) -> Result<()> { Err(Error::Unimplemented("restore".to_string())) } /// List all the processes inside the container, returning their pids pub async fn ps(&self, id: &str) -> Result> { let args = [ "ps".to_string(), "--format=json".to_string(), id.to_string(), ]; let res = self.launch(self.command(&args)?, true).await?; let output = res.output.trim(); // Ugly hack to work around golang Ok(if output == "null" { Vec::new() } else { serde_json::from_str(output).map_err(Error::JsonDeserializationFailed)? }) } /// Run the create, start, delete lifecycle of the container and return its exit status pub async fn run

(&self, id: &str, bundle: P, opts: Option<&CreateOpts>) -> Result<()> where P: AsRef, { let mut args = vec![ "run".to_string(), "--bundle".to_string(), utils::abs_string(bundle)?, ]; if let Some(opts) = opts { args.append(&mut opts.args()?); } args.push(id.to_string()); let mut cmd = self.command(&args)?; if let Some(CreateOpts { io: Some(io), .. }) = opts { io.set(&mut cmd) .await .map_err(|e| Error::IoSet(e.to_string()))?; }; let _ = self.launch(cmd, true).await?; Ok(()) } /// Start an already created container pub async fn start(&self, id: &str) -> Result<()> { let args = vec!["start".to_string(), id.to_string()]; let _ = self.launch(self.command(&args)?, true).await?; Ok(()) } /// Return the state of a container pub async fn state(&self, id: &str) -> Result { let args = vec!["state".to_string(), id.to_string()]; let res = self.launch(self.command(&args)?, true).await?; serde_json::from_str(&res.output).map_err(Error::JsonDeserializationFailed) } /// Return the latest statistics for a container pub async fn stats(&self, id: &str) -> Result { let args = vec!["events".to_string(), "--stats".to_string(), id.to_string()]; let res = self.launch(self.command(&args)?, true).await?; let event: events::Event = serde_json::from_str(&res.output).map_err(Error::JsonDeserializationFailed)?; if let Some(stats) = event.stats { Ok(stats) } else { Err(Error::MissingContainerStats) } } /// Update a container with the provided resource spec pub async fn update(&self, id: &str, resources: &LinuxResources) -> Result<()> { let f = write_value_to_temp_file(resources).await?; let args = [ "update".to_string(), "--resources".to_string(), f.to_string(), id.to_string(), ]; let _ = tc!(self.launch(self.command(&args)?, true).await, &f); let _ = tokio::fs::remove_file(&f).await; Ok(()) } } #[async_trait] pub trait Spawner: Debug { async fn execute(&self, cmd: Command) -> Result<(ExitStatus, u32, String, String)>; } #[derive(Debug)] pub struct DefaultExecutor {} #[async_trait] impl Spawner 
for DefaultExecutor { async fn execute(&self, cmd: Command) -> Result<(ExitStatus, u32, String, String)> { let mut cmd = cmd; let child = cmd.spawn().map_err(Error::ProcessSpawnFailed)?; let pid = child.id().unwrap(); let result = child .wait_with_output() .await .map_err(Error::InvalidCommand)?; let status = result.status; let stdout = String::from_utf8_lossy(&result.stdout).to_string(); let stderr = String::from_utf8_lossy(&result.stderr).to_string(); Ok((status, pid, stdout, stderr)) } } #[cfg(test)] #[cfg(target_os = "linux")] mod tests { use std::sync::Arc; use crate::{ error::Error, io::{InheritedStdIo, PipedStdIo}, options::{CreateOpts, DeleteOpts, GlobalOpts}, Runc, }; fn ok_client() -> Runc { GlobalOpts::new() .command("/bin/true") .build() .expect("unable to create runc instance") } fn fail_client() -> Runc { GlobalOpts::new() .command("/bin/false") .build() .expect("unable to create runc instance") } fn echo_client() -> Runc { GlobalOpts::new() .command("/bin/echo") .build() .expect("unable to create runc instance") } #[tokio::test] async fn test_async_create() { let opts = CreateOpts::new(); let ok_runc = ok_client(); let ok_task = tokio::spawn(async move { let response = ok_runc .create("fake-id", "fake-bundle", Some(&opts)) .await .expect("true failed."); assert_ne!(response.pid, 0); assert!(response.status.success()); assert!(response.output.is_empty()); }); let opts = CreateOpts::new(); let fail_runc = fail_client(); let fail_task = tokio::spawn(async move { match fail_runc .create("fake-id", "fake-bundle", Some(&opts)) .await { Ok(_) => panic!("fail_runc returned exit status 0."), Err(Error::CommandFailed { status, stdout, stderr, }) => { if status.code().unwrap() == 1 && stdout.is_empty() && stderr.is_empty() { eprintln!("fail_runc succeeded."); } else { panic!("unexpected outputs from fail_runc.") } } Err(e) => panic!("unexpected error from fail_runc: {:?}", e), } }); ok_task.await.expect("ok_task failed."); fail_task.await.expect("fail_task 
unexpectedly succeeded."); } #[tokio::test] async fn test_async_start() { let ok_runc = ok_client(); let ok_task = tokio::spawn(async move { ok_runc.start("fake-id").await.expect("true failed."); eprintln!("ok_runc succeeded."); }); let fail_runc = fail_client(); let fail_task = tokio::spawn(async move { match fail_runc.start("fake-id").await { Ok(_) => panic!("fail_runc returned exit status 0."), Err(Error::CommandFailed { status, stdout, stderr, }) => { if status.code().unwrap() == 1 && stdout.is_empty() && stderr.is_empty() { eprintln!("fail_runc succeeded."); } else { panic!("unexpected outputs from fail_runc.") } } Err(e) => panic!("unexpected error from fail_runc: {:?}", e), } }); ok_task.await.expect("ok_task failed."); fail_task.await.expect("fail_task unexpectedly succeeded."); } #[tokio::test] async fn test_async_run() { let opts = CreateOpts::new(); let ok_runc = ok_client(); tokio::spawn(async move { ok_runc .create("fake-id", "fake-bundle", Some(&opts)) .await .expect("true failed."); eprintln!("ok_runc succeeded."); }); let opts = CreateOpts::new(); let fail_runc = fail_client(); tokio::spawn(async move { match fail_runc .create("fake-id", "fake-bundle", Some(&opts)) .await { Ok(_) => panic!("fail_runc returned exit status 0."), Err(Error::CommandFailed { status, stdout, stderr, }) => { if status.code().unwrap() == 1 && stdout.is_empty() && stderr.is_empty() { eprintln!("fail_runc succeeded."); } else { panic!("unexpected outputs from fail_runc.") } } Err(e) => panic!("unexpected error from fail_runc: {:?}", e), } }) .await .expect("tokio spawn falied."); } #[tokio::test] async fn test_async_delete() { let opts = DeleteOpts::new(); let ok_runc = ok_client(); tokio::spawn(async move { ok_runc .delete("fake-id", Some(&opts)) .await .expect("true failed."); eprintln!("ok_runc succeeded."); }); let opts = DeleteOpts::new(); let fail_runc = fail_client(); tokio::spawn(async move { match fail_runc.delete("fake-id", Some(&opts)).await { Ok(_) => 
panic!("fail_runc returned exit status 0."), Err(Error::CommandFailed { status, stdout, stderr, }) => { if status.code().unwrap() == 1 && stdout.is_empty() && stderr.is_empty() { eprintln!("fail_runc succeeded."); } else { panic!("unexpected outputs from fail_runc.") } } Err(e) => panic!("unexpected error from fail_runc: {:?}", e), } }) .await .expect("tokio spawn falied."); } #[tokio::test] async fn test_async_output() { // test create cmd with inherit Io, expect empty cmd output let mut opts = CreateOpts::new(); opts.io = Some(Arc::new(InheritedStdIo::new().unwrap())); let echo_runc = echo_client(); let response = echo_runc .create("fake-id", "fake-bundle", Some(&opts)) .await .expect("echo failed:"); assert_ne!(response.pid, 0); assert!(response.status.success()); assert!(response.output.is_empty()); // test create cmd with pipe Io, expect nonempty cmd output let mut opts = CreateOpts::new(); opts.io = Some(Arc::new(PipedStdIo::new().unwrap())); let response = echo_runc .create("fake-id", "fake-bundle", Some(&opts)) .await .expect("echo failed:"); assert_ne!(response.pid, 0); assert!(response.status.success()); assert!(!response.output.is_empty()); } } ================================================ FILE: crates/runc/src/container.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ // Forked from https://github.com/pwFoo/rust-runc/blob/313e6ae5a79b54455b0a242a795c69adf035141a/src/lib.rs /* * Copyright 2020 fsyncd, Berlin, Germany. * Additional material, copyright of the containerd authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ use std::collections::HashMap; use serde::{Deserialize, Serialize}; use time::{serde::timestamp, OffsetDateTime}; /// Information for runc container #[derive(Debug, Serialize, Deserialize)] pub struct Container { pub id: String, pub pid: usize, pub status: String, pub bundle: String, pub rootfs: String, #[serde(with = "timestamp")] pub created: OffsetDateTime, pub annotations: HashMap, } #[cfg(test)] mod tests { use super::*; #[test] fn serde_test() { let j = r#" { "id": "fake", "pid": 1000, "status": "RUNNING", "bundle": "/path/to/bundle", "rootfs": "/path/to/rootfs", "created": 1431684000, "annotations": { "foo": "bar" } }"#; let c: Container = serde_json::from_str(j).unwrap(); assert_eq!(c.id, "fake"); assert_eq!(c.pid, 1000); assert_eq!(c.status, "RUNNING"); assert_eq!(c.bundle, "/path/to/bundle"); assert_eq!(c.rootfs, "/path/to/rootfs"); assert_eq!( c.created, OffsetDateTime::from_unix_timestamp(1431684000).unwrap() ); assert_eq!(c.annotations.get("foo"), Some(&"bar".to_string())); assert_eq!(c.annotations.get("bar"), None); } } ================================================ FILE: crates/runc/src/error.rs ================================================ /* Copyright The containerd Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Forked from https://github.com/pwFoo/rust-runc/blob/313e6ae5a79b54455b0a242a795c69adf035141a/src/lib.rs /* * Copyright 2020 fsyncd, Berlin, Germany. * Additional material, copyright of the containerd authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/

use std::{env, io, process::ExitStatus};

use thiserror::Error;

/// Errors produced while locating, invoking, or interpreting the output of
/// the `runc` binary.
#[derive(Error, Debug)]
pub enum Error {
    #[error("Unable to extract test files: {0}")]
    BundleExtractFailed(io::Error),

    #[error("Invalid path: {0}")]
    InvalidPath(io::Error),

    // Deserialization errors from runc's JSON output are forwarded verbatim.
    #[error(transparent)]
    JsonDeserializationFailed(#[from] serde_json::error::Error),

    #[error("Missing container statistics")]
    MissingContainerStats,

    #[error(transparent)]
    ProcessSpawnFailed(io::Error),

    // Message typo fixed: "occured" -> "occurred".
    #[error("Error occurred in runc: {0}")]
    InvalidCommand(io::Error),

    /// The runc process exited with a non-zero status; carries the captured
    /// stdio so callers can surface runc's own diagnostics.
    #[error("Runc command failed: status={status}, stdout=\"{stdout}\", stderr=\"{stderr}\"")]
    CommandFailed {
        status: ExitStatus,
        stdout: String,
        stderr: String,
    },

    #[error("Runc IO unavailable: {0}")]
    UnavailableIO(io::Error),

    #[cfg(feature = "async")]
    #[error("Runc command timed out: {0}")]
    CommandTimeout(tokio::time::error::Elapsed),

    #[error("Unable to parse runc version")]
    InvalidVersion,

    #[error("Unable to locate the runc")]
    NotFound,

    #[error("Error occurs with fs: {0}")]
    FileSystemError(io::Error),

    // Message fixed to match the variant's meaning ("Failed to spec file" was
    // missing the verb).
    #[error("Failed to create spec file: {0}")]
    SpecFileCreationFailed(io::Error),

    #[error(transparent)]
    SpecFileCleanupFailed(io::Error),

    #[error("Failed to find valid path for spec file")]
    SpecFileNotFound,

    #[error("Top command is missing a pid header")]
    TopMissingPidHeader,

    #[error("Top command returned an empty response")]
    TopShortResponseError,

    #[error("Unix socket connection error: {0}")]
    UnixSocketConnectionFailed(io::Error),

    #[error("Unable to bind to unix socket: {0}")]
    UnixSocketBindFailed(io::Error),

    #[error("Unix socket failed to receive pty")]
    UnixSocketReceiveMessageFailed,

    #[error("Unix socket unexpectedly closed")]
    UnixSocketClosed,

    #[error("Failed to handle environment variable: {0}")]
    EnvError(env::VarError),

    #[error("Sorry, this part of api is not implemented: {0}")]
    Unimplemented(String),

    /// Catch-all for errors raised by custom spawners or IO plumbing.
    /// NOTE(review): the boxed trait object's parameters were lost in
    /// extraction; restored to `Box<dyn std::error::Error + Send + Sync>` per
    /// the upstream crate — confirm. Message typo fixed ("occured").
    #[error("Error occurred in runc client: {0}")]
    Other(Box<dyn std::error::Error + Send + Sync>),

    #[error("Failed to set cmd io: {0}")]
    IoSet(String),

    #[error("Failed to create dir: {0}")]
    CreateDir(nix::Error),
}
================================================ FILE: crates/runc/src/events.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Forked from https://github.com/pwFoo/rust-runc/blob/313e6ae5a79b54455b0a242a795c69adf035141a/src/events.rs /* * Copyright 2020 fsyncd, Berlin, Germany. * Additional material, copyright of the containerd authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ use std::collections::HashMap; use serde::{Deserialize, Serialize}; /// Event type generated by runc #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all(serialize = "lowercase", deserialize = "lowercase"))] pub enum EventType { /// Statistics Stats, /// Out of memory Oom, } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Event { #[serde(rename = "type")] pub event_type: EventType, pub id: String, #[serde(rename = "data")] pub stats: Option, } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Stats { pub cpu: Cpu, pub memory: Memory, pub pids: Pids, #[serde(rename = "blkio")] pub block_io: BlkIO, #[serde(rename = "hugetlb")] pub huge_tlb: HugeTLB, } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct HugeTLB { pub usage: Option, pub max: Option, #[serde(rename = "failcnt")] pub fail_count: u64, } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct BlkIOEntry { pub major: Option, pub minor: Option, pub op: Option, pub value: Option, } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct BlkIO { /// Number of bytes transferred to and from the disk #[serde(rename = "ioServiceBytesRecursive")] pub io_service_bytes_recursive: Option>, /// Number of io requests issued to the disk #[serde(rename = "ioServicedRecursive")] pub io_serviced_recursive: Option>, /// Number of queued disk io requests #[serde(rename = "ioQueueRecursive")] pub io_queued_recursive: Option>, /// Amount of time io requests took to service #[serde(rename = "ioServiceTimeRecursive")] pub io_service_time_recursive: Option>, /// Amount of time io requests spent waiting in the queue #[serde(rename = "ioWaitTimeRecursive")] pub io_wait_time_recursive: Option>, /// Number of merged io requests #[serde(rename = "ioMergedRecursive")] pub io_merged_recursive: Option>, /// Disk time allocated the device #[serde(rename = "ioTimeRecursive")] pub io_time_recursive: Option>, /// Number of sectors transferred to and from the io device #[serde(rename = 
"sectorsRecursive")] pub sectors_recursive: Option>, } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Pids { /// Number of pids in the cgroup pub current: Option, /// Active pids hard limit pub limit: Option, } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Throttling { /// Number of periods with throttling active pub periods: Option, #[serde(rename = "throttledPeriods")] /// Number of periods when the container hit its throttling limit pub throtted_periods: Option, /// Aggregate time the container was throttled for in nanoseconds #[serde(rename = "throttledTime")] pub throtted_time: Option, } /// Each members represents time in nanoseconds #[derive(Debug, Clone, Serialize, Deserialize)] pub struct CpuUsage { /// Total CPU time consumed pub total: Option, /// Total CPU time consumed per core pub per_cpu: Option>, /// Total CPU time consumed in kernel mode pub kernel: u64, /// Total CPU time consumed in user mode pub user: u64, } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Cpu { pub usage: Option, pub throttling: Option, } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct MemoryEntry { /// Memory limit in bytes pub limit: u64, /// Usage in bytes pub usage: Option, /// Maximum usage in bytes pub max: Option, /// Count of memory allocation failures #[serde(rename = "failcnt")] pub fail_count: u64, } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Memory { /// Memory usage for cache pub cache: Option, /// Overall memory usage, excluding swap pub usage: Option, /// Overall memory usage, including swap pub swap: Option, /// Kernel usage of memory pub kernel: Option, /// Kernel TCP of memory #[serde(rename = "kernelTCP")] pub kernel_tcp: Option, /// Raw stats of memory pub raw: Option>, } ================================================ FILE: crates/runc/src/lib.rs ================================================ /* Copyright The containerd Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Forked from https://github.com/pwFoo/rust-runc/blob/313e6ae5a79b54455b0a242a795c69adf035141a/src/lib.rs /* * Copyright 2020 fsyncd, Berlin, Germany. * Additional material, copyright of the containerd authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #![cfg_attr(feature = "docs", doc = include_str!("../README.md"))] //! A crate for consuming the runc binary in your Rust applications, similar to //! [go-runc](https://github.com/containerd/go-runc) for Go. 
use std::{ fmt::{self, Debug, Display}, path::PathBuf, process::{ExitStatus, Stdio}, sync::Arc, }; #[cfg(feature = "async")] pub use crate::asynchronous::*; #[cfg(not(feature = "async"))] pub use crate::synchronous::*; #[cfg(feature = "async")] pub mod asynchronous; pub mod container; pub mod error; pub mod events; #[cfg(not(feature = "async"))] pub mod synchronous; #[cfg(feature = "async")] pub mod monitor; pub mod options; pub mod utils; const JSON: &str = "json"; const TEXT: &str = "text"; pub type Result = std::result::Result; /// Response is for (pid, exit status, outputs). #[derive(Debug, Clone)] pub struct Response { pub pid: u32, pub status: ExitStatus, pub output: String, } #[derive(Debug, Clone)] pub struct Version { pub runc_version: Option, pub spec_version: Option, pub commit: Option, } #[derive(Debug, Clone, Default)] pub enum LogFormat { Json, #[default] Text, } impl Display for LogFormat { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { LogFormat::Json => write!(f, "{}", JSON), LogFormat::Text => write!(f, "{}", TEXT), } } } #[cfg(not(feature = "async"))] pub type Command = std::process::Command; #[cfg(feature = "async")] pub type Command = tokio::process::Command; #[derive(Debug, Clone)] pub struct Runc { command: PathBuf, args: Vec, spawner: Arc, } impl Runc { fn command(&self, args: &[String]) -> Result { let args = [&self.args, args].concat(); let mut cmd = Command::new(&self.command); // Default to piped stdio, and they may be override by command options. cmd.stdin(Stdio::null()) .stdout(Stdio::piped()) .stderr(Stdio::piped()); // NOTIFY_SOCKET introduces a special behavior in runc but should only be set if invoked from systemd cmd.args(&args).env_remove("NOTIFY_SOCKET"); Ok(cmd) } } ================================================ FILE: crates/runc/src/monitor.rs ================================================ /* Copyright The containerd Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ use std::process::{ExitStatus, Output}; use async_trait::async_trait; use log::error; use time::OffsetDateTime; use tokio::{ process::Command, sync::oneshot::{channel, Receiver, Sender}, }; use crate::error::Error; /// A trait for spawning and waiting for a process. /// /// The design is different from Go's, because if you return a `Sender` in [ProcessMonitor::start()] /// and want to use it in [ProcessMonitor::wait()], then start and wait cannot be executed /// concurrently. Alternatively, let the caller to prepare the communication channel for /// [ProcessMonitor::start()] and [ProcessMonitor::wait()] so they could be executed concurrently. #[async_trait] pub trait ProcessMonitor { /// Spawn a process and return its output. /// /// In order to capture the output/error, it is necessary for the caller to create new pipes /// between parent and child. /// Use [tokio::process::Command::stdout(Stdio::piped())](https://docs.rs/tokio/1.16.1/tokio/process/struct.Command.html#method.stdout) /// and/or [tokio::process::Command::stderr(Stdio::piped())](https://docs.rs/tokio/1.16.1/tokio/process/struct.Command.html#method.stderr) /// respectively, when creating the [Command](https://docs.rs/tokio/1.16.1/tokio/process/struct.Command.html#). async fn start(&self, mut cmd: Command, tx: Sender) -> std::io::Result { let chi = cmd.spawn()?; // Safe to expect() because wait() hasn't been called yet, dependence on tokio interanl // implementation details. 
let pid = chi .id() .expect("failed to take pid of the container process."); let out = chi.wait_with_output().await?; let ts = OffsetDateTime::now_utc(); // On Unix, out.status.code() will return None if the process was terminated by a signal. let status = out.status.code().unwrap_or(-1); match tx.send(Exit { ts, pid, status }) { Ok(_) => Ok(out), Err(e) => { error!("command {:?} exited but receiver dropped.", cmd); error!("couldn't send messages: {:?}", e); Err(std::io::ErrorKind::ConnectionRefused.into()) } } } /// Wait for the spawned process to exit and return the exit status. async fn wait(&self, rx: Receiver) -> std::io::Result { rx.await.map_err(|_| { error!("sender dropped."); std::io::ErrorKind::BrokenPipe.into() }) } } /// A default implementation of [ProcessMonitor]. #[derive(Debug, Clone, Default)] pub struct DefaultMonitor {} impl ProcessMonitor for DefaultMonitor {} impl DefaultMonitor { pub const fn new() -> Self { Self {} } } /// Process exit status returned by [ProcessMonitor::wait()]. #[derive(Debug)] pub struct Exit { pub ts: OffsetDateTime, pub pid: u32, pub status: i32, } /// Execution result returned by `execute()`. pub struct ExecuteResult { pub exit: Exit, pub status: ExitStatus, pub stdout: String, pub stderr: String, } /// Execute a `Command` and collect exit status, output and error messages. /// /// To collect output and error messages, pipes must be used for Command's stdout and stderr. /// /// Note: invalid UTF-8 characters in output and error messages will be replaced with the `�` char. 
pub async fn execute( monitor: &T, cmd: Command, ) -> Result { let (tx, rx) = channel::(); let start = monitor.start(cmd, tx); let wait = monitor.wait(rx); let ( Output { stdout, stderr, status, }, exit, ) = tokio::try_join!(start, wait).map_err(Error::InvalidCommand)?; let stdout = String::from_utf8_lossy(&stdout).to_string(); let stderr = String::from_utf8_lossy(&stderr).to_string(); Ok(ExecuteResult { exit, status, stdout, stderr, }) } #[cfg(test)] mod tests { use std::process::Stdio; use tokio::{process::Command, sync::oneshot::channel}; use super::*; #[tokio::test] async fn test_start_wait_without_output() { let monitor = DefaultMonitor::new(); let cmd = Command::new("/bin/ls"); let (tx, rx) = channel(); let output = monitor.start(cmd, tx).await.unwrap(); assert_eq!(output.stdout.len(), 0); assert_eq!(output.stderr.len(), 0); let status = monitor.wait(rx).await.unwrap(); assert_eq!(status.status, 0); } #[tokio::test] async fn test_start_wait_with_output() { let monitor = DefaultMonitor::new(); let mut cmd = Command::new("/bin/ls"); cmd.stdout(Stdio::piped()); let (tx, rx) = channel(); let output = monitor.start(cmd, tx).await.unwrap(); assert!(!output.stdout.is_empty()); assert_eq!(output.stderr.len(), 0); let status = monitor.wait(rx).await.unwrap(); assert_eq!(status.status, 0); } #[tokio::test] async fn test_execute() { let mut cmd = Command::new("/bin/ls"); cmd.stdout(Stdio::piped()); let monitor = DefaultMonitor::new(); let result = execute(&monitor, cmd).await.unwrap(); assert_eq!(result.exit.status, 0); assert!(result.status.success()); assert!(!result.stdout.is_empty()); assert_eq!(result.stderr.len(), 0); } } ================================================ FILE: crates/runc/src/options.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Forked from https://github.com/pwFoo/rust-runc/blob/313e6ae5a79b54455b0a242a795c69adf035141a/src/lib.rs /* * Copyright 2020 fsyncd, Berlin, Germany. * Additional material, copyright of the containerd authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ use std::{ path::{Path, PathBuf}, sync::Arc, time::Duration, }; use crate::{error::Error, utils, DefaultExecutor, Io, LogFormat, Runc, Spawner}; // constants for log format pub const JSON: &str = "json"; pub const TEXT: &str = "text"; // constants for runc global flags const DEBUG: &str = "--debug"; const LOG: &str = "--log"; const LOG_FORMAT: &str = "--log-format"; const ROOT: &str = "--root"; const ROOTLESS: &str = "--rootless"; const SYSTEMD_CGROUP: &str = "--systemd-cgroup"; // constants for runc-create/runc-exec flags const CONSOLE_SOCKET: &str = "--console-socket"; const DETACH: &str = "--detach"; const NO_NEW_KEYRING: &str = "--no-new-keyring"; const NO_PIVOT: &str = "--no-pivot"; const PID_FILE: &str = "--pid-file"; // constants for runc-kill flags const ALL: &str = "--all"; // constants for runc-delete flags const FORCE: &str = "--force"; // constant for command pub const DEFAULT_COMMAND: &str = "runc"; pub trait Args { type Output; fn args(&self) -> Self::Output; } /// Global options builder for the runc binary. /// /// These options will be passed for all subsequent runc calls. /// See #[derive(Debug, Default)] pub struct GlobalOpts { /// Override the name of the runc binary. If [`None`], `runc` is used. command: Option, /// Debug logging. /// /// If true, debug level logs are emitted. debug: bool, /// Path to log file. log: Option, /// Log format to use. log_format: LogFormat, /// Path to root directory of container rootfs. root: Option, /// Whether to use rootless mode. /// /// If [`None`], `auto` settings is used. /// Note that "auto" is different from explicit "true" or "false". rootless: Option, /// Set process group ID (gpid). set_pgid: bool, /// Use systemd cgroup. systemd_cgroup: bool, /// Timeout settings for runc command. /// /// Default is 5 seconds. /// This will be used only in AsyncClient. timeout: Duration, /// executor that runs the commands executor: Option>, } impl GlobalOpts { /// Create new config builder with no options. 
pub fn new() -> Self { Default::default() } pub fn command(mut self, command: impl AsRef) -> Self { self.command = Some(command.as_ref().to_path_buf()); self } /// Set the root directory to store containers' state. /// /// The path should be located on tmpfs. /// Default is `/run/runc`, or `$XDG_RUNTIME_DIR/runc` for rootless containers. pub fn root(mut self, root: impl AsRef) -> Self { self.root = Some(root.as_ref().to_path_buf()); self } /// Enable debug logging. pub fn debug(mut self, debug: bool) -> Self { self.debug = debug; self } /// Set the log destination to path. /// /// The default is to log to stderr. pub fn log(mut self, log: impl AsRef) -> Self { self.log = Some(log.as_ref().to_path_buf()); self } /// Set the log format (default is text). pub fn log_format(mut self, log_format: LogFormat) -> Self { self.log_format = log_format; self } /// Set the log format to JSON. pub fn log_json(self) -> Self { self.log_format(LogFormat::Json) } /// Set the log format to TEXT. pub fn log_text(self) -> Self { self.log_format(LogFormat::Text) } /// Enable systemd cgroup support. /// /// If this is set, the container spec (`config.json`) is expected to have `cgroupsPath` value in // the `slice:prefix:name` form (e.g. `system.slice:runc:434234`). pub fn systemd_cgroup(mut self, systemd_cgroup: bool) -> Self { self.systemd_cgroup = systemd_cgroup; self } /// Enable or disable rootless mode. /// // Default is auto, meaning to auto-detect whether rootless should be enabled. pub fn rootless(mut self, rootless: bool) -> Self { self.rootless = Some(rootless); self } /// Set rootless mode to auto. 
pub fn rootless_auto(mut self) -> Self { self.rootless = None; self } pub fn set_pgid(mut self, set_pgid: bool) -> Self { self.set_pgid = set_pgid; self } pub fn timeout(&mut self, millis: u64) -> &mut Self { self.timeout = Duration::from_millis(millis); self } pub fn custom_spawner(&mut self, executor: Arc) -> &mut Self { self.executor = Some(executor); self } pub fn build(self) -> Result { self.args() } fn output(&self) -> Result<(PathBuf, Vec), Error> { let path = self .command .clone() .unwrap_or_else(|| PathBuf::from("runc")); let command = utils::binary_path(path).ok_or(Error::NotFound)?; let mut args = Vec::new(); // --root path : Set the root directory to store containers' state. if let Some(root) = &self.root { args.push(ROOT.into()); args.push(utils::abs_string(root)?); } // --debug : Enable debug logging. if self.debug { args.push(DEBUG.into()); } // --log path : Set the log destination to path. The default is to log to stderr. if let Some(log_path) = &self.log { args.push(LOG.into()); args.push(utils::abs_string(log_path)?); } // --log-format text|json : Set the log format (default is text). args.push(LOG_FORMAT.into()); args.push(self.log_format.to_string()); // --systemd-cgroup : Enable systemd cgroup support. if self.systemd_cgroup { args.push(SYSTEMD_CGROUP.into()); } // --rootless true|false|auto : Enable or disable rootless mode. if let Some(mode) = self.rootless { let arg = format!("{}={}", ROOTLESS, mode); args.push(arg); } Ok((command, args)) } } impl Args for GlobalOpts { type Output = Result; fn args(&self) -> Self::Output { let (command, args) = self.output()?; let executor = if let Some(exec) = self.executor.clone() { exec } else { Arc::new(DefaultExecutor {}) }; Ok(Runc { command, args, spawner: executor, }) } } #[derive(Clone, Default)] pub struct CreateOpts { pub io: Option>, /// Path to where a pid file should be created. pub pid_file: Option, /// Path to where a console socket should be created. 
pub console_socket: Option, /// Detach from the container's process (only available for run) pub detach: bool, /// Don't use pivot_root to jail process inside rootfs. pub no_pivot: bool, /// A new session keyring for the container will not be created. pub no_new_keyring: bool, } impl Args for CreateOpts { type Output = Result, Error>; fn args(&self) -> Self::Output { let mut args: Vec = vec![]; if let Some(pid_file) = &self.pid_file { args.push(PID_FILE.to_string()); args.push(utils::abs_string(pid_file)?); } if let Some(console_socket) = &self.console_socket { args.push(CONSOLE_SOCKET.to_string()); args.push(utils::abs_string(console_socket)?); } if self.no_pivot { args.push(NO_PIVOT.to_string()); } if self.no_new_keyring { args.push(NO_NEW_KEYRING.to_string()); } if self.detach { args.push(DETACH.to_string()); } Ok(args) } } impl CreateOpts { pub fn new() -> Self { Self::default() } pub fn io(mut self, io: Arc) -> Self { self.io = Some(io); self } pub fn pid_file

(mut self, pid_file: P) -> Self where P: AsRef, { self.pid_file = Some(pid_file.as_ref().to_path_buf()); self } pub fn console_socket

(mut self, console_socket: P) -> Self where P: AsRef, { self.console_socket = Some(console_socket.as_ref().to_path_buf()); self } pub fn detach(mut self, detach: bool) -> Self { self.detach = detach; self } pub fn no_pivot(mut self, no_pivot: bool) -> Self { self.no_pivot = no_pivot; self } pub fn no_new_keyring(mut self, no_new_keyring: bool) -> Self { self.no_new_keyring = no_new_keyring; self } } /// Container execution options #[derive(Clone, Default)] pub struct ExecOpts { pub io: Option>, /// Path to where a pid file should be created. pub pid_file: Option, /// Path to where a console socket should be created. pub console_socket: Option, /// Detach from the container's process (only available for run) pub detach: bool, } impl Args for ExecOpts { type Output = Result, Error>; fn args(&self) -> Self::Output { let mut args: Vec = vec![]; if let Some(pid_file) = &self.pid_file { args.push(PID_FILE.to_string()); args.push(utils::abs_string(pid_file)?); } if let Some(console_socket) = &self.console_socket { args.push(CONSOLE_SOCKET.to_string()); args.push(utils::abs_string(console_socket)?); } if self.detach { args.push(DETACH.to_string()); } Ok(args) } } impl ExecOpts { pub fn new() -> Self { Self::default() } pub fn io(mut self, io: Arc) -> Self { self.io = Some(io); self } pub fn pid_file

(mut self, pid_file: P) -> Self where P: AsRef, { self.pid_file = Some(pid_file.as_ref().to_path_buf()); self } pub fn console_socket

(mut self, console_socket: P) -> Self where P: AsRef, { self.console_socket = Some(console_socket.as_ref().to_path_buf()); self } pub fn detach(mut self, detach: bool) -> Self { self.detach = detach; self } } /// Container deletion options #[derive(Debug, Clone, Default)] pub struct DeleteOpts { /// Forcibly delete the container if it is still running pub force: bool, } impl Args for DeleteOpts { type Output = Vec; fn args(&self) -> Self::Output { let mut args: Vec = vec![]; if self.force { args.push(FORCE.to_string()); } args } } impl DeleteOpts { pub fn new() -> Self { Self::default() } pub fn force(mut self, force: bool) -> Self { self.force = force; self } } /// Container killing options #[derive(Debug, Clone, Default)] pub struct KillOpts { /// Seng the kill signal to all the processes inside the container pub all: bool, } impl Args for KillOpts { type Output = Vec; fn args(&self) -> Self::Output { let mut args: Vec = vec![]; if self.all { args.push(ALL.to_string()); } args } } impl KillOpts { pub fn new() -> Self { Self::default() } pub fn all(mut self, all: bool) -> Self { self.all = all; self } } #[cfg(test)] mod tests { use std::env; use super::*; const ARGS_FAIL_MSG: &str = "Args.args() failed."; #[test] fn create_opts_test() { assert_eq!( CreateOpts::new().args().expect(ARGS_FAIL_MSG), Vec::::new() ); assert_eq!( CreateOpts::new().pid_file(".").args().expect(ARGS_FAIL_MSG), vec![ "--pid-file".to_string(), env::current_dir() .unwrap() .to_string_lossy() .parse::() .unwrap() ] ); assert_eq!( CreateOpts::new() .console_socket("..") .args() .expect(ARGS_FAIL_MSG), vec![ "--console-socket".to_string(), env::current_dir() .unwrap() .parent() .unwrap() .to_string_lossy() .parse::() .unwrap() ] ); assert_eq!( CreateOpts::new() .detach(true) .no_pivot(true) .no_new_keyring(true) .args() .expect(ARGS_FAIL_MSG), vec![ "--no-pivot".to_string(), "--no-new-keyring".to_string(), "--detach".to_string(), ] ); } #[test] fn exec_opts_test() { assert_eq!( 
ExecOpts::new().args().expect(ARGS_FAIL_MSG), Vec::::new() ); assert_eq!( ExecOpts::new().pid_file(".").args().expect(ARGS_FAIL_MSG), vec![ "--pid-file".to_string(), env::current_dir() .unwrap() .to_string_lossy() .parse::() .unwrap() ] ); assert_eq!( ExecOpts::new() .console_socket("..") .args() .expect(ARGS_FAIL_MSG), vec![ "--console-socket".to_string(), env::current_dir() .unwrap() .parent() .unwrap() .to_string_lossy() .parse::() .unwrap() ] ); assert_eq!( ExecOpts::new().detach(true).args().expect(ARGS_FAIL_MSG), vec!["--detach".to_string(),] ); } #[test] fn delete_opts_test() { assert_eq!(DeleteOpts::new().force(false).args(), Vec::::new()); assert_eq!( DeleteOpts::new().force(true).args(), vec!["--force".to_string()], ); } #[test] fn kill_opts_test() { assert_eq!(KillOpts::new().all(false).args(), Vec::::new()); assert_eq!(KillOpts::new().all(true).args(), vec!["--all".to_string()],); } #[cfg(target_os = "linux")] #[test] fn global_opts_test() { let cfg = GlobalOpts::default().command("true"); let runc = cfg.build().unwrap(); let args = &runc.args; assert_eq!(args.len(), 2); assert!(args.contains(&LOG_FORMAT.to_string())); assert!(args.contains(&TEXT.to_string())); let cfg = GlobalOpts::default().command("/bin/true"); let runc = cfg.build().unwrap(); assert_eq!(runc.args.len(), 2); let cfg = GlobalOpts::default() .command("true") .root("/tmp") .debug(true) .log("/tmp/runc.log") .log_json() .systemd_cgroup(true) .rootless(true); let runc = cfg.build().unwrap(); let args = &runc.args; assert!(args.contains(&ROOT.to_string())); assert!(args.contains(&DEBUG.to_string())); assert!(args.contains(&"/tmp".to_string())); assert!(args.contains(&LOG.to_string())); assert!(args.contains(&"/tmp/runc.log".to_string())); assert!(args.contains(&LOG_FORMAT.to_string())); assert!(args.contains(&JSON.to_string())); assert!(args.contains(&"--rootless=true".to_string())); assert!(args.contains(&SYSTEMD_CGROUP.to_string())); assert_eq!(args.len(), 9); } } 
================================================
FILE: crates/runc/src/synchronous/io.rs
================================================

/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

use std::{
    fmt::Debug,
    fs::{File, OpenOptions},
    io::Result,
    os::unix::fs::OpenOptionsExt,
    process::Stdio,
    sync::Mutex,
};

use nix::unistd::{Gid, Uid};

use super::Io;
use crate::{Command, Pipe, PipedIo};

/// Flags selecting which of the three stdio streams should get a pipe.
#[derive(Debug, Clone)]
pub struct IOOption {
    pub open_stdin: bool,
    pub open_stdout: bool,
    pub open_stderr: bool,
}

impl Default for IOOption {
    fn default() -> Self {
        Self {
            open_stdin: true,
            open_stdout: true,
            open_stderr: true,
        }
    }
}

impl PipedIo {
    /// Create pipes for the streams enabled in `opts`, chowning the end that will be
    /// handed to the container process to `uid`/`gid`.
    pub fn new(uid: u32, gid: u32, opts: &IOOption) -> std::io::Result<Self> {
        Ok(Self {
            stdin: if opts.open_stdin {
                Self::create_pipe(uid, gid, true)?
            } else {
                None
            },
            // BUG FIX: stdout/stderr must pass `stdin = false` so that create_pipe
            // chowns the *write* end (the end given to the child). The original
            // passed `true` for all three streams, chowning the read end instead.
            stdout: if opts.open_stdout {
                Self::create_pipe(uid, gid, false)?
            } else {
                None
            },
            stderr: if opts.open_stderr {
                Self::create_pipe(uid, gid, false)?
            } else {
                None
            },
        })
    }

    /// Create one pipe and chown the child-facing end:
    /// for stdin the child reads (chown the read end), otherwise the child writes
    /// (chown the write end).
    fn create_pipe(uid: u32, gid: u32, stdin: bool) -> std::io::Result<Option<Pipe>> {
        let pipe = Pipe::new()?;
        let uid = Some(Uid::from_raw(uid));
        let gid = Some(Gid::from_raw(gid));
        if stdin {
            let rd = pipe.rd.try_clone()?;
            nix::unistd::fchown(rd, uid, gid)?;
        } else {
            let wr = pipe
                .try_clone_wr()
                .ok_or_else(|| std::io::Error::other("write end closed"))?;
            nix::unistd::fchown(wr, uid, gid)?;
        }
        Ok(Some(pipe))
    }
}

/// IO driver to direct output/error messages to /dev/null.
///
/// With this Io driver, all methods of [crate::Runc] can't capture the output/error messages.
#[derive(Debug)]
pub struct NullIo {
    // Shared handle to /dev/null; taken (dropped) by close_after_start().
    dev_null: Mutex<Option<File>>,
}

impl NullIo {
    pub fn new() -> std::io::Result<Self> {
        // NOTE(review): /dev/null is opened read-only here but is wired to the
        // child's stdout/stderr below — confirm writes through this fd succeed
        // on all supported platforms.
        let f = OpenOptions::new().read(true).open("/dev/null")?;
        let dev_null = Mutex::new(Some(f));
        Ok(Self { dev_null })
    }
}

impl Io for NullIo {
    fn set(&self, cmd: &mut Command) -> std::io::Result<()> {
        if let Some(null) = self.dev_null.lock().unwrap().as_ref() {
            cmd.stdout(null.try_clone()?);
            cmd.stderr(null.try_clone()?);
        }
        Ok(())
    }

    fn close_after_start(&self) {
        let mut m = self.dev_null.lock().unwrap();
        let _ = m.take();
    }
}

/// Io driver based on Stdio::inherited(), to direct outputs/errors to stdio.
///
/// With this Io driver, all methods of [crate::Runc] can't capture the output/error messages.
#[derive(Debug)]
pub struct InheritedStdIo {}

impl InheritedStdIo {
    pub fn new() -> std::io::Result<Self> {
        Ok(InheritedStdIo {})
    }
}

impl Io for InheritedStdIo {
    fn set(&self, cmd: &mut Command) -> std::io::Result<()> {
        cmd.stdin(Stdio::null())
            .stdout(Stdio::inherit())
            .stderr(Stdio::inherit());
        Ok(())
    }

    fn close_after_start(&self) {}
}

/// Io driver based on Stdio::piped(), to capture outputs/errors from runC.
///
/// With this Io driver, methods of [crate::Runc] may capture the output/error messages.
#[derive(Debug)]
pub struct PipedStdIo {}

impl PipedStdIo {
    pub fn new() -> std::io::Result<Self> {
        Ok(PipedStdIo {})
    }
}

impl Io for PipedStdIo {
    fn set(&self, cmd: &mut Command) -> std::io::Result<()> {
        cmd.stdin(Stdio::null())
            .stdout(Stdio::piped())
            .stderr(Stdio::piped());
        Ok(())
    }

    fn close_after_start(&self) {}
}

/// FIFO for the scenario that sets FIFOs for command Io.
#[derive(Debug)] pub struct FIFO { pub stdin: Option, pub stdout: Option, pub stderr: Option, } impl Io for FIFO { fn set(&self, cmd: &mut Command) -> Result<()> { if let Some(path) = self.stdin.as_ref() { let stdin = OpenOptions::new() .read(true) .custom_flags(libc::O_NONBLOCK) .open(path)?; cmd.stdin(stdin); } if let Some(path) = self.stdout.as_ref() { let stdout = OpenOptions::new().write(true).open(path)?; cmd.stdout(stdout); } if let Some(path) = self.stderr.as_ref() { let stderr = OpenOptions::new().write(true).open(path)?; cmd.stderr(stderr); } Ok(()) } fn close_after_start(&self) {} } #[cfg(test)] mod tests { use super::*; #[cfg(not(target_os = "macos"))] #[test] fn test_io_option() { let opts = IOOption { open_stdin: false, open_stdout: false, open_stderr: false, }; let io = PipedIo::new(1000, 1000, &opts).unwrap(); assert!(io.stdin().is_none()); assert!(io.stdout().is_none()); assert!(io.stderr().is_none()); } #[cfg(target_os = "linux")] #[test] fn test_create_piped_io() { use std::io::{Read, Write}; let opts = IOOption::default(); let uid = nix::unistd::getuid(); let gid = nix::unistd::getgid(); let io = PipedIo::new(uid.as_raw(), gid.as_raw(), &opts).unwrap(); let mut buf = [0xfau8]; let mut stdin = io.stdin().unwrap(); stdin.write_all(&buf).unwrap(); buf[0] = 0x0; io.stdin .as_ref() .map(|v| v.rd.try_clone().unwrap().read(&mut buf).unwrap()); assert_eq!(&buf, &[0xfau8]); let mut stdout = io.stdout().unwrap(); buf[0] = 0xce; io.stdout .as_ref() .map(|v| v.try_clone_wr().unwrap().write(&buf).unwrap()); buf[0] = 0x0; stdout.read_exact(&mut buf).unwrap(); assert_eq!(&buf, &[0xceu8]); let mut stderr = io.stderr().unwrap(); buf[0] = 0xa5; io.stderr .as_ref() .map(|v| v.try_clone_wr().unwrap().write(&buf).unwrap()); buf[0] = 0x0; stderr.read_exact(&mut buf).unwrap(); assert_eq!(&buf, &[0xa5u8]); io.close_after_start(); stdout.read_exact(&mut buf).unwrap_err(); stderr.read_exact(&mut buf).unwrap_err(); } #[test] fn test_null_io() { let io = 
NullIo::new().unwrap(); assert!(io.stdin().is_none()); assert!(io.stdout().is_none()); assert!(io.stderr().is_none()); } } ================================================ FILE: crates/runc/src/synchronous/mod.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ pub mod io; mod pipe; mod runc; use std::{ fmt::Debug, io::{Read, Result, Write}, }; pub use pipe::Pipe; pub use runc::{DefaultExecutor, Spawner}; use crate::Command; pub trait Io: Debug + Send + Sync { /// Return write side of stdin fn stdin(&self) -> Option> { None } /// Return read side of stdout fn stdout(&self) -> Option> { None } /// Return read side of stderr fn stderr(&self) -> Option> { None } /// Set IO for passed command. /// Read side of stdin, write side of stdout and write side of stderr should be provided to command. 
fn set(&self, cmd: &mut Command) -> Result<()>; /// Only close write side (should be stdout/err "from" runc process) fn close_after_start(&self); } #[derive(Debug)] pub struct PipedIo { pub stdin: Option, pub stdout: Option, pub stderr: Option, } impl Io for PipedIo { fn stdin(&self) -> Option> { self.stdin .as_ref() .and_then(|pipe| pipe.try_clone_wr()) .map(|x| Box::new(x) as Box) } fn stdout(&self) -> Option> { self.stdout.as_ref().and_then(|pipe| { pipe.rd .try_clone() .map(|x| Box::new(x) as Box) .ok() }) } fn stderr(&self) -> Option> { self.stderr.as_ref().and_then(|pipe| { pipe.rd .try_clone() .map(|x| Box::new(x) as Box) .ok() }) } // Note that this internally use [`std::fs::File`]'s `try_clone()`. // Thus, the files passed to commands will be not closed after command exit. fn set(&self, cmd: &mut Command) -> std::io::Result<()> { if let Some(p) = self.stdin.as_ref() { let pr = p.rd.try_clone()?; cmd.stdin(pr); } if let Some(p) = self.stdout.as_ref() { let pw = p .try_clone_wr() .ok_or_else(|| std::io::Error::other("write end closed"))?; cmd.stdout(pw); } if let Some(p) = self.stderr.as_ref() { let pw = p .try_clone_wr() .ok_or_else(|| std::io::Error::other("write end closed"))?; cmd.stdout(pw); } Ok(()) } fn close_after_start(&self) { if let Some(p) = self.stdout.as_ref() { p.close_wr(); } if let Some(p) = self.stderr.as_ref() { p.close_wr(); } } } ================================================ FILE: crates/runc/src/synchronous/pipe.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ use std::{ io::{PipeReader, PipeWriter}, sync::Mutex, }; #[derive(Debug)] pub struct Pipe { pub rd: PipeReader, wr: Mutex>, } impl Pipe { pub fn new() -> std::io::Result { let (rd, wr) = std::io::pipe()?; Ok(Self { rd, wr: Mutex::new(Some(wr)), }) } /// Clone the write end. Returns `None` if closed. pub fn try_clone_wr(&self) -> Option { self.wr .lock() .unwrap() .as_ref() .and_then(|w| w.try_clone().ok()) } /// Close the write end by dropping it. No-op if already closed. pub fn close_wr(&self) { let _ = self.wr.lock().unwrap().take(); } } ================================================ FILE: crates/runc/src/synchronous/runc.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ use std::{fmt::Debug, path::Path, process::ExitStatus}; use oci_spec::runtime::{LinuxResources, Process}; use crate::{ container::Container, error::Error, events, options::*, utils::{self, write_value_to_temp_file}, Command, Response, Result, Runc, }; impl Runc { pub(crate) fn launch(&self, cmd: Command, combined_output: bool) -> Result { let (status, pid, stdout, stderr) = self.spawner.execute(cmd)?; if status.success() { let output = if combined_output { stdout + stderr.as_str() } else { stdout }; Ok(Response { pid, status, output, }) } else { Err(Error::CommandFailed { status, stdout, stderr, }) } } /// Create a new container pub fn create

(&self, id: &str, bundle: P, opts: Option<&CreateOpts>) -> Result where P: AsRef, { let mut args = vec![ "create".to_string(), "--bundle".to_string(), utils::abs_string(bundle)?, ]; if let Some(opts) = opts { args.append(&mut opts.args()?); } args.push(id.to_string()); let mut cmd = self.command(&args)?; match opts { Some(CreateOpts { io: Some(io), .. }) => { io.set(&mut cmd).map_err(|e| Error::IoSet(e.to_string()))?; let res = self.launch(cmd, true)?; io.close_after_start(); Ok(res) } _ => self.launch(cmd, true), } } /// Delete a container pub fn delete(&self, id: &str, opts: Option<&DeleteOpts>) -> Result<()> { let mut args = vec!["delete".to_string()]; if let Some(opts) = opts { args.append(&mut opts.args()); } args.push(id.to_string()); self.launch(self.command(&args)?, true)?; Ok(()) } /// Execute an additional process inside the container pub fn exec(&self, id: &str, spec: &Process, opts: Option<&ExecOpts>) -> Result<()> { let (_temp_file, filename) = write_value_to_temp_file(spec)?; let mut args = vec!["exec".to_string(), "--process".to_string(), filename]; if let Some(opts) = opts { args.append(&mut opts.args()?); } args.push(id.to_string()); let mut cmd = self.command(&args)?; match opts { Some(ExecOpts { io: Some(io), .. 
}) => { io.set(&mut cmd).map_err(|e| Error::IoSet(e.to_string()))?; self.launch(cmd, true)?; io.close_after_start(); } _ => { self.launch(cmd, true)?; } } Ok(()) } /// Send the specified signal to processes inside the container pub fn kill(&self, id: &str, sig: u32, opts: Option<&KillOpts>) -> Result<()> { let mut args = vec!["kill".to_string()]; if let Some(opts) = opts { args.append(&mut opts.args()); } args.push(id.to_string()); args.push(sig.to_string()); let _ = self.launch(self.command(&args)?, true)?; Ok(()) } /// List all containers associated with this runc instance pub fn list(&self) -> Result> { let args = ["list".to_string(), "--format=json".to_string()]; let res = self.launch(self.command(&args)?, true)?; let output = res.output.trim(); // Ugly hack to work around golang Ok(if output == "null" { Vec::new() } else { serde_json::from_str(output).map_err(Error::JsonDeserializationFailed)? }) } /// Pause a container pub fn pause(&self, id: &str) -> Result<()> { let args = ["pause".to_string(), id.to_string()]; let _ = self.launch(self.command(&args)?, true)?; Ok(()) } /// Resume a container pub fn resume(&self, id: &str) -> Result<()> { let args = ["resume".to_string(), id.to_string()]; let _ = self.launch(self.command(&args)?, true)?; Ok(()) } pub fn checkpoint(&self) -> Result<()> { Err(Error::Unimplemented("checkpoint".to_string())) } pub fn restore(&self) -> Result<()> { Err(Error::Unimplemented("restore".to_string())) } /// List all the processes inside the container, returning their pids pub fn ps(&self, id: &str) -> Result> { let args = [ "ps".to_string(), "--format=json".to_string(), id.to_string(), ]; let res = self.launch(self.command(&args)?, false)?; let output = res.output.trim(); // Ugly hack to work around golang Ok(if output == "null" { Vec::new() } else { serde_json::from_str(output).map_err(Error::JsonDeserializationFailed)? }) } /// Run the create, start, delete lifecycle of the container and return its exit status pub fn run

(&self, id: &str, bundle: P, opts: Option<&CreateOpts>) -> Result where P: AsRef, { let mut args = vec![ "run".to_string(), "--bundle".to_string(), utils::abs_string(bundle)?, ]; if let Some(opts) = opts { args.append(&mut opts.args()?); } args.push(id.to_string()); let mut cmd = self.command(&args)?; if let Some(CreateOpts { io: Some(io), .. }) = opts { io.set(&mut cmd).map_err(|e| Error::IoSet(e.to_string()))?; }; self.launch(cmd, true) } /// Start an already created container pub fn start(&self, id: &str) -> Result { let args = ["start".to_string(), id.to_string()]; self.launch(self.command(&args)?, true) } /// Return the state of a container pub fn state(&self, id: &str) -> Result { let args = ["state".to_string(), id.to_string()]; let res = self.launch(self.command(&args)?, true)?; serde_json::from_str(&res.output).map_err(Error::JsonDeserializationFailed) } /// Return the latest statistics for a container pub fn stats(&self, id: &str) -> Result { let args = vec!["events".to_string(), "--stats".to_string(), id.to_string()]; let res = self.launch(self.command(&args)?, true)?; let event: events::Event = serde_json::from_str(&res.output).map_err(Error::JsonDeserializationFailed)?; if let Some(stats) = event.stats { Ok(stats) } else { Err(Error::MissingContainerStats) } } /// Update a container with the provided resource spec pub fn update(&self, id: &str, resources: &LinuxResources) -> Result<()> { let (_temp_file, filename) = write_value_to_temp_file(resources)?; let args = [ "update".to_string(), "--resources".to_string(), filename, id.to_string(), ]; self.launch(self.command(&args)?, true)?; Ok(()) } } pub trait Spawner: Debug { fn execute(&self, cmd: Command) -> Result<(ExitStatus, u32, String, String)>; } #[derive(Debug)] pub struct DefaultExecutor {} impl Spawner for DefaultExecutor { fn execute(&self, cmd: Command) -> Result<(ExitStatus, u32, String, String)> { let mut cmd = cmd; let child = cmd.spawn().map_err(Error::ProcessSpawnFailed)?; let pid = 
child.id(); let result = child.wait_with_output().map_err(Error::InvalidCommand)?; let status = result.status; let stdout = String::from_utf8_lossy(&result.stdout).to_string(); let stderr = String::from_utf8_lossy(&result.stderr).to_string(); Ok((status, pid, stdout, stderr)) } } #[cfg(test)] #[cfg(target_os = "linux")] mod tests { use std::sync::Arc; use oci_spec::runtime::Process; use crate::{ error::Error, io::{InheritedStdIo, PipedStdIo}, options::{CreateOpts, DeleteOpts, ExecOpts, GlobalOpts}, Runc, }; fn ok_client() -> Runc { GlobalOpts::new() .command("/bin/true") .build() .expect("unable to create runc instance") } fn fail_client() -> Runc { GlobalOpts::new() .command("/bin/false") .build() .expect("unable to create runc instance") } fn echo_client() -> Runc { GlobalOpts::new() .command("/bin/echo") .build() .expect("unable to create runc instance") } fn dummy_process() -> Process { serde_json::from_str( " { \"user\": { \"uid\": 1000, \"gid\": 1000 }, \"cwd\": \"/path/to/dir\" }", ) .unwrap() } #[test] fn test_create() { let opts = CreateOpts::new(); let ok_runc = ok_client(); let response = ok_runc .create("fake-id", "fake-bundle", Some(&opts)) .expect("true failed."); assert_ne!(response.pid, 0); assert!(response.status.success()); assert!(response.output.is_empty()); let fail_runc = fail_client(); match fail_runc.create("fake-id", "fake-bundle", Some(&opts)) { Ok(_) => panic!("fail_runc returned exit status 0."), Err(Error::CommandFailed { status, stdout, stderr, }) => { if status.code().unwrap() == 1 && stdout.is_empty() && stderr.is_empty() { eprintln!("fail_runc succeeded."); } else { panic!("unexpected outputs from fail_runc.") } } Err(e) => panic!("unexpected error from fail_runc: {:?}", e), } } #[test] fn test_run() { let opts = CreateOpts::new(); let ok_runc = ok_client(); let response = ok_runc .run("fake-id", "fake-bundle", Some(&opts)) .expect("true failed."); assert_ne!(response.pid, 0); assert!(response.status.success()); 
assert!(response.output.is_empty()); let fail_runc = fail_client(); match fail_runc.run("fake-id", "fake-bundle", Some(&opts)) { Ok(_) => panic!("fail_runc returned exit status 0."), Err(Error::CommandFailed { status, stdout, stderr, }) => { if status.code().unwrap() == 1 && stdout.is_empty() && stderr.is_empty() { eprintln!("fail_runc succeeded."); } else { panic!("unexpected outputs from fail_runc.") } } Err(e) => panic!("unexpected error from fail_runc: {:?}", e), } } #[test] fn test_exec() { let opts = ExecOpts::new(); let ok_runc = ok_client(); let proc = dummy_process(); ok_runc .exec("fake-id", &proc, Some(&opts)) .expect("true failed."); eprintln!("ok_runc succeeded."); let fail_runc = fail_client(); match fail_runc.exec("fake-id", &proc, Some(&opts)) { Ok(_) => panic!("fail_runc returned exit status 0."), Err(Error::CommandFailed { status, stdout, stderr, }) => { if status.code().unwrap() == 1 && stdout.is_empty() && stderr.is_empty() { eprintln!("fail_runc succeeded."); } else { panic!("unexpected outputs from fail_runc.") } } Err(e) => panic!("unexpected error from fail_runc: {:?}", e), } } #[test] fn test_delete() { let opts = DeleteOpts::new(); let ok_runc = ok_client(); ok_runc .delete("fake-id", Some(&opts)) .expect("true failed."); eprintln!("ok_runc succeeded."); let fail_runc = fail_client(); match fail_runc.delete("fake-id", Some(&opts)) { Ok(_) => panic!("fail_runc returned exit status 0."), Err(Error::CommandFailed { status, stdout, stderr, }) => { if status.code().unwrap() == 1 && stdout.is_empty() && stderr.is_empty() { eprintln!("fail_runc succeeded."); } else { panic!("unexpected outputs from fail_runc.") } } Err(e) => panic!("unexpected error from fail_runc: {:?}", e), } } #[test] fn test_output() { // test create cmd with inherit Io, expect empty cmd output let mut opts = CreateOpts::new(); opts.io = Some(Arc::new(InheritedStdIo::new().unwrap())); let echo_runc = echo_client(); let response = echo_runc .create("fake-id", "fake-bundle", 
Some(&opts))
            .expect("echo failed.");
        assert_ne!(response.pid, 0);
        assert!(response.status.success());
        assert!(response.output.is_empty());

        // test create cmd with pipe Io, expect nonempty cmd output
        let mut opts = CreateOpts::new();
        opts.io = Some(Arc::new(PipedStdIo::new().unwrap()));
        let echo_runc = echo_client();
        let response = echo_runc
            .create("fake-id", "fake-bundle", Some(&opts))
            .expect("echo failed.");
        assert_ne!(response.pid, 0);
        assert!(response.status.success());
        assert!(!response.output.is_empty());
    }
}


================================================
FILE: crates/runc/src/utils.rs
================================================
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

#[cfg(not(feature = "async"))]
use std::io::Write;
use std::{
    env,
    path::{Path, PathBuf},
};

use serde::Serialize;
#[cfg(not(feature = "async"))]
use tempfile::{Builder, NamedTempFile};
#[cfg(feature = "async")]
use tokio::io::AsyncWriteExt;
use uuid::Uuid;

use crate::error::Error;

// helper to resolve path (such as path for runc binary, pid files, etc.)
// The result is absolute and lexically normalized (`.` and `..` removed);
// no filesystem access or symlink resolution is performed.
pub fn abs_path_buf<P>(path: P) -> Result<PathBuf, Error>
where
    P: AsRef<Path>,
{
    let abs = std::path::absolute(path).map_err(Error::InvalidPath)?;
    // Strip `.` components and collapse `..` against the already-built prefix.
    let mut normalized = PathBuf::new();
    for component in abs.components() {
        match component {
            std::path::Component::ParentDir => {
                normalized.pop();
            }
            std::path::Component::CurDir => {}
            c => normalized.push(c),
        }
    }
    Ok(normalized)
}

// Render a path as an owned String; fails with `Error::InvalidPath` when the
// path contains non-UTF-8 bytes.
fn path_to_string(path: impl AsRef<Path>) -> Result<String, Error> {
    path.as_ref()
        .to_str()
        .map(|v| v.to_string())
        .ok_or_else(|| {
            Error::InvalidPath(std::io::Error::other(format!(
                "invalid UTF-8 string: {}",
                path.as_ref().to_string_lossy()
            )))
        })
}

// Absolute, normalized path rendered as a String (see `abs_path_buf`).
pub fn abs_string<P>(path: P) -> Result<String, Error>
where
    P: AsRef<Path>,
{
    path_to_string(abs_path_buf(path)?)
}

/// Returns a temp dir. If the environment variable "XDG_RUNTIME_DIR" is set, return its value.
/// Otherwise if `std::env::temp_dir()` failed, return current dir or return the temp dir depended on OS.
fn xdg_runtime_dir() -> String {
    env::var("XDG_RUNTIME_DIR")
        .unwrap_or_else(|_| abs_string(env::temp_dir()).unwrap_or_else(|_| ".".to_string()))
}

/// Write the serialized 'value' to a temp file
#[cfg(not(feature = "async"))]
pub fn write_value_to_temp_file<T: Serialize>(value: &T) -> Result<(NamedTempFile, String), Error> {
    let filename = format!("{}/runc-process-{}", xdg_runtime_dir(), Uuid::new_v4());
    let mut temp_file = Builder::new()
        .prefix(&filename)
        .rand_bytes(0)
        .tempfile()
        .map_err(Error::SpecFileCreationFailed)?;
    let f = temp_file.as_file_mut();
    let spec_json = serde_json::to_string(value).map_err(Error::JsonDeserializationFailed)?;
    // Use write_all: a bare `write` may perform a partial write and silently
    // truncate the spec JSON.
    f.write_all(spec_json.as_bytes())
        .map_err(Error::SpecFileCreationFailed)?;
    f.flush().map_err(Error::SpecFileCreationFailed)?;
    Ok((temp_file, filename))
}

/// Write the serialized 'value' to a temp file
/// Unlike the same function in non-async feature,
/// it returns the filename, without the NamedTempFile object,
/// which implements Drop trait to remove the file if it goes out of scope.
/// the async Drop is still not supported in rust,
/// in async context, the created file should be removed by the caller
#[cfg(feature = "async")]
pub async fn write_value_to_temp_file<T: Serialize>(value: &T) -> Result<String, Error> {
    let filename = format!("{}/runc-process-{}", xdg_runtime_dir(), Uuid::new_v4());
    let mut f = tokio::fs::OpenOptions::new()
        .create(true)
        .truncate(true)
        .write(true)
        .open(&filename)
        .await
        .map_err(Error::FileSystemError)?;
    let spec_json = serde_json::to_string(value).map_err(Error::JsonDeserializationFailed)?;
    f.write_all(spec_json.as_bytes())
        .await
        .map_err(Error::SpecFileCreationFailed)?;
    f.flush().await.map_err(Error::SpecFileCreationFailed)?;
    Ok(filename)
}

/// Resolve a binary path according to the `PATH` environment variable.
///
/// Note, the case that `path` is already an absolute path is implicitly handled by
/// `dir.join(path.as_ref())`. `Path::join(parent_path, path)` directly returns `path` when `path`
/// is an absolute path.
pub fn binary_path<P>(path: P) -> Option<PathBuf>
where
    P: AsRef<Path>,
{
    env::var_os("PATH").and_then(|paths| {
        env::split_paths(&paths).find_map(|dir| {
            let full_path = dir.join(path.as_ref());
            if full_path.is_file() {
                Some(full_path)
            } else {
                None
            }
        })
    })
}


================================================
FILE: crates/runc-shim/Cargo.toml
================================================
[package]
name = "containerd-runc-shim"
version = "0.2.0"
authors = [
    "Shaobao Feng <fshb1988@gmail.com>",
    "Tianyang Zhang <burning9699@gmail.com>",
    "The containerd Authors",
]
description = "Rust implementation of containerd's runc v2 shim runtime"
keywords = ["containerd", "shim", "containers"]
categories = ["api-bindings", "asynchronous"]
edition.workspace = true
license.workspace = true
repository.workspace = true
homepage.workspace = true

[[bin]]
# Overwrite the binary name so it can be referred as "io.containerd.runc.v2-rs" from containerd.
# Note: the runtime's binary name must start with "io.containerd.runc" in order to
# keep compatibility with Go runc runtime and the containerd client.
# Example: https://github.com/containerd/containerd/blob/8047eb2fcac1f4553ee7652862194b1e10855ce7/task_opts_unix.go#L33
name = "containerd-shim-runc-v2-rs"
path = "src/main.rs"
doc = false

[dependencies]
containerd-shim = { path = "../shim", version = "0.11.0", features = ["async"] }
libc.workspace = true
log.workspace = true
nix = { workspace = true, features = ["socket", "uio", "term", "signal"] }
oci-spec = { workspace = true, features = ["runtime"] }
runc = { path = "../runc", version = "0.3.0", features = ["async"] }
serde = { workspace = true, features = ["derive", "std"] }
serde_json = { workspace = true, features = ["std"] }
time = { workspace = true, features = ["std"] }
uuid = { workspace = true, features = ["v4"] }

# Async dependencies
async-trait.workspace = true
tokio = { workspace = true, features = ["macros", "rt-multi-thread", "process", "sync", "fs", "io-util", "net", "time", "signal"] }

rustix = { version = "1.1", default-features = false, features = ["std", "termios"] }

[package.metadata.cargo-machete]
ignored = ["libc"]

[target.'cfg(target_os = "linux")'.dependencies]
cgroups-rs.workspace = true
nix = { workspace = true, features = ["event"] }
tokio-eventfd = "0.2.2"


================================================
FILE: crates/runc-shim/README.md
================================================
# Rust containerd shim v2 for runc container

[![Crates.io](https://img.shields.io/crates/v/containerd-runc-shim)](https://crates.io/crates/containerd-runc-shim)
[![docs.rs](https://img.shields.io/docsrs/containerd-runc-shim)](https://docs.rs/containerd-runc-shim/latest/containerd-runc-shim/)
[![Crates.io](https://img.shields.io/crates/l/containerd-shim)](https://github.com/containerd/rust-extensions/blob/main/LICENSE)
[![CI](https://github.com/containerd/rust-extensions/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/containerd/rust-extensions/actions/workflows/ci.yml)

By default [containerd](https://github.com/containerd/containerd)
relies on runc shim v2 runtime (written in `Go`) to launch containers.
This crate is an alternative Rust implementation of the shim runtime.
It conforms to containerd's integration tests and can be used in place of the original Go runtime interchangeably.

## Usage

To build the binary, run:

```shell
cargo build --release --bin containerd-shim-runc-v2-rs
```

Copy it to the containerd shim dir: `/usr/local/bin/containerd-shim-runc-v2-rs`

In order to use it from containerd, run:

```shell
$ sudo ctr run --rm --runtime io.containerd.runc.v2-rs -t docker.io/library/hello-world:latest hello
```

You can run a container via `ctr`, `crictl`, or the kubernetes API.

## Performance test

### Memory overhead

Three different kinds of shim binaries are used to compare memory overhead: the first is `containerd-shim-runc-v2` compiled from golang, the next is our sync `containerd-shim-runc-v2-rs`, and the last one is our async `containerd-shim-runc-v2-rs` limited to 2 worker threads.

We run a *busybox* container inside a pod on a *16U32G Ubuntu20.04* machine with *containerd v1.6.8* and *runc v1.1.4*. To measure the memory size of the shim process we parse the output of the *smaps* file and add up all RSS segments. In addition, we also run 100 pods and collect the total memory overhead.

| | Single Process RSS | 100 Processes RSS |
| :----------------------------------------------------------- | :----------------- | :---------------- |
| containerd-shim-runc-v2 | 11.02MB | 1106.52MB |
| containerd-shim-runc-v2-rs(sync) | 3.45MB | 345.39MB |
| containerd-shim-runc-v2-rs(async, limited to 2 work threads) | 3.90MB | 396.83MB |


================================================
FILE: crates/runc-shim/build.rs
================================================
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ use std::{process::Command, str::from_utf8}; fn main() { let output = match Command::new("git").arg("rev-parse").arg("HEAD").output() { Ok(output) => output, Err(_) => { return; } }; let mut hash = from_utf8(&output.stdout).unwrap().trim().to_string(); let output_dirty = match Command::new("git").arg("diff").arg("--exit-code").output() { Ok(output) => output, Err(_) => { return; } }; if !output_dirty.status.success() { hash.push_str(".m"); } println!("cargo:rustc-env=CARGO_GIT_HASH={}", hash); } ================================================ FILE: crates/runc-shim/src/cgroup_memory.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ #![cfg(target_os = "linux")] use std::{os::unix::io::AsRawFd, path::Path}; use containerd_shim::{ error::{Error, Result}, io_error, other_error, }; use tokio::{ fs::{self, read_to_string}, io::AsyncReadExt, sync::mpsc::{self, Receiver}, }; use tokio_eventfd::EventFd; pub async fn get_path_from_cgorup(pid: u32) -> Result { let proc_path = format!("/proc/{}/cgroup", pid); let path_string = read_to_string(&proc_path) .await .map_err(io_error!(e, "open {}.", &proc_path))?; let (_, path) = path_string .lines() .find(|line| line.contains("memory")) .ok_or(Error::Other("Memory line not found".into()))? .split_once(":memory:") .ok_or(Error::Other("Failed to parse memory line".into()))?; Ok(path.to_string()) } pub async fn get_existing_cgroup_mem_path(pid_path: String) -> Result<(String, String)> { let (mut mount_root, mount_point) = get_path_from_mountinfo().await?; if mount_root == "/" { mount_root = String::from(""); } let mount_root = pid_path.trim_start_matches(&mount_root).to_string(); Ok((mount_root, mount_point)) } async fn get_path_from_mountinfo() -> Result<(String, String)> { let mountinfo_path = "/proc/self/mountinfo"; let mountinfo_string = read_to_string(mountinfo_path) .await .map_err(io_error!(e, "open {}.", mountinfo_path))?; let line = mountinfo_string .lines() .find(|line| line.contains("cgroup") && line.contains("memory")) .ok_or(Error::Other( "Lines containers cgroup and memory not found in mountinfo".into(), ))?; parse_memory_mountroot(line) } fn parse_memory_mountroot(line: &str) -> Result<(String, String)> { let mut columns = line.split_whitespace(); let mount_root = columns.nth(3).ok_or(Error::Other( "Invalid input information about mountinfo".into(), ))?; let mount_point = columns.next().ok_or(Error::Other( "Invalid input information about mountinfo".into(), ))?; Ok((mount_root.to_string(), mount_point.to_string())) } pub async fn register_memory_event( key: &str, cg_dir: &Path, event_name: &str, ) -> Result> { let path = 
cg_dir.join(event_name); let event_file = fs::File::open(path.clone()) .await .map_err(other_error!("Error get path:"))?; let mut eventfd = EventFd::new(0, false).map_err(other_error!("Error create eventfd:"))?; let event_control_path = cg_dir.join("cgroup.event_control"); let data = format!("{} {}", eventfd.as_raw_fd(), event_file.as_raw_fd()); fs::write(&event_control_path, data.clone()) .await .map_err(other_error!("Error write eventfd:"))?; let mut buf = [0u8; 8]; let (sender, receiver) = mpsc::channel(128); let key = key.to_string(); tokio::spawn(async move { loop { match eventfd.read(&mut buf).await { Ok(0) => return, Err(_) => return, _ => (), } if !Path::new(&event_control_path).exists() { return; } sender.send(key.clone()).await.unwrap(); } }); Ok(receiver) } #[cfg(test)] mod tests { use std::path::Path; use cgroups_rs::{ fs::{ hierarchies::{self, is_cgroup2_unified_mode}, memory::MemController, Cgroup, }, CgroupPid, }; use tokio::{fs::remove_file, io::AsyncWriteExt, process::Command}; use crate::cgroup_memory; #[tokio::test] async fn test_cgroupv1_oom_monitor() { if !is_cgroup2_unified_mode() { // Create a memory cgroup with limits on both memory and swap. let path = "cgroupv1_oom_monitor"; let cg = Cgroup::new(hierarchies::auto(), path).unwrap(); let mem_controller: &MemController = cg.controller_of().unwrap(); mem_controller.set_limit(10 * 1024 * 1024).unwrap(); // 10M mem_controller.set_swappiness(0).unwrap(); // Create a sh sub process, and let it wait for the stdinput. let mut child_process = Command::new("sh") .stdin(std::process::Stdio::piped()) .spawn() .unwrap(); let pid = child_process.id().unwrap(); // Add the sh subprocess to the cgroup. 
cg.add_task_by_tgid(CgroupPid::from(pid as u64)).unwrap(); // Set oom monitor let path_from_cgorup = cgroup_memory::get_path_from_cgorup(pid).await.unwrap(); let (mount_root, mount_point) = cgroup_memory::get_existing_cgroup_mem_path(path_from_cgorup) .await .unwrap(); let mem_cgroup_path = mount_point + &mount_root; let mut rx = cgroup_memory::register_memory_event( pid.to_string().as_str(), Path::new(&mem_cgroup_path), "memory.oom_control", ) .await .unwrap(); // Exec the sh subprocess to a dd command that consumes more than 10M of memory. if let Some(mut stdin) = child_process.stdin.take() { stdin .write_all( b"exec dd if=/dev/zero of=/tmp/test_oom_monitor_file bs=11M count=1\n", ) .await .unwrap(); stdin.flush().await.unwrap(); } // Wait for the oom message. if let Some(item) = rx.recv().await { assert_eq!(pid.to_string(), item, "Receive error oom message"); } // Clean. child_process.wait().await.unwrap(); cg.delete().unwrap(); remove_file("/tmp/test_oom_monitor_file").await.unwrap(); } } } ================================================ FILE: crates/runc-shim/src/common.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ use std::{ env, future::Future, io::IoSliceMut, ops::Deref, os::{ fd::{AsRawFd, FromRawFd, OwnedFd}, unix::io::RawFd, }, path::Path, sync::Arc, time::Duration, }; use containerd_shim::{ api::{ExecProcessRequest, Options}, io_error, other, other_error, util::IntoOption, Error, }; use log::{debug, warn}; use nix::{ cmsg_space, sys::{ socket::{recvmsg, ControlMessageOwned, MsgFlags, UnixAddr}, termios::tcgetattr, }, }; use oci_spec::runtime::{LinuxNamespaceType, Spec}; use runc::{ io::{Io, NullIo, FIFO}, options::GlobalOpts, Runc, Spawner, }; use serde::Deserialize; use super::io::Stdio; pub const GROUP_LABELS: [&str; 2] = [ "io.containerd.runc.v2.group", "io.kubernetes.cri.sandbox-id", ]; pub const INIT_PID_FILE: &str = "init.pid"; pub const LOG_JSON_FILE: &str = "log.json"; pub const FIFO_SCHEME: &str = "fifo"; const TIMEOUT_DURATION: std::time::Duration = Duration::from_secs(3); #[derive(Deserialize)] pub struct Log { pub level: String, pub msg: String, } #[derive(Default)] pub struct ProcessIO { pub uri: Option, pub io: Option>, pub copy: bool, } pub fn create_io( id: &str, _io_uid: u32, _io_gid: u32, stdio: &Stdio, ) -> containerd_shim::Result { let mut pio = ProcessIO::default(); if stdio.is_null() { let nio = NullIo::new().map_err(io_error!(e, "new Null Io"))?; pio.io = Some(Arc::new(nio)); return Ok(pio); } let stdout = stdio.stdout.as_str(); let scheme_path = stdout.trim().split("://").collect::>(); let scheme: &str; if scheme_path.len() <= 1 { // no scheme specified, default schema to fifo scheme = FIFO_SCHEME; pio.uri = Some(format!("{}://{}", scheme, stdout)); } else { scheme = scheme_path[0]; pio.uri = Some(stdout.to_string()); } if scheme == FIFO_SCHEME { debug!( "create named pipe io for container {}, stdin: {}, stdout: {}, stderr: {}", id, stdio.stdin.as_str(), stdio.stdout.as_str(), stdio.stderr.as_str() ); let io = FIFO { stdin: stdio.stdin.to_string().none_if(|x| x.is_empty()), stdout: stdio.stdout.to_string().none_if(|x| x.is_empty()), stderr: 
stdio.stderr.to_string().none_if(|x| x.is_empty()), }; pio.io = Some(Arc::new(io)); pio.copy = false; } Ok(pio) } #[derive(Default, Debug)] pub struct ShimExecutor {} pub fn get_spec_from_request( req: &ExecProcessRequest, ) -> containerd_shim::Result { if let Some(val) = req.spec.as_ref() { let mut p = serde_json::from_slice::(val.value.as_slice())?; p.set_terminal(Some(req.terminal)); Ok(p) } else { Err(Error::InvalidArgument("no spec in request".to_string())) } } pub fn check_kill_error(emsg: String) -> Error { let emsg = emsg.to_lowercase(); if emsg.contains("process already finished") || emsg.contains("container not running") || emsg.contains("no such process") { Error::NotFoundError("process already finished".to_string()) } else if emsg.contains("does not exist") { Error::NotFoundError("no such container".to_string()) } else { other!("unknown error after kill {}", emsg) } } const DEFAULT_RUNC_ROOT: &str = "/run/containerd/runc"; const DEFAULT_COMMAND: &str = "runc"; pub fn create_runc( runtime: &str, namespace: &str, bundle: impl AsRef, opts: &Options, spawner: Option>, ) -> containerd_shim::Result { let runtime = if runtime.is_empty() { DEFAULT_COMMAND } else { runtime }; let root = opts.root.as_str(); let root = Path::new(if root.is_empty() { DEFAULT_RUNC_ROOT } else { root }) .join(namespace); let log = bundle.as_ref().join(LOG_JSON_FILE); let mut gopts = GlobalOpts::default() .command(runtime) .root(root) .log(log) .log_json() .systemd_cgroup(opts.systemd_cgroup); if let Some(s) = spawner { gopts.custom_spawner(s); } gopts .build() .map_err(other_error!("unable to create runc instance")) } #[derive(Default)] pub(crate) struct CreateConfig {} pub fn receive_socket(stream_fd: RawFd) -> containerd_shim::Result { let mut buf = [0u8; 4096]; let mut iovec = [IoSliceMut::new(&mut buf)]; let mut space = cmsg_space!([RawFd; 2]); let (path, fds) = match recvmsg::(stream_fd, &mut iovec, Some(&mut space), MsgFlags::empty()) { Ok(msg) => { let iter = msg.cmsgs(); if 
let Some(ControlMessageOwned::ScmRights(fds)) = iter?.next() { (iovec[0].deref(), fds) } else { return Err(other!("received message is empty")); } } Err(e) => { return Err(other!("failed to receive message: {}", e)); } }; if fds.is_empty() { return Err(other!("received message is empty")); } let path = String::from_utf8(Vec::from(path)).unwrap_or_else(|e| { warn!("failed to get path from array {}", e); "".to_string() }); let fd = unsafe { OwnedFd::from_raw_fd(fds[0]) }; let path = path.trim_matches(char::from(0)); debug!( "copy_console: console socket get path: {}, fd: {}", path, fd.as_raw_fd(), ); tcgetattr(&fd)?; Ok(fd) } pub fn has_shared_pid_namespace(spec: &Spec) -> bool { match spec.linux() { None => true, Some(linux) => match linux.namespaces() { None => true, Some(namespaces) => { for ns in namespaces { if ns.typ() == LinuxNamespaceType::Pid && ns.path().is_none() { return false; } } true } }, } } /// Returns a temp dir. If the environment variable "XDG_RUNTIME_DIR" is set, return its value. /// Otherwise if `std::env::temp_dir()` failed, return current dir or return the temp dir depended on OS. pub(crate) fn xdg_runtime_dir() -> String { env::var("XDG_RUNTIME_DIR") .unwrap_or_else(|_| env::temp_dir().to_str().unwrap_or(".").to_string()) } pub async fn handle_file_open(file_op: F) -> Result where F: FnOnce() -> Fut, Fut: Future> + Send, { match tokio::time::timeout(TIMEOUT_DURATION, file_op()).await { Ok(result) => result, Err(_) => Err(std::io::Error::new( std::io::ErrorKind::TimedOut, "File operation timed out", )), } } ================================================ FILE: crates/runc-shim/src/console.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ use std::path::{Path, PathBuf}; use containerd_shim::{io_error, util::mkdir, Error, Result}; use log::warn; use tokio::net::{UnixListener, UnixStream}; use uuid::Uuid; use crate::common::xdg_runtime_dir; pub struct ConsoleSocket { pub listener: UnixListener, pub path: PathBuf, pub rmdir: bool, } impl ConsoleSocket { pub async fn new() -> Result { let dir = format!("{}/pty{}", xdg_runtime_dir(), Uuid::new_v4()); mkdir(&dir, 0o711).await?; let file_name = Path::new(&dir).join("pty.sock"); let listener = UnixListener::bind(&file_name).map_err(io_error!( e, "bind socket {}", file_name.display() ))?; Ok(ConsoleSocket { listener, path: file_name, rmdir: true, }) } pub async fn accept(&self) -> Result { let (stream, _addr) = self .listener .accept() .await .map_err(io_error!(e, "failed to list console socket"))?; Ok(stream) } // async drop is not supported yet, we can only call clean manually after socket received pub async fn clean(self) { if self.rmdir { if let Some(tmp_socket_dir) = self.path.parent() { tokio::fs::remove_dir_all(tmp_socket_dir) .await .unwrap_or_else(|e| { warn!( "remove tmp console socket path {} : {}", tmp_socket_dir.display(), e ) }) } } } } ================================================ FILE: crates/runc-shim/src/container.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ use std::collections::HashMap; use async_trait::async_trait; use containerd_shim::{ api::Status, error::Result, protos::{ api::{CreateTaskRequest, ExecProcessRequest, ProcessInfo, StateResponse}, cgroups::metrics::Metrics, protobuf::{well_known_types::any::Any, EnumOrUnknown, Message, MessageDyn}, shim::oci::ProcessDetails, }, Error, }; use log::debug; use oci_spec::runtime::LinuxResources; use time::OffsetDateTime; use tokio::sync::oneshot::Receiver; use super::processes::Process; #[async_trait] pub trait Container { async fn start(&mut self, exec_id: Option<&str>) -> Result; async fn state(&self, exec_id: Option<&str>) -> Result; async fn kill(&mut self, exec_id: Option<&str>, signal: u32, all: bool) -> Result<()>; async fn wait_channel(&mut self, exec_id: Option<&str>) -> Result>; async fn get_exit_info( &self, exec_id: Option<&str>, ) -> Result<(i32, i32, Option)>; async fn delete( &mut self, exec_id_opt: Option<&str>, ) -> Result<(i32, i32, Option)>; async fn exec(&mut self, req: ExecProcessRequest) -> Result<()>; async fn resize_pty(&mut self, exec_id: Option<&str>, height: u32, width: u32) -> Result<()>; async fn pid(&self) -> i32; async fn id(&self) -> String; async fn update(&mut self, resources: &LinuxResources) -> Result<()>; async fn stats(&self) -> Result; async fn all_processes(&self) -> Result>; async fn close_io(&mut self, exec_id: Option<&str>) -> Result<()>; async fn pause(&mut self) -> Result<()>; async fn resume(&mut self) -> Result<()>; async fn init_state(&self) -> EnumOrUnknown; } #[async_trait] pub trait ContainerFactory { async fn create(&self, ns: &str, 
req: &CreateTaskRequest) -> Result; async fn cleanup(&self, ns: &str, c: &C) -> Result<()>; } #[async_trait] pub trait ProcessFactory { async fn create(&self, req: &ExecProcessRequest) -> Result; } /// ContainerTemplate is a template struct to implement Container, /// most of the methods can be delegated to either init process or exec process. /// that's why we provides a ContainerTemplate struct, /// library users only need to implements Process for their own. pub struct ContainerTemplate { /// container id pub id: String, /// container bundle path pub bundle: String, /// init process of this container pub init: T, /// process factory that create processes when exec pub process_factory: P, /// exec processes of this container pub processes: HashMap, } #[async_trait] impl Container for ContainerTemplate where T: Process + Send + Sync, E: Process + Send + Sync, P: ProcessFactory + Send + Sync, { async fn init_state(&self) -> EnumOrUnknown { // Default should be unknown self.init.state().await.unwrap_or_default().status } async fn start(&mut self, exec_id: Option<&str>) -> Result { let process = self.get_mut_process(exec_id)?; process.start().await?; Ok(process.pid().await) } async fn state(&self, exec_id: Option<&str>) -> Result { let process = self.get_process(exec_id)?; let mut resp = process.state().await?; let init_state = self.init.state().await?.status; if init_state == EnumOrUnknown::new(Status::PAUSING) || init_state == EnumOrUnknown::new(Status::PAUSED) { resp.status = init_state; } resp.bundle = self.bundle.to_string(); debug!("container state: {:?}", resp); Ok(resp) } async fn kill(&mut self, exec_id: Option<&str>, signal: u32, all: bool) -> Result<()> { let process = self.get_mut_process(exec_id)?; process.kill(signal, all).await } async fn wait_channel(&mut self, exec_id: Option<&str>) -> Result> { let process = self.get_mut_process(exec_id)?; process.wait_channel().await } async fn get_exit_info( &self, exec_id: Option<&str>, ) -> Result<(i32, i32, 
Option)> { let process = self.get_process(exec_id)?; Ok(( process.pid().await, process.exit_code().await, process.exited_at().await, )) } async fn delete( &mut self, exec_id_opt: Option<&str>, ) -> Result<(i32, i32, Option)> { let (pid, code, exited_at) = self.get_exit_info(exec_id_opt).await?; let process = self.get_mut_process(exec_id_opt); match process { Ok(p) => p.delete().await?, Err(e) => return Err(e), } if let Some(exec_id) = exec_id_opt { self.processes.remove(exec_id); } Ok((pid, code, exited_at)) } async fn exec(&mut self, req: ExecProcessRequest) -> Result<()> { let exec_id = req.exec_id.to_string(); let exec_process = self.process_factory.create(&req).await?; self.processes.insert(exec_id, exec_process); Ok(()) } async fn resize_pty(&mut self, exec_id: Option<&str>, height: u32, width: u32) -> Result<()> { let process = self.get_mut_process(exec_id)?; process.resize_pty(height, width).await } async fn pid(&self) -> i32 { self.init.pid().await } async fn id(&self) -> String { self.id.to_string() } #[cfg(target_os = "linux")] async fn update(&mut self, resources: &LinuxResources) -> Result<()> { self.init.update(resources).await } #[cfg(not(target_os = "linux"))] async fn update(&mut self, _resources: &LinuxResources) -> Result<()> { Err(Error::Unimplemented("update".to_string())) } #[cfg(target_os = "linux")] async fn stats(&self) -> Result { self.init.stats().await } #[cfg(not(target_os = "linux"))] async fn stats(&self) -> Result { Err(Error::Unimplemented("stats".to_string())) } async fn all_processes(&self) -> Result> { let mut processes_info = self.init.ps().await?; for process_info in &mut processes_info { for (exec_id, process) in &self.processes { if process_info.pid as i32 == process.pid().await { let process_details = ProcessDetails { exec_id: exec_id.to_string(), special_fields: Default::default(), }; let v = Any { type_url: process_details.descriptor_dyn().full_name().to_string(), value: process_details.write_to_bytes()?, special_fields: 
Default::default(), }; process_info.set_info(v); break; } } } Ok(processes_info) } async fn close_io(&mut self, exec_id: Option<&str>) -> Result<()> { let process = self.get_mut_process(exec_id)?; process.close_io().await } async fn pause(&mut self) -> Result<()> { self.init.pause().await } async fn resume(&mut self) -> Result<()> { self.init.resume().await } } impl ContainerTemplate where T: Process + Send + Sync, E: Process + Send + Sync, { pub fn get_process(&self, exec_id: Option<&str>) -> Result<&(dyn Process + Send + Sync)> { match exec_id { Some(exec_id) => { let p = self.processes.get(exec_id).ok_or_else(|| { Error::NotFoundError("can not find the exec by id".to_string()) })?; Ok(p) } None => Ok(&self.init), } } pub fn get_mut_process( &mut self, exec_id: Option<&str>, ) -> Result<&mut (dyn Process + Send + Sync)> { match exec_id { Some(exec_id) => { let p = self.processes.get_mut(exec_id).ok_or_else(|| { Error::NotFoundError(format!("can not find the exec by id {}", exec_id)) })?; Ok(p) } None => Ok(&mut self.init), } } } ================================================ FILE: crates/runc-shim/src/io.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/

/// The stdio stream paths requested for a container process, plus whether a
/// terminal was requested.
#[derive(Clone, Debug, Default)]
pub struct Stdio {
    pub stdin: String,
    pub stdout: String,
    pub stderr: String,
    pub terminal: bool,
}

impl Stdio {
    pub fn new(stdin: &str, stdout: &str, stderr: &str, terminal: bool) -> Self {
        Self {
            stdin: stdin.to_string(),
            stdout: stdout.to_string(),
            stderr: stderr.to_string(),
            terminal,
        }
    }

    /// True when no stream at all has been requested.
    pub fn is_null(&self) -> bool {
        self.stdin.is_empty() && self.stdout.is_empty() && self.stderr.is_empty()
    }
}


================================================
FILE: crates/runc-shim/src/main.rs
================================================
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

use std::{env, io::Write};

use containerd_shim::{
    asynchronous::run,
    parse,
    protos::protobuf::{well_known_types::any::Any, Message},
    run_info,
};

mod cgroup_memory;
mod common;
mod console;
mod container;
mod io;
mod processes;
mod runc;
mod service;
mod task;

use service::Service;

/// Handle the `-version` / `-info` flags before starting the shim; both
/// print their payload and exit the process.
fn parse_version() {
    let os_args: Vec<_> = env::args_os().collect();
    let flags = match parse(&os_args[1..]) {
        Ok(flags) => flags,
        Err(e) => {
            eprintln!("Error parsing arguments: {}", e);
            std::process::exit(1);
        }
    };
    if flags.version {
        println!("{}:", os_args[0].to_string_lossy());
        println!("  Version: {}", env!("CARGO_PKG_VERSION"));
        println!("  Revision: {}", env!("CARGO_GIT_HASH"));
        println!();
        std::process::exit(0);
    }
    if flags.info {
        let r = run_info();
        match r {
            Ok(rinfo) => {
                // containerd expects the runtime info serialized as a
                // protobuf Any on stdout.
                let mut info = Any::new();
                info.type_url = "io.containerd.runc.v2.Info".to_string();
                info.value = match rinfo.write_to_bytes() {
                    Ok(bytes) => bytes,
                    Err(e) => {
                        eprintln!("Failed to write runtime info to bytes: {}", e);
                        std::process::exit(1);
                    }
                };
                std::io::stdout()
                    .write_all(info.write_to_bytes().unwrap().as_slice())
                    .expect("Failed to write to stdout");
            }
            Err(_) => {
                eprintln!("Failed to get runtime info");
                std::process::exit(1);
            }
        }
        std::process::exit(0);
    }
}

#[tokio::main]
async fn main() {
    parse_version();
    run::<Service>("io.containerd.runc.v2-rs", None).await;
}


================================================
FILE: crates/runc-shim/src/processes.rs
================================================
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
   limitations under the License.
*/

use std::sync::{Arc, Mutex};

use async_trait::async_trait;
use containerd_shim::{
    protos::{
        api::{ProcessInfo, StateResponse, Status},
        cgroups::metrics::Metrics,
        protobuf::well_known_types::timestamp::Timestamp,
    },
    Console, Result,
};
use oci_spec::runtime::LinuxResources;
use rustix::termios::{tcsetwinsize, Winsize};
use time::OffsetDateTime;
use tokio::{
    fs::File,
    sync::oneshot::{channel, Receiver, Sender},
};

use crate::io::Stdio;

/// A single process (init or exec) inside a container.
#[allow(dead_code)]
#[async_trait]
pub trait Process {
    async fn start(&mut self) -> Result<()>;
    async fn set_exited(&mut self, exit_code: i32);
    async fn pid(&self) -> i32;
    async fn state(&self) -> Result<StateResponse>;
    async fn kill(&mut self, signal: u32, all: bool) -> Result<()>;
    async fn delete(&mut self) -> Result<()>;
    async fn wait_channel(&mut self) -> Result<Receiver<()>>;
    async fn exit_code(&self) -> i32;
    async fn exited_at(&self) -> Option<OffsetDateTime>;
    async fn resize_pty(&mut self, height: u32, width: u32) -> Result<()>;
    async fn update(&mut self, resources: &LinuxResources) -> Result<()>;
    async fn stats(&self) -> Result<Metrics>;
    async fn ps(&self) -> Result<Vec<ProcessInfo>>;
    async fn close_io(&mut self) -> Result<()>;
    async fn pause(&mut self) -> Result<()>;
    async fn resume(&mut self) -> Result<()>;
    async fn id(&self) -> &str;
}

/// Runtime-specific operations a `ProcessTemplate` delegates to.
#[allow(dead_code)]
#[async_trait]
pub trait ProcessLifecycle<P: Process> {
    async fn start(&self, p: &mut P) -> Result<()>;
    async fn kill(&self, p: &mut P, signal: u32, all: bool) -> Result<()>;
    async fn delete(&self, p: &mut P) -> Result<()>;
    async fn update(&self, p: &mut P, resources: &LinuxResources) -> Result<()>;
    async fn stats(&self, p: &P) -> Result<Metrics>;
    async fn ps(&self, p: &P) -> Result<Vec<ProcessInfo>>;
    async fn pause(&self, p: &mut P) -> Result<()>;
    async fn resume(&self, p: &mut P) -> Result<()>;
}

/// Generic process state holder; runtime behavior is supplied by the
/// `ProcessLifecycle` implementation `S`.
pub struct ProcessTemplate<S> {
    pub state: Status,
    pub id: String,
    pub stdio: Stdio,
    pub pid: i32,
    pub exit_code: i32,
    pub exited_at: Option<OffsetDateTime>,
    // Senders whose drop wakes up everyone waiting on this process.
    pub wait_chan_tx: Vec<Sender<()>>,
    pub console: Option<Console>,
    pub lifecycle: Arc<S>,
    pub stdin: Arc<Mutex<Option<File>>>,
}

impl<S> ProcessTemplate<S> {
    pub fn new(id: &str, stdio: Stdio, lifecycle: S) -> Self {
        Self {
            state: Status::CREATED,
            id: id.to_string(),
            stdio,
            pid: 0,
            exit_code: 0,
            exited_at: None,
            wait_chan_tx: vec![],
            console: None,
            lifecycle: Arc::new(lifecycle),
            stdin: Arc::new(Mutex::new(None)),
        }
    }
}

#[async_trait]
impl<S> Process for ProcessTemplate<S>
where
    S: ProcessLifecycle<Self> + Sync + Send,
{
    async fn start(&mut self) -> Result<()> {
        self.lifecycle.clone().start(self).await?;
        Ok(())
    }

    async fn set_exited(&mut self, exit_code: i32) {
        self.state = Status::STOPPED;
        self.exit_code = exit_code;
        self.exited_at = Some(OffsetDateTime::now_utc());
        // set wait_chan_tx to empty, to trigger the drop of the initialized Receiver.
        self.wait_chan_tx = vec![];
    }

    async fn pid(&self) -> i32 {
        self.pid
    }

    async fn id(&self) -> &str {
        self.id.as_str()
    }

    async fn state(&self) -> Result<StateResponse> {
        let mut resp = StateResponse::new();
        resp.id = self.id.to_string();
        resp.set_status(self.state);
        resp.pid = self.pid as u32;
        resp.terminal = self.stdio.terminal;
        resp.stdin = self.stdio.stdin.to_string();
        resp.stdout = self.stdio.stdout.to_string();
        resp.stderr = self.stdio.stderr.to_string();
        resp.exit_status = self.exit_code as u32;
        if let Some(exit_at) = self.exited_at {
            let mut time_stamp = Timestamp::new();
            time_stamp.seconds = exit_at.unix_timestamp();
            time_stamp.nanos = exit_at.nanosecond() as i32;
            resp.exited_at = Some(time_stamp).into();
        }
        Ok(resp)
    }

    async fn kill(&mut self, signal: u32, all: bool) -> Result<()> {
        self.lifecycle.clone().kill(self, signal, all).await
    }

    async fn delete(&mut self) -> Result<()> {
        self.lifecycle.clone().delete(self).await
    }

    async fn wait_channel(&mut self) -> Result<Receiver<()>> {
        let (tx, rx) = channel::<()>();
        // For a stopped process the sender is dropped immediately, so the
        // receiver resolves at once.
        if self.state != Status::STOPPED {
            self.wait_chan_tx.push(tx);
        }
        Ok(rx)
    }

    async fn exit_code(&self) -> i32 {
        self.exit_code
    }

    async fn exited_at(&self) -> Option<OffsetDateTime> {
        self.exited_at
    }

    async fn resize_pty(&mut self, height: u32, width: u32) -> Result<()> {
        if let Some(console) = self.console.as_ref() {
            let w = Winsize {
                ws_row: height as u16,
                ws_col: width as u16,
                ws_xpixel: 0,
                ws_ypixel: 0,
            };
            tcsetwinsize(&console.file, w)
                .map_err(|e| containerd_shim::Error::Other(e.to_string()))?;
        }
        Ok(())
    }

    async fn update(&mut self, resources: &LinuxResources) -> Result<()> {
        self.lifecycle.clone().update(self, resources).await
    }

    async fn stats(&self) -> Result<Metrics> {
        self.lifecycle.stats(self).await
    }

    async fn ps(&self) -> Result<Vec<ProcessInfo>> {
        self.lifecycle.ps(self).await
    }

    async fn close_io(&mut self) -> Result<()> {
        // Dropping the writer end closes the container's stdin.
        let mut lock_guard = self.stdin.lock().unwrap();
        if let Some(stdin_w_file) = lock_guard.take() {
            drop(stdin_w_file);
        }
        Ok(())
    }

    async fn pause(&mut self) -> Result<()> {
        self.lifecycle.clone().pause(self).await
    }

    async fn resume(&mut self) -> Result<()> {
        self.lifecycle.clone().resume(self).await
    }
}


================================================
FILE: crates/runc-shim/src/runc.rs
================================================
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/ #[cfg(target_os = "linux")] use std::sync::RwLock; use std::{ convert::TryFrom, os::{ fd::{IntoRawFd, OwnedFd}, unix::{ io::{AsRawFd, FromRawFd}, prelude::ExitStatusExt, }, }, path::{Path, PathBuf}, process::ExitStatus, sync::{Arc, Mutex}, }; use async_trait::async_trait; #[cfg(target_os = "linux")] use cgroups_rs::fs::Cgroup; use containerd_shim::{ api::{CreateTaskRequest, ExecProcessRequest, Options, Status}, asynchronous::monitor::{monitor_subscribe, monitor_unsubscribe, Subscription}, io_error, monitor::{ExitEvent, Subject, Topic}, mount::umount_recursive, other, other_error, protos::{ api::ProcessInfo, cgroups::metrics::Metrics, protobuf::{CodedInputStream, Message}, }, util::{asyncify, mkdir, mount_rootfs, read_file_to_str, write_options, write_runtime}, Console, Error, ExitSignal, Result, }; use log::{debug, error}; use nix::{sys::signal::kill, unistd::Pid}; use oci_spec::runtime::{LinuxResources, Process}; use runc::{Command, Runc, Spawner}; use tokio::{ fs::{remove_file, File, OpenOptions}, io::{AsyncBufReadExt, AsyncRead, AsyncReadExt, AsyncWrite, BufReader}, }; use super::{ console::ConsoleSocket, container::{ContainerFactory, ContainerTemplate, ProcessFactory}, processes::{ProcessLifecycle, ProcessTemplate}, }; use crate::{ common::{ check_kill_error, create_io, create_runc, get_spec_from_request, handle_file_open, receive_socket, CreateConfig, Log, ProcessIO, ShimExecutor, INIT_PID_FILE, LOG_JSON_FILE, }, io::Stdio, }; pub type ExecProcess = ProcessTemplate; pub type InitProcess = ProcessTemplate; pub type RuncContainer = ContainerTemplate; #[derive(Clone, Default)] pub(crate) struct RuncFactory {} #[async_trait] impl ContainerFactory for RuncFactory { async fn create( &self, ns: &str, req: &CreateTaskRequest, ) -> containerd_shim::Result { let bundle = req.bundle(); let mut opts = Options::new(); if let Some(any) = req.options.as_ref() { let mut input = CodedInputStream::from_bytes(any.value.as_ref()); opts.merge_from(&mut input)?; } if 
opts.compute_size() > 0 { debug!("create options: {:?}", &opts); } let runtime = opts.binary_name.as_str(); write_options(bundle, &opts).await?; write_runtime(bundle, runtime).await?; let rootfs_vec = req.rootfs().to_vec(); let rootfs = if !rootfs_vec.is_empty() { let tmp_rootfs = Path::new(bundle).join("rootfs"); mkdir(&tmp_rootfs, 0o711).await?; tmp_rootfs } else { PathBuf::new() }; for m in rootfs_vec { mount_rootfs(&m, rootfs.as_path()).await? } let runc = create_runc( runtime, ns, bundle, &opts, Some(Arc::new(ShimExecutor::default())), )?; let id = req.id(); let stdio = Stdio::new(req.stdin(), req.stdout(), req.stderr(), req.terminal()); let mut init = InitProcess::new( id, stdio, RuncInitLifecycle::new(runc.clone(), opts.clone(), bundle), ); let config = CreateConfig::default(); self.do_create(&mut init, config).await?; #[cfg(target_os = "linux")] { *init.lifecycle.cgroup_cache.write().unwrap() = containerd_shim::cgroup::get_cgroup(init.pid as u32).ok(); } let container = RuncContainer { id: id.to_string(), bundle: bundle.to_string(), init, process_factory: RuncExecFactory { runtime: runc, bundle: bundle.to_string(), io_uid: opts.io_uid, io_gid: opts.io_gid, }, processes: Default::default(), }; Ok(container) } async fn cleanup(&self, _ns: &str, _c: &RuncContainer) -> containerd_shim::Result<()> { Ok(()) } } impl RuncFactory { async fn do_create(&self, init: &mut InitProcess, _config: CreateConfig) -> Result<()> { let id = init.id.to_string(); let stdio = &init.stdio; let opts = &init.lifecycle.opts; let bundle = &init.lifecycle.bundle; let pid_path = Path::new(bundle).join(INIT_PID_FILE); let mut create_opts = runc::options::CreateOpts::new() .pid_file(&pid_path) .no_pivot(opts.no_pivot_root) .no_new_keyring(opts.no_new_keyring) .detach(false); let (socket, pio) = if stdio.terminal { let s = ConsoleSocket::new().await?; create_opts.console_socket = Some(s.path.to_owned()); (Some(s), None) } else { let pio = create_io(&id, opts.io_uid, opts.io_gid, stdio)?; 
create_opts.io = pio.io.as_ref().cloned(); (None, Some(pio)) }; let resp = init .lifecycle .runtime .create(&id, bundle, Some(&create_opts)) .await; if let Err(e) = resp { if let Some(s) = socket { s.clean().await; } return Err(runtime_error(bundle, e, "OCI runtime create failed").await); } copy_io_or_console(init, socket, pio, init.lifecycle.exit_signal.clone()).await?; let pid = read_file_to_str(pid_path).await?.parse::()?; init.pid = pid; Ok(()) } } // runtime_error will read the OCI runtime logfile retrieving OCI runtime error pub async fn runtime_error(bundle: &str, e: runc::error::Error, msg: &str) -> Error { let mut rt_msg = String::new(); match File::open(Path::new(bundle).join(LOG_JSON_FILE)).await { Err(err) => other!("{}: unable to open OCI runtime log file){}", msg, err), Ok(file) => { let mut lines = BufReader::new(file).lines(); while let Ok(Some(line)) = lines.next_line().await { // Retrieve the last runtime error match serde_json::from_str::(&line) { Err(err) => return other!("{}: unable to parse log msg: {}", msg, err), Ok(log) => { if log.level == "error" { rt_msg = log.msg.trim().to_string(); } } } } if !rt_msg.is_empty() { other!("{}: {}", msg, rt_msg) } else { other!("{}: (no OCI runtime error in logfile) {}", msg, e) } } } } pub struct RuncExecFactory { runtime: Runc, bundle: String, io_uid: u32, io_gid: u32, } #[async_trait] impl ProcessFactory for RuncExecFactory { async fn create(&self, req: &ExecProcessRequest) -> Result { let p = get_spec_from_request(req)?; Ok(ExecProcess { state: Status::CREATED, id: req.exec_id.to_string(), stdio: Stdio { stdin: req.stdin.to_string(), stdout: req.stdout.to_string(), stderr: req.stderr.to_string(), terminal: req.terminal, }, pid: 0, exit_code: 0, exited_at: None, wait_chan_tx: vec![], console: None, lifecycle: Arc::from(RuncExecLifecycle { runtime: self.runtime.clone(), bundle: self.bundle.to_string(), container_id: req.id.to_string(), io_uid: self.io_uid, io_gid: self.io_gid, spec: p, exit_signal: 
Default::default(), }), stdin: Arc::new(Mutex::new(None)), }) } } pub struct RuncInitLifecycle { runtime: Runc, opts: Options, bundle: String, exit_signal: Arc, /// Cache for cgroup paths to avoid repeated /proc//cgroup parsing #[cfg(target_os = "linux")] cgroup_cache: RwLock>, } #[async_trait] impl ProcessLifecycle for RuncInitLifecycle { async fn start(&self, p: &mut InitProcess) -> containerd_shim::Result<()> { if let Err(e) = self.runtime.start(p.id.as_str()).await { return Err(runtime_error(&p.lifecycle.bundle, e, "OCI runtime start failed").await); } p.state = Status::RUNNING; Ok(()) } async fn kill( &self, p: &mut InitProcess, signal: u32, all: bool, ) -> containerd_shim::Result<()> { self.runtime .kill( p.id.as_str(), signal, Some(&runc::options::KillOpts { all }), ) .await .map_err(|e| check_kill_error(e.to_string())) } async fn delete(&self, p: &mut InitProcess) -> containerd_shim::Result<()> { if let Err(e) = self .runtime .delete( p.id.as_str(), Some(&runc::options::DeleteOpts { force: true }), ) .await { if !e.to_string().to_lowercase().contains("does not exist") { return Err( runtime_error(&p.lifecycle.bundle, e, "OCI runtime delete failed").await, ); } } umount_recursive(Path::new(&self.bundle).join("rootfs").to_str(), 0)?; self.exit_signal.signal(); Ok(()) } #[cfg(target_os = "linux")] async fn update(&self, p: &mut InitProcess, resources: &LinuxResources) -> Result<()> { if p.pid <= 0 { return Err(other!( "failed to update resources because init process is {}", p.pid )); } // Check if cgroup still exists before attempting update if !self.ensure_init_cgroup_exists().await { return Err(other!( "failed to update resources because cgroup for process {} has been released", p.pid )); } let cgroup_guard = p.lifecycle.cgroup_cache.read().unwrap(); let cgroup = cgroup_guard .as_ref() .ok_or_else(|| other!("cgroup cache is empty for process {}", p.pid))?; containerd_shim::cgroup::update_resources(cgroup, resources) } #[cfg(not(target_os = "linux"))] async fn 
update(&self, _p: &mut InitProcess, _resources: &LinuxResources) -> Result<()> { Err(Error::Unimplemented("update resource".to_string())) } #[cfg(target_os = "linux")] async fn stats(&self, p: &InitProcess) -> Result { if p.pid <= 0 { return Err(other!( "failed to collect metrics because init process is {}", p.pid )); } // Check if cgroup still exists before attempting to collect stats if !self.ensure_init_cgroup_exists().await { return Err(other!( "failed to collect metrics because cgroup for process {} has been released", p.pid )); } let cgroup_guard = p.lifecycle.cgroup_cache.read().unwrap(); let cgroup = cgroup_guard .as_ref() .ok_or_else(|| other!("cgroup cache is empty for process {}", p.pid))?; containerd_shim::cgroup::collect_metrics(cgroup) } #[cfg(not(target_os = "linux"))] async fn stats(&self, _p: &InitProcess) -> Result { Err(Error::Unimplemented("process stats".to_string())) } async fn ps(&self, p: &InitProcess) -> Result> { let pids = self .runtime .ps(&p.id) .await .map_err(other_error!("failed to execute runc ps"))?; Ok(pids .iter() .map(|&x| ProcessInfo { pid: x as u32, ..Default::default() }) .collect()) } #[cfg(target_os = "linux")] async fn pause(&self, p: &mut InitProcess) -> Result<()> { match p.state { Status::RUNNING => { p.state = Status::PAUSING; if let Err(e) = self.runtime.pause(p.id.as_str()).await { p.state = Status::RUNNING; return Err(runtime_error(&self.bundle, e, "OCI runtime pause failed").await); } p.state = Status::PAUSED; Ok(()) } _ => Err(other!("cannot pause when in {:?} state", p.state)), } } #[cfg(not(target_os = "linux"))] async fn pause(&self, _p: &mut InitProcess) -> Result<()> { Err(Error::Unimplemented("pause".to_string())) } #[cfg(target_os = "linux")] async fn resume(&self, p: &mut InitProcess) -> Result<()> { match p.state { Status::PAUSED => { if let Err(e) = self.runtime.resume(p.id.as_str()).await { return Err(runtime_error(&self.bundle, e, "OCI runtime pause failed").await); } p.state = Status::RUNNING; Ok(()) 
} _ => Err(other!("cannot resume when in {:?} state", p.state)), } } #[cfg(not(target_os = "linux"))] async fn resume(&self, _p: &mut InitProcess) -> Result<()> { Err(Error::Unimplemented("resume".to_string())) } } impl RuncInitLifecycle { pub fn new(runtime: Runc, opts: Options, bundle: &str) -> Self { Self { runtime, opts, bundle: bundle.to_string(), exit_signal: Default::default(), #[cfg(target_os = "linux")] cgroup_cache: RwLock::new(None), } } /// Ensure cgroup exists and cache the path information /// Returns true if cgroup exists, false if released #[cfg(target_os = "linux")] async fn ensure_init_cgroup_exists(&self) -> bool { let cache = self.cgroup_cache.read().unwrap(); if let Some(ref cached) = *cache { cached.exists() } else { false } } } pub struct RuncExecLifecycle { runtime: Runc, bundle: String, container_id: String, io_uid: u32, io_gid: u32, spec: Process, exit_signal: Arc, } #[async_trait] impl ProcessLifecycle for RuncExecLifecycle { async fn start(&self, p: &mut ExecProcess) -> containerd_shim::Result<()> { let bundle = self.bundle.to_string(); let pid_path = Path::new(&bundle).join(format!("{}.pid", &p.id)); let mut exec_opts = runc::options::ExecOpts { io: None, pid_file: Some(pid_path.to_owned()), console_socket: None, detach: true, }; let (socket, pio) = if p.stdio.terminal { let s = ConsoleSocket::new().await?; exec_opts.console_socket = Some(s.path.to_owned()); (Some(s), None) } else { let pio = create_io(&p.id, self.io_uid, self.io_gid, &p.stdio)?; exec_opts.io = pio.io.as_ref().cloned(); (None, Some(pio)) }; //TODO checkpoint support let exec_result = self .runtime .exec(&self.container_id, &self.spec, Some(&exec_opts)) .await; if let Err(e) = exec_result { if let Some(s) = socket { s.clean().await; } return Err(runtime_error(&bundle, e, "OCI runtime exec failed").await); } if !p.stdio.stdin.is_empty() { let stdin_clone = p.stdio.stdin.clone(); let stdin_w = p.stdin.clone(); // Open the write side in advance to make sure read side will 
not block, // open it in another thread otherwise it will block too. tokio::spawn(async move { if let Ok(stdin_w_file) = OpenOptions::new() .write(true) .open(stdin_clone.as_str()) .await { let mut lock_guard = stdin_w.lock().unwrap(); *lock_guard = Some(stdin_w_file); } }); } copy_io_or_console(p, socket, pio, p.lifecycle.exit_signal.clone()).await?; let pid = read_file_to_str(pid_path).await?.parse::()?; p.pid = pid; p.state = Status::RUNNING; Ok(()) } async fn kill( &self, p: &mut ExecProcess, signal: u32, _all: bool, ) -> containerd_shim::Result<()> { if p.pid <= 0 { Err(Error::FailedPreconditionError( "process not created".to_string(), )) } else if p.exited_at.is_some() { Err(Error::NotFoundError("process already finished".to_string())) } else { let pid = p.pid; let kill_future = tokio::task::spawn_blocking(move || { kill( Pid::from_raw(pid), nix::sys::signal::Signal::try_from(signal as i32).unwrap(), ) }); match tokio::time::timeout(std::time::Duration::from_secs(3), kill_future).await { Ok(Ok(result)) => result.map_err(Into::into), Ok(Err(e)) => Err(Error::Other(format!("kill task error: {}", e))), Err(_) => { debug!( "kill operation timed out for pid {}, signal {}", pid, signal ); // timeout also return ok // For termination signals, it may have taken effect even if it timed out if signal == 9 || signal == 15 { Ok(()) } else { Err(Error::DeadlineExceeded( "kill operation timed out".to_string(), )) } } } } } async fn delete(&self, p: &mut ExecProcess) -> Result<()> { self.exit_signal.signal(); let exec_pid_path = Path::new(self.bundle.as_str()).join(format!("{}.pid", p.id)); remove_file(exec_pid_path).await.unwrap_or_default(); Ok(()) } async fn update(&self, _p: &mut ExecProcess, _resources: &LinuxResources) -> Result<()> { Err(Error::Unimplemented("exec update".to_string())) } async fn stats(&self, _p: &ExecProcess) -> Result { Err(Error::Unimplemented("exec stats".to_string())) } async fn ps(&self, _p: &ExecProcess) -> Result> { 
Err(Error::Unimplemented("exec ps".to_string())) } async fn pause(&self, _p: &mut ExecProcess) -> Result<()> { Err(Error::Unimplemented("exec pause".to_string())) } async fn resume(&self, _p: &mut ExecProcess) -> Result<()> { Err(Error::Unimplemented("exec resume".to_string())) } } async fn copy_console( console_socket: &ConsoleSocket, stdio: &Stdio, exit_signal: Arc, ) -> Result { debug!("copy_console: waiting for runtime to send console fd"); let stream = console_socket.accept().await?; let fd = asyncify(move || -> Result { receive_socket(stream.as_raw_fd()) }).await?; let f = unsafe { File::from_raw_fd(fd.into_raw_fd()) }; if !stdio.stdin.is_empty() { debug!("copy_console: pipe stdin to console"); let console_stdin = unsafe { tokio::fs::File::from_raw_fd(f.as_raw_fd()) }; let stdin = handle_file_open(|| async { OpenOptions::new() .read(true) .open(stdio.stdin.as_str()) .await }) .await .map_err(io_error!(e, "failed to open stdin"))?; spawn_copy(stdin, console_stdin, exit_signal.clone(), None::); } if !stdio.stdout.is_empty() { let console_stdout = unsafe { tokio::fs::File::from_raw_fd(f.as_raw_fd()) }; debug!("copy_console: pipe stdout from console"); let stdout = OpenOptions::new() .write(true) .open(stdio.stdout.as_str()) .await .map_err(io_error!(e, "open stdout"))?; // open a read to make sure even if the read end of containerd shutdown, // copy still continue until the restart of containerd succeed let stdout_r = OpenOptions::new() .read(true) .open(stdio.stdout.as_str()) .await .map_err(io_error!(e, "open stdout for read"))?; spawn_copy( console_stdout, stdout, exit_signal, Some(move || { drop(stdout_r); }), ); } let console = Console { file: f.into_std().await, }; Ok(console) } pub async fn copy_io(pio: &ProcessIO, stdio: &Stdio, exit_signal: Arc) -> Result<()> { if !pio.copy { return Ok(()); }; if let Some(io) = &pio.io { if let Some(w) = io.stdin() { debug!("copy_io: pipe stdin from {}", stdio.stdin.as_str()); if !stdio.stdin.is_empty() { let stdin = 
handle_file_open(|| async { OpenOptions::new() .read(true) .open(stdio.stdin.as_str()) .await }) .await .map_err(io_error!(e, "open stdin"))?; spawn_copy(stdin, w, exit_signal.clone(), None::); } } if let Some(r) = io.stdout() { debug!("copy_io: pipe stdout from to {}", stdio.stdout.as_str()); if !stdio.stdout.is_empty() { let stdout = handle_file_open(|| async { OpenOptions::new() .write(true) .open(stdio.stdout.as_str()) .await }) .await .map_err(io_error!(e, "open stdout"))?; // open a read to make sure even if the read end of containerd shutdown, // copy still continue until the restart of containerd succeed let stdout_r = handle_file_open(|| async { OpenOptions::new() .read(true) .open(stdio.stdout.as_str()) .await }) .await .map_err(io_error!(e, "open stdout for read"))?; spawn_copy( r, stdout, exit_signal.clone(), Some(move || { drop(stdout_r); }), ); } } if let Some(r) = io.stderr() { if !stdio.stderr.is_empty() { debug!("copy_io: pipe stderr from to {}", stdio.stderr.as_str()); let stderr = handle_file_open(|| async { OpenOptions::new() .write(true) .open(stdio.stderr.as_str()) .await }) .await .map_err(io_error!(e, "open stderr"))?; // open a read to make sure even if the read end of containerd shutdown, // copy still continue until the restart of containerd succeed let stderr_r = handle_file_open(|| async { OpenOptions::new() .read(true) .open(stdio.stderr.as_str()) .await }) .await .map_err(io_error!(e, "open stderr for read"))?; spawn_copy( r, stderr, exit_signal, Some(move || { drop(stderr_r); }), ); } } } Ok(()) } fn spawn_copy(from: R, to: W, exit_signal: Arc, on_close: Option) where R: AsyncRead + Send + Unpin + 'static, W: AsyncWrite + Send + Unpin + 'static, F: FnOnce() + Send + 'static, { let mut src = from; let mut dst = to; tokio::spawn(async move { tokio::select! 
{ _ = exit_signal.wait() => { debug!("container exit, copy task should exit too"); }, res = tokio::io::copy(&mut src, &mut dst) => { if let Err(e) = res { error!("copy io failed {}", e); } } } if let Some(f) = on_close { f(); } }); } async fn copy_io_or_console

( p: &mut ProcessTemplate

, socket: Option, pio: Option, exit_signal: Arc, ) -> Result<()> { if p.stdio.terminal { if let Some(console_socket) = socket { let console_result = copy_console(&console_socket, &p.stdio, exit_signal).await; console_socket.clean().await; match console_result { Ok(c) => { p.console = Some(c); } Err(e) => { return Err(e); } } } } else if let Some(pio) = pio { copy_io(&pio, &p.stdio, exit_signal).await?; } Ok(()) } #[async_trait] impl Spawner for ShimExecutor { async fn execute(&self, cmd: Command) -> runc::Result<(ExitStatus, u32, String, String)> { let mut cmd = cmd; let subscription = monitor_subscribe(Topic::Pid) .await .map_err(|e| runc::error::Error::Other(Box::new(e)))?; let sid = subscription.id; let child = match cmd.spawn() { Ok(c) => c, Err(e) => { monitor_unsubscribe(sid).await.unwrap_or_default(); return Err(runc::error::Error::ProcessSpawnFailed(e)); } }; let pid = child.id().unwrap(); let (stdout, stderr, exit_code) = tokio::join!( read_std(child.stdout), read_std(child.stderr), wait_pid(pid as i32, subscription) ); let status = ExitStatus::from_raw(exit_code); monitor_unsubscribe(sid).await.unwrap_or_default(); Ok((status, pid, stdout, stderr)) } } async fn read_std(std: Option) -> String where T: AsyncRead + Unpin, { let mut std = std; if let Some(mut std) = std.take() { let mut out = String::new(); std.read_to_string(&mut out).await.unwrap_or_else(|e| { error!("failed to read stdout {}", e); 0 }); return out; } "".to_string() } async fn wait_pid(pid: i32, s: Subscription) -> i32 { let mut s = s; loop { if let Some(ExitEvent { subject: Subject::Pid(epid), exit_code: code, }) = s.rx.recv().await { if pid == epid { monitor_unsubscribe(s.id).await.unwrap_or_default(); return code; } } } } #[cfg(test)] mod tests { use std::{os::unix::process::ExitStatusExt, path::Path, process::ExitStatus}; use containerd_shim::util::{mkdir, write_str_to_file}; use runc::error::Error::CommandFailed; use tokio::fs::remove_dir_all; use crate::{common::LOG_JSON_FILE, 
runc::runtime_error}; #[tokio::test] async fn test_runtime_error() { let empty_err = CommandFailed { status: ExitStatus::from_raw(1), stdout: "".to_string(), stderr: "".to_string(), }; let log_json = "\ {\"level\":\"info\",\"msg\":\"hello world\",\"time\":\"2022-11-25\"}\n\ {\"level\":\"error\",\"msg\":\"failed error\",\"time\":\"2022-11-26\"}\n\ {\"level\":\"error\",\"msg\":\"panic\",\"time\":\"2022-11-27\"}\n\ "; let test_dir = "/tmp/shim-test"; let _ = mkdir(test_dir, 0o744).await; write_str_to_file(Path::new(test_dir).join(LOG_JSON_FILE).as_path(), log_json) .await .expect("write log json should not be error"); let expectd_msg = "panic"; let actual_err = runtime_error(test_dir, empty_err, "").await; remove_dir_all(test_dir) .await .expect("remove test dir should not be error"); assert!( actual_err.to_string().contains(expectd_msg), "actual error \"{}\" should contains \"{}\"", actual_err, expectd_msg ); } } ================================================ FILE: crates/runc-shim/src/service.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ use std::{env::current_dir, sync::Arc, time::Duration}; use ::runc::options::DeleteOpts; use async_trait::async_trait; use containerd_shim::{ asynchronous::{ monitor::{monitor_subscribe, monitor_unsubscribe, Subscription}, publisher::RemotePublisher, spawn, ExitSignal, Shim, }, event::Event, io_error, monitor::{Subject, Topic}, mount::umount_recursive, protos::{events::task::TaskExit, protobuf::MessageDyn, ttrpc::context::with_duration}, util::{ convert_to_timestamp, read_options, read_pid_from_file, read_runtime, read_spec, timestamp, write_str_to_file, }, Config, DeleteResponse, Error, Flags, StartOpts, }; use log::{debug, error, warn}; use tokio::sync::mpsc::{channel, Receiver, Sender}; use crate::{ common::{create_runc, has_shared_pid_namespace, ShimExecutor, GROUP_LABELS, INIT_PID_FILE}, container::Container, processes::Process, runc::{RuncContainer, RuncFactory}, task::TaskService, }; pub(crate) struct Service { exit: Arc, id: String, namespace: String, } #[async_trait] impl Shim for Service { type T = TaskService; async fn new(_runtime_id: &str, args: &Flags, _config: &mut Config) -> Self { let exit = Arc::new(ExitSignal::default()); // TODO: add publisher Service { exit, id: args.id.to_string(), namespace: args.namespace.to_string(), } } async fn start_shim(&mut self, opts: StartOpts) -> containerd_shim::Result { let mut grouping = opts.id.clone(); let spec = read_spec("").await?; if let Some(annotations) = spec.annotations() { for &label in GROUP_LABELS.iter() { if let Some(value) = annotations.get(label) { grouping = value.to_string(); break; } } } #[cfg(not(target_os = "linux"))] let thp_disabled = String::new(); #[cfg(target_os = "linux")] // Our goal is to set thp disable = true on the shim side and then restore thp // disable before starting runc. So we only need to focus on the return value // of the function get_thp_disabled, which is Result. 
let thp_disabled = { let ret = unsafe { libc::prctl(libc::PR_GET_THP_DISABLE, 0, 0, 0, 0) }; if ret >= 0 { let was_disabled = ret > 0; // We don't care if the setting is successful, because even if the // setting failed, we should not exit the shim process. let _ = unsafe { libc::prctl(libc::PR_SET_THP_DISABLE, 1u64, 0, 0, 0) }; was_disabled.to_string() } else { String::new() } }; let vars: Vec<(&str, &str)> = vec![("THP_DISABLED", thp_disabled.as_str())]; let address = spawn(opts, &grouping, vars).await?; write_str_to_file("address", &address).await?; Ok(address) } async fn delete_shim(&mut self) -> containerd_shim::Result { let namespace = self.namespace.as_str(); let bundle = current_dir().map_err(io_error!(e, "get current dir"))?; let opts = read_options(&bundle).await?; let runtime = read_runtime(&bundle).await.unwrap_or_default(); let runc = create_runc( &runtime, namespace, &bundle, &opts, Some(Arc::new(ShimExecutor::default())), )?; let pid = read_pid_from_file(&bundle.join(INIT_PID_FILE)) .await .unwrap_or_default(); runc.delete(&self.id, Some(&DeleteOpts { force: true })) .await .unwrap_or_else(|e| warn!("failed to remove runc container: {}", e)); umount_recursive(bundle.join("rootfs").to_str(), 0) .unwrap_or_else(|e| warn!("failed to umount recursive rootfs: {}", e)); let mut resp = DeleteResponse::new(); // sigkill resp.set_exit_status(137); resp.set_exited_at(timestamp()?); resp.set_pid(pid as u32); Ok(resp) } async fn wait(&mut self) { self.exit.wait().await; } async fn create_task_service(&self, publisher: RemotePublisher) -> Self::T { let (tx, rx) = channel(128); let exit_clone = self.exit.clone(); let task = TaskService::new(&self.namespace, exit_clone, tx.clone()); let s = monitor_subscribe(Topic::Pid) .await .expect("monitor subscribe failed"); process_exits(s, &task, tx).await; forward(publisher, self.namespace.to_string(), rx).await; task } } async fn process_exits( s: Subscription, task: &TaskService, tx: Sender<(String, Box)>, ) { let 
containers = task.containers.clone(); let mut s = s; tokio::spawn(async move { while let Some(e) = s.rx.recv().await { if let Subject::Pid(pid) = e.subject { debug!("receive exit event: {}", &e); let exit_code = e.exit_code; for (_k, cont) in containers.write().await.iter_mut() { let bundle = cont.bundle.to_string(); let container_id = cont.id.clone(); let mut change_process: Vec<&mut (dyn Process + Send + Sync)> = Vec::new(); // pid belongs to container init process if cont.init.pid == pid { // kill all children process if the container has a private PID namespace if should_kill_all_on_exit(&bundle).await { cont.kill(None, 9, true).await.unwrap_or_else(|e| { error!("failed to kill init's children: {}", e) }); } if let Ok(process_d) = cont.get_mut_process(None) { change_process.push(process_d); } else { break; } } else { // pid belongs to container common process if let Some((_, p)) = cont.processes.iter_mut().find(|(_, p)| p.pid == pid) { change_process.push(p as &mut (dyn Process + Send + Sync)); } } let process_len = change_process.len(); for process in change_process { // set exit for process process.set_exited(exit_code).await; let code = process.exit_code().await; let exited_at = process.exited_at().await; // publish event let ts = convert_to_timestamp(exited_at); let event = TaskExit { container_id: container_id.clone(), id: process.id().await.to_string(), pid: process.pid().await as u32, exit_status: code as u32, exited_at: Some(ts).into(), ..Default::default() }; let topic = event.topic(); tx.send((topic.to_string(), Box::new(event))) .await .unwrap_or_else(|e| warn!("send {} to publisher: {}", topic, e)); } //if process has been find , no need to keep search if process_len != 0 { break; } } } } monitor_unsubscribe(s.id).await.unwrap_or_default(); }); } async fn forward( publisher: RemotePublisher, ns: String, mut rx: Receiver<(String, Box)>, ) { tokio::spawn(async move { while let Some((topic, e)) = rx.recv().await { // While ttrpc push the event,give it 
a 5 seconds timeout. // Prevent event reporting from taking too long time. // Learnd from goshim's containerd/runtime/v2/shim/publisher.go publisher .publish(with_duration(Duration::from_secs(5)), &topic, &ns, e) .await .unwrap_or_else(|e| warn!("publish {} to containerd: {}", topic, e)); } }); } async fn should_kill_all_on_exit(bundle_path: &str) -> bool { match read_spec(bundle_path).await { Ok(spec) => has_shared_pid_namespace(&spec), Err(e) => { error!( "failed to read spec when call should_kill_all_on_exit: {}", e ); false } } } ================================================ FILE: crates/runc-shim/src/task.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ use std::{collections::HashMap, sync::Arc}; use async_trait::async_trait; use containerd_shim::{ api::{ CreateTaskRequest, CreateTaskResponse, DeleteRequest, Empty, ExecProcessRequest, KillRequest, ResizePtyRequest, ShutdownRequest, StartRequest, StartResponse, StateRequest, StateResponse, Status, WaitRequest, WaitResponse, }, asynchronous::ExitSignal, event::Event, protos::{ api::{ CloseIORequest, ConnectRequest, ConnectResponse, DeleteResponse, PidsRequest, PidsResponse, StatsRequest, StatsResponse, UpdateTaskRequest, }, events::task::{TaskCreate, TaskDelete, TaskExecAdded, TaskExecStarted, TaskIO, TaskStart}, protobuf::{EnumOrUnknown, MessageDyn}, shim_async::Task, ttrpc::{self, r#async::TtrpcContext}, }, util::{convert_to_any, convert_to_timestamp, AsOption}, TtrpcResult, }; use log::{debug, info, warn}; use oci_spec::runtime::LinuxResources; use tokio::sync::{ mpsc::Sender, RwLock, RwLockMappedWriteGuard, RwLockReadGuard, RwLockWriteGuard, }; use super::container::{Container, ContainerFactory}; type EventSender = Sender<(String, Box)>; #[cfg(target_os = "linux")] use std::path::Path; #[cfg(target_os = "linux")] use cgroups_rs::fs::hierarchies::is_cgroup2_unified_mode; use containerd_shim::{ api::{PauseRequest, ResumeRequest}, protos::events::task::{TaskPaused, TaskResumed}, }; #[cfg(target_os = "linux")] use containerd_shim::{ error::{Error, Result}, other_error, protos::events::task::TaskOOM, }; #[cfg(target_os = "linux")] use log::error; #[cfg(target_os = "linux")] use tokio::{sync::mpsc::Receiver, task::spawn}; #[cfg(target_os = "linux")] use crate::cgroup_memory; /// TaskService is a Task template struct, it is considered a helper struct, /// which has already implemented `Task` trait, so that users can make it the type `T` /// parameter of `Service`, and implements their own `ContainerFactory` and `Container`. 
pub struct TaskService { pub factory: F, // In comparison, a Mutex does not distinguish between readers or writers that acquire the lock, // therefore causing any tasks waiting for the lock to become available to yield. // An RwLock will allow any number of readers to acquire the lock as long as a writer is not holding the lock. pub containers: Arc>>, pub namespace: String, pub exit: Arc, pub tx: EventSender, } impl TaskService where F: Default, { pub fn new(ns: &str, exit: Arc, tx: EventSender) -> Self { Self { factory: Default::default(), containers: Arc::new(RwLock::new(Default::default())), namespace: ns.to_string(), exit, tx, } } } impl TaskService { pub async fn container_mut(&self, id: &str) -> TtrpcResult> { let mut containers = self.containers.write().await; containers.get_mut(id).ok_or_else(|| { ttrpc::Error::RpcStatus(ttrpc::get_status( ttrpc::Code::NOT_FOUND, format!("can not find container by id {}", id), )) })?; let container = RwLockWriteGuard::map(containers, |m| m.get_mut(id).unwrap()); Ok(container) } pub async fn container(&self, id: &str) -> TtrpcResult> { let containers = self.containers.read().await; containers.get(id).ok_or_else(|| { ttrpc::Error::RpcStatus(ttrpc::get_status( ttrpc::Code::NOT_FOUND, format!("can not find container by id {}", id), )) })?; let container = RwLockReadGuard::map(containers, |m| m.get(id).unwrap()); Ok(container) } pub async fn send_event(&self, event: impl Event) { let topic = event.topic(); self.tx .send((topic.to_string(), Box::new(event))) .await .unwrap_or_else(|e| warn!("send {} to publisher: {}", topic, e)); } } #[cfg(target_os = "linux")] fn run_oom_monitor(mut rx: Receiver, id: String, tx: EventSender) { let oom_event = TaskOOM { container_id: id, ..Default::default() }; let topic = oom_event.topic(); let oom_box = Box::new(oom_event); spawn(async move { while let Some(_item) = rx.recv().await { tx.send((topic.to_string(), oom_box.clone())) .await .unwrap_or_else(|e| warn!("send {} to publisher: {}", 
topic, e)); } }); } #[cfg(target_os = "linux")] async fn monitor_oom(id: &String, pid: u32, tx: EventSender) -> Result<()> { if !is_cgroup2_unified_mode() { let path_from_cgorup = cgroup_memory::get_path_from_cgorup(pid).await?; let (mount_root, mount_point) = cgroup_memory::get_existing_cgroup_mem_path(path_from_cgorup).await?; let mem_cgroup_path = mount_point + &mount_root; let rx = cgroup_memory::register_memory_event( id, Path::new(&mem_cgroup_path), "memory.oom_control", ) .await .map_err(other_error!("register_memory_event failed:"))?; run_oom_monitor(rx, id.to_string(), tx); } Ok(()) } #[async_trait] impl Task for TaskService where F: ContainerFactory + Sync + Send, C: Container + Sync + Send + 'static, { async fn state(&self, _ctx: &TtrpcContext, req: StateRequest) -> TtrpcResult { let container = self.container(req.id()).await?; let exec_id = req.exec_id().as_option(); let resp = container.state(exec_id).await?; Ok(resp) } async fn create( &self, _ctx: &TtrpcContext, req: CreateTaskRequest, ) -> TtrpcResult { info!("Create request for {:?}", &req); // Note: Get containers here is for getting the lock, // to make sure no other threads manipulate the containers metadata; let ns = self.namespace.as_str(); let id = req.id.as_str(); let mut resp = CreateTaskResponse::new(); let pid = { let mut containers = self.containers.write().await; let container = self.factory.create(ns, &req).await?; let pid = container.pid().await as u32; resp.pid = pid; containers.insert(id.to_string(), container); pid }; self.send_event(TaskCreate { container_id: req.id.to_string(), bundle: req.bundle.to_string(), rootfs: req.rootfs, io: Some(TaskIO { stdin: req.stdin.to_string(), stdout: req.stdout.to_string(), stderr: req.stderr.to_string(), terminal: req.terminal, ..Default::default() }) .into(), checkpoint: req.checkpoint.to_string(), pid, ..Default::default() }) .await; info!("Create request for {} returns pid {}", id, resp.pid); Ok(resp) } async fn start(&self, _ctx: 
&TtrpcContext, req: StartRequest) -> TtrpcResult { info!("Start request for {:?}", &req); let pid = { let mut container = self.container_mut(req.id()).await?; // Prevent the init process from exiting and continuing with start // Return early to reduce the time it takes to return only when runc encounters an error if container.init_state().await == EnumOrUnknown::new(Status::STOPPED) { debug!("container init process has exited, start process should not continue"); return Err(ttrpc::Error::RpcStatus(ttrpc::get_status( ttrpc::Code::FAILED_PRECONDITION, format!("container init process has exited {}", container.id().await), ))); } container.start(req.exec_id.as_str().as_option()).await? }; let mut resp = StartResponse::new(); resp.pid = pid as u32; if req.exec_id.is_empty() { self.send_event(TaskStart { container_id: req.id.to_string(), pid: pid as u32, ..Default::default() }) .await; #[cfg(target_os = "linux")] if let Err(e) = monitor_oom(&req.id, resp.pid, self.tx.clone()).await { error!("monitor_oom failed: {:?}.", e); } } else { self.send_event(TaskExecStarted { container_id: req.id.to_string(), exec_id: req.exec_id.to_string(), pid: pid as u32, ..Default::default() }) .await; }; info!("Start request for {:?} returns pid {}", req, resp.pid()); Ok(resp) } async fn delete(&self, _ctx: &TtrpcContext, req: DeleteRequest) -> TtrpcResult { info!("Delete request for {:?}", &req); let (id, pid, exit_status, exited_at) = { let mut container = self.container_mut(req.id()).await?; let id = container.id().await; let exec_id_opt = req.exec_id().as_option(); let (pid, exit_status, exited_at) = container.delete(exec_id_opt).await?; self.factory.cleanup(&self.namespace, &container).await?; (id, pid, exit_status, exited_at) }; if req.exec_id().is_empty() { self.containers.write().await.remove(req.id()); } let ts = convert_to_timestamp(exited_at); self.send_event(TaskDelete { container_id: id, pid: pid as u32, exit_status: exit_status as u32, exited_at: Some(ts.clone()).into(), 
..Default::default() }) .await; let mut resp = DeleteResponse::new(); resp.set_exited_at(ts); resp.set_pid(pid as u32); resp.set_exit_status(exit_status as u32); info!( "Delete request for {} {} returns {:?}", req.id(), req.exec_id(), resp ); Ok(resp) } async fn pids(&self, _ctx: &TtrpcContext, req: PidsRequest) -> TtrpcResult { debug!("Pids request for {:?}", req); let processes = self.container(req.id()).await?.all_processes().await?; debug!("Pids request for {:?} returns successfully", req); Ok(PidsResponse { processes, ..Default::default() }) } async fn pause(&self, _ctx: &TtrpcContext, req: PauseRequest) -> TtrpcResult { info!("pause request for {:?}", req); self.container_mut(req.id()).await?.pause().await?; self.send_event(TaskPaused { container_id: req.id.to_string(), ..Default::default() }) .await; info!("pause request for {:?} returns successfully", req); Ok(Empty::new()) } async fn resume(&self, _ctx: &TtrpcContext, req: ResumeRequest) -> TtrpcResult { info!("resume request for {:?}", req); self.container_mut(req.id()).await?.resume().await?; self.send_event(TaskResumed { container_id: req.id.to_string(), ..Default::default() }) .await; info!("resume request for {:?} returns successfully", req); Ok(Empty::new()) } async fn kill(&self, _ctx: &TtrpcContext, req: KillRequest) -> TtrpcResult { info!("Kill request for {:?}", req); self.container_mut(req.id()) .await? 
.kill(req.exec_id().as_option(), req.signal, req.all) .await?; info!("Kill request for {:?} returns successfully", req); Ok(Empty::new()) } async fn exec(&self, _ctx: &TtrpcContext, req: ExecProcessRequest) -> TtrpcResult { info!("Exec request for {:?}", req); let exec_id = req.exec_id().to_string(); let container_id = { let mut container = self.container_mut(req.id()).await?; container.exec(req).await?; container.id().await }; self.send_event(TaskExecAdded { container_id, exec_id, ..Default::default() }) .await; Ok(Empty::new()) } async fn resize_pty(&self, _ctx: &TtrpcContext, req: ResizePtyRequest) -> TtrpcResult { debug!( "Resize pty request for container {}, exec_id: {}", &req.id, &req.exec_id ); self.container_mut(req.id()) .await? .resize_pty(req.exec_id().as_option(), req.height, req.width) .await?; Ok(Empty::new()) } async fn close_io(&self, _ctx: &TtrpcContext, req: CloseIORequest) -> TtrpcResult { self.container_mut(req.id()) .await? .close_io(req.exec_id().as_option()) .await?; Ok(Empty::new()) } async fn update(&self, _ctx: &TtrpcContext, mut req: UpdateTaskRequest) -> TtrpcResult { debug!("Update request for id {:?}", req.id); let id = req.take_id(); let data = req .resources .into_option() .map(|r| r.value) .unwrap_or_default(); let resources: LinuxResources = serde_json::from_slice(&data).map_err(|e| { ttrpc::Error::RpcStatus(ttrpc::get_status( ttrpc::Code::INVALID_ARGUMENT, format!("failed to parse resource spec: {}", e), )) })?; debug!("Update resource is {:?}", resources); self.container_mut(&id).await?.update(&resources).await?; Ok(Empty::new()) } async fn wait(&self, _ctx: &TtrpcContext, req: WaitRequest) -> TtrpcResult { info!("Wait request for {:?}", req); let exec_id = req.exec_id.as_str().as_option(); let wait_rx = { let mut container = self.container_mut(req.id()).await?; let state = container.state(exec_id).await?; if state.status() != Status::RUNNING && state.status() != Status::CREATED { let mut resp = WaitResponse::new(); 
resp.exit_status = state.exit_status; resp.exited_at = state.exited_at; info!("Wait request for {:?} returns {:?}", req, &resp); return Ok(resp); } container.wait_channel(req.exec_id().as_option()).await? }; wait_rx.await.unwrap_or_default(); // get lock again. let (_, code, exited_at) = self .container(req.id()) .await? .get_exit_info(exec_id) .await?; let mut resp = WaitResponse::new(); resp.set_exit_status(code as u32); let ts = convert_to_timestamp(exited_at); resp.set_exited_at(ts); info!("Wait request for {:?} returns {:?}", req, &resp); Ok(resp) } async fn stats(&self, _ctx: &TtrpcContext, req: StatsRequest) -> TtrpcResult { debug!("Stats request for {:?}", req); let stats = self.container(req.id()).await?.stats().await?; let mut resp = StatsResponse::new(); resp.set_stats(convert_to_any(Box::new(stats))?); Ok(resp) } async fn connect( &self, _ctx: &TtrpcContext, req: ConnectRequest, ) -> TtrpcResult { info!("Connect request for {:?}", req); let pid = if let Ok(container) = self.container(req.id()).await { container.pid().await as u32 } else { 0 }; Ok(ConnectResponse { shim_pid: std::process::id(), task_pid: pid, ..Default::default() }) } async fn shutdown(&self, _ctx: &TtrpcContext, _req: ShutdownRequest) -> TtrpcResult { debug!("Shutdown request"); let containers = self.containers.read().await; if !containers.is_empty() { return Ok(Empty::new()); } self.exit.signal(); Ok(Empty::default()) } } ================================================ FILE: crates/shim/Cargo.toml ================================================ [package] name = "containerd-shim" version = "0.11.0" authors = [ "Maksym Pavlenko ", "The containerd Authors", ] description = "containerd shim extension" keywords = ["containerd", "shim", "containers"] categories = ["api-bindings", "asynchronous"] edition.workspace = true license.workspace = true repository.workspace = true homepage.workspace = true [features] async = ["async-trait", "containerd-shim-protos/async", "futures", "tokio"] 
tracing = ["dep:tracing"] docs = [] [[example]] name = "skeleton_async" required-features = ["async"] [[example]] name = "windows-log-reader" path = "examples/windows_log_reader.rs" [dependencies] which = { version = "8.0.0", default-features = false, features = ["real-sys"] } containerd-shim-protos = { path = "../shim-protos", version = "0.11.0" } go-flag = "0.1.0" sha2 = { version = "0.10", default-features = false, features = ["std"] } libc.workspace = true log = { workspace = true, features = ["std", "kv_unstable"] } nix = { workspace = true, features = [ "fs", "socket", "signal", "mount", "sched", ] } oci-spec = { workspace = true, features = ["runtime"] } signal-hook = "0.3.18" serde = { workspace = true, features = ["derive", "std"] } serde_json = { workspace = true, features = ["std"] } tempfile.workspace = true thiserror.workspace = true time = { workspace = true, features = ["std", "formatting"] } # tracing tracing = { version = "0.1", default-features = false, optional = true } # Async dependencies async-trait = { workspace = true, optional = true } futures = { workspace = true, features = ["std", "alloc"], optional = true } tokio = { workspace = true, features = ["macros", "rt-multi-thread", "process", "sync", "fs", "io-util", "time", "signal", "io-std"], optional = true } [target.'cfg(target_os = "linux")'.dependencies] cgroups-rs.workspace = true [target.'cfg(windows)'.dependencies] mio = { version = "1.1", default-features = false, features = ["os-ext", "os-poll"] } windows-sys = { version = "0.52.0", default-features = false, features = [ "Win32_Foundation", "Win32_System_Console", "Win32_System_Pipes", "Win32_Security", "Win32_Storage_FileSystem", "Win32_System_Threading", ] } [dev-dependencies] tempfile.workspace = true [package.metadata.docs.rs] features = ["docs"] ================================================ FILE: crates/shim/README.md ================================================ # Shim extension for containerd 
[![Crates.io](https://img.shields.io/crates/v/containerd-shim)](https://crates.io/crates/containerd-shim)
[![docs.rs](https://img.shields.io/docsrs/containerd-shim)](https://docs.rs/containerd-shim/latest/containerd_shim/)
[![Crates.io](https://img.shields.io/crates/l/containerd-shim)](https://github.com/containerd/rust-extensions/blob/main/LICENSE)
[![CI](https://github.com/containerd/rust-extensions/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/containerd/rust-extensions/actions/workflows/ci.yml)

Rust crate to ease runtime v2 shim implementation.

It replicates the same [shim.Run](https://github.com/containerd/containerd/blob/dbef1d56d7ebc05bc4553d72c419ed5ce025b05d/runtime/v2/example/cmd/main.go) API offered by containerd's shim v2 runtime implementation written in Go.

## Runtime

Runtime v2 introduces a first-class shim API for runtime authors to integrate with containerd.
The shim API is minimal and scoped to the execution lifecycle of a container.

This crate simplifies shim v2 runtime development for containerd. It handles common tasks such
as command line parsing, setting up shim's TTRPC server, logging, events, etc.

Clients are expected to implement the [Shim] and [Task] traits with task handling routines.
This generally replicates the same API as in the Go [version](https://github.com/containerd/containerd/blob/main/core/runtime/v2/example/cmd/main.go).
Once implemented, shim's bootstrap code is as easy as:

```text
shim::run::<Service>("io.containerd.empty.v1")
```

## Look and feel

The API is very similar to the one offered by the Go version:

```rust,no_run
use std::sync::Arc;

use async_trait::async_trait;
use containerd_shim::{
    asynchronous::{run, spawn, ExitSignal, Shim},
    publisher::RemotePublisher,
    Config, Error, Flags, StartOpts, TtrpcResult,
};
use containerd_shim_protos::{
    api, api::DeleteResponse, shim_async::Task, ttrpc::r#async::TtrpcContext,
};
use log::info;

#[derive(Clone)]
struct Service {
    exit: Arc<ExitSignal>,
}

#[async_trait]
impl Shim for Service {
    type T = Service;

    async fn new(_runtime_id: &str, _args: &Flags, _config: &mut Config) -> Self {
        Service {
            exit: Arc::new(ExitSignal::default()),
        }
    }

    async fn start_shim(&mut self, opts: StartOpts) -> Result<String, Error> {
        let grouping = opts.id.clone();
        let address = spawn(opts, &grouping, Vec::new()).await?;
        Ok(address)
    }

    async fn delete_shim(&mut self) -> Result<DeleteResponse, Error> {
        Ok(DeleteResponse::new())
    }

    async fn wait(&mut self) {
        self.exit.wait().await;
    }

    async fn create_task_service(&self, _publisher: RemotePublisher) -> Self::T {
        self.clone()
    }
}

#[async_trait]
impl Task for Service {
    async fn connect(
        &self,
        _ctx: &TtrpcContext,
        _req: api::ConnectRequest,
    ) -> TtrpcResult<api::ConnectResponse> {
        info!("Connect request");
        Ok(api::ConnectResponse {
            version: String::from("example"),
            ..Default::default()
        })
    }

    async fn shutdown(
        &self,
        _ctx: &TtrpcContext,
        _req: api::ShutdownRequest,
    ) -> TtrpcResult<api::Empty> {
        info!("Shutdown request");
        self.exit.signal();
        Ok(api::Empty::default())
    }
}

#[tokio::main]
async fn main() {
    run::<Service>("io.containerd.empty.v1", None).await;
}
```

## How to use with containerd

**Note**: All operations are in the root directory of `rust-extensions`.
With shim v2 runtime: ```bash $ cargo build --example skeleton $ sudo cp ./target/debug/examples/skeleton /usr/local/bin/containerd-shim-skeleton-v1 $ sudo ctr run --rm --runtime io.containerd.skeleton.v1 -t docker.io/library/hello-world:latest hello ``` Or if on 1.6+ ```bash $ cargo build --example skeleton $ sudo ctr run --rm --runtime ./target/debug/examples/skeleton docker.io/library/hello-world:latest hello ``` Or manually: ```bash $ touch log # Run containerd in background $ sudo TTRPC_ADDRESS="/var/run/containerd/containerd.sock.ttrpc" \ cargo run --example skeleton -- \ -namespace default \ -id 1234 \ -address /var/run/containerd/containerd.sock \ -publish-binary ./bin/containerd \ start unix:///var/run/containerd/eb8e7d1c48c2a1ec.sock $ cargo build --example shim-proto-connect $ sudo ./target/debug/examples/shim-proto-connect unix:///var/run/containerd/eb8e7d1c48c2a1ec.sock Connecting to unix:///var/run/containerd/eb8e7d1c48c2a1ec.sock... Sending `Connect` request... Connect response: version: "example" Sending `Shutdown` request... Shutdown response: "" $ cat log [INFO] server listen started [INFO] server started [INFO] Shim successfully started, waiting for exit signal... 
[INFO] Connect request [INFO] Shutdown request [INFO] Shutting down shim instance [INFO] close monitor [INFO] listener shutdown for quit flag [INFO] ttrpc server listener stopped [INFO] listener thread stopped [INFO] begin to shutdown connection [INFO] connections closed [INFO] reaper thread exited [INFO] reaper thread stopped ``` ### Running on Windows ```powershell # Run containerd in background $env:TTRPC_ADDRESS="\\.\pipe\containerd-containerd.ttrpc" $ cargo run --example skeleton -- -namespace default -id 1234 -address "\\.\pipe\containerd-containerd" start \\.\pipe\containerd-shim-bc764c65e177434fcefe8257dc440be8b8acf7c96156320d965938f7e9ae1a35-pipe # (Optional) Run the log collector in a separate command window # note: log reader won't work if containerd is connected to the named pipe, this works when running manually to help debug locally $ cargo run --example windows-log-reader \\.\pipe\containerd-shim-default-1234-log Reading logs from: \\.\pipe\containerd-shim-default-1234-log $ cargo run --example shim-proto-connect \\.\pipe\containerd-shim-bc764c65e177434fcefe8257dc440be8b8acf7c96156320d965938f7e9ae1a35-pipe Connecting to \\.\pipe\containerd-shim-bc764c65e177434fcefe8257dc440be8b8acf7c96156320d965938f7e9ae1a35-pipe... Sending `Connect` request... Connect response: version: "example" Sending `Shutdown` request... Shutdown response: "" ``` ## Supported Platforms Currently, following OSs and hardware architectures are supported, and more efforts are needed to enable and validate other OSs and architectures. - Linux - Mac OS - Windows ================================================ FILE: crates/shim/examples/publish.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ use std::env; use containerd_shim::{publisher::RemotePublisher, Context}; use containerd_shim_protos::events::task::TaskOOM; #[cfg(not(feature = "async"))] fn main() { let args: Vec = env::args().collect(); // Must not start with unix:// let address = args .get(1) .ok_or("First argument must be containerd's TTRPC address to publish events") .unwrap(); println!("Connecting: {}", &address); let publisher = RemotePublisher::new(address).expect("Connect failed"); let mut event = TaskOOM::new(); event.set_container_id("123".into()); let ctx = Context::default(); println!("Sending event"); publisher .publish(ctx, "/tasks/oom", "default", Box::new(event)) .expect("Publish failed"); println!("Done"); } #[cfg(feature = "async")] #[tokio::main] async fn main() { let args: Vec = env::args().collect(); // Must not start with unix:// let address = args .get(1) .ok_or("First argument must be containerd's TTRPC address to publish events") .unwrap(); println!("Connecting: {}", &address); let publisher = RemotePublisher::new(address).await.expect("Connect failed"); let mut event = TaskOOM::new(); event.set_container_id("123".into()); let ctx = Context::default(); println!("Sending event"); publisher .publish(ctx, "/tasks/oom", "default", Box::new(event)) .await .expect("Publish failed"); println!("Done"); } ================================================ FILE: crates/shim/examples/skeleton.rs ================================================ /* Copyright The containerd Authors. 
Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

#[cfg(not(feature = "async"))]
use containerd_shim as shim;

// Minimal synchronous shim skeleton.
// NOTE(review): generic parameters and `Result` error types in this file were
// stripped by the extraction and reconstructed — verify against upstream.
#[cfg(not(feature = "async"))]
mod skeleton {
    use std::sync::Arc;

    use containerd_shim as shim;
    use log::info;
    use shim::{
        api, synchronous::publisher::RemotePublisher, Config, DeleteResponse, ExitSignal,
        Flags, TtrpcContext, TtrpcResult,
    };

    #[derive(Clone)]
    pub(crate) struct Service {
        exit: Arc<ExitSignal>,
    }

    impl shim::Shim for Service {
        type T = Service;

        fn new(_runtime_id: &str, _args: &Flags, _config: &mut Config) -> Self {
            Service {
                exit: Arc::new(ExitSignal::default()),
            }
        }

        fn start_shim(&mut self, opts: shim::StartOpts) -> Result<String, shim::Error> {
            let grouping = opts.id.clone();
            let (_child_id, address) = shim::spawn(opts, &grouping, Vec::new())?;
            Ok(address)
        }

        fn delete_shim(&mut self) -> Result<DeleteResponse, shim::Error> {
            Ok(DeleteResponse::new())
        }

        fn wait(&mut self) {
            self.exit.wait();
        }

        fn create_task_service(&self, _publisher: RemotePublisher) -> Self::T {
            self.clone()
        }
    }

    impl shim::Task for Service {
        fn connect(
            &self,
            _ctx: &TtrpcContext,
            _req: api::ConnectRequest,
        ) -> TtrpcResult<api::ConnectResponse> {
            info!("Connect request");
            Ok(api::ConnectResponse {
                version: String::from("example"),
                ..Default::default()
            })
        }

        fn shutdown(
            &self,
            _ctx: &TtrpcContext,
            _req: api::ShutdownRequest,
        ) -> TtrpcResult<api::Empty> {
            info!("Shutdown request");
            self.exit.signal();
            Ok(api::Empty::default())
        }
    }
}

fn main() {
    #[cfg(not(feature = "async"))]
    shim::run::<skeleton::Service>("io.containerd.empty.v1", None)
}

================================================
FILE:
crates/shim/examples/skeleton_async.rs
================================================
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

use std::sync::Arc;

use async_trait::async_trait;
use containerd_shim::{
    asynchronous::{run, spawn, ExitSignal, Shim},
    publisher::RemotePublisher,
    Config, Error, Flags, StartOpts, TtrpcResult,
};
use containerd_shim_protos::{
    api, api::DeleteResponse, shim_async::Task, ttrpc::r#async::TtrpcContext,
};
use log::info;

// Minimal asynchronous shim skeleton.
// NOTE(review): generic parameters in this file were stripped by the extraction
// and reconstructed — verify against upstream.
#[derive(Clone)]
struct Service {
    exit: Arc<ExitSignal>,
}

#[async_trait]
impl Shim for Service {
    type T = Service;

    async fn new(_runtime_id: &str, _args: &Flags, _config: &mut Config) -> Self {
        Service {
            exit: Arc::new(ExitSignal::default()),
        }
    }

    async fn start_shim(&mut self, opts: StartOpts) -> Result<String, Error> {
        let grouping = opts.id.clone();
        let address = spawn(opts, &grouping, Vec::new()).await?;
        Ok(address)
    }

    async fn delete_shim(&mut self) -> Result<DeleteResponse, Error> {
        Ok(DeleteResponse::new())
    }

    async fn wait(&mut self) {
        self.exit.wait().await;
    }

    async fn create_task_service(&self, _publisher: RemotePublisher) -> Self::T {
        self.clone()
    }
}

#[async_trait]
impl Task for Service {
    async fn connect(
        &self,
        _ctx: &TtrpcContext,
        _req: api::ConnectRequest,
    ) -> TtrpcResult<api::ConnectResponse> {
        info!("Connect request");
        Ok(api::ConnectResponse {
            version: String::from("example"),
            ..Default::default()
        })
    }

    async fn shutdown(
        &self,
        _ctx: &TtrpcContext,
        _req: api::ShutdownRequest,
    ) -> TtrpcResult<api::Empty> {
        info!("Shutdown request");
        self.exit.signal();
        Ok(api::Empty::default())
    }
}

#[tokio::main]
async fn main() {
    run::<Service>("io.containerd.empty.v1", None).await;
}

================================================
FILE: crates/shim/examples/windows_log_reader.rs
================================================
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

#[cfg(windows)]
use std::error::Error;

/// Windows-only helper: opens a shim's named-pipe log endpoint and copies its
/// output to stdout using a mio poll loop.
#[cfg(windows)]
fn main() -> Result<(), Box<dyn Error>> {
    use std::{
        env,
        fs::OpenOptions,
        os::windows::{
            fs::OpenOptionsExt,
            io::{FromRawHandle, IntoRawHandle},
        },
        time::Duration,
    };

    use mio::{windows::NamedPipe, Events, Interest, Poll, Token};
    use windows_sys::Win32::Storage::FileSystem::FILE_FLAG_OVERLAPPED;

    let args: Vec<String> = env::args().collect();
    let address = args
        .get(1)
        .ok_or("First argument must be shims address to read logs (\\\\.\\pipe\\containerd-shim-{ns}-{id}-log) ")
        .unwrap();

    println!("Reading logs from: {}", &address);
    let mut opts = OpenOptions::new();
    opts.read(true)
        .write(true)
        .custom_flags(FILE_FLAG_OVERLAPPED);
    let file = opts.open(address).unwrap();

    let mut client = unsafe { NamedPipe::from_raw_handle(file.into_raw_handle()) };
    let mut stdio = std::io::stdout();
    let mut poll = Poll::new().unwrap();
    poll.registry()
        .register(&mut client, Token(1), Interest::READABLE)
        .unwrap();
    let mut events = Events::with_capacity(128);
    loop {
        poll.poll(&mut events, Some(Duration::from_millis(10)))
            .unwrap();
        match std::io::copy(&mut client, &mut stdio) {
            Ok(_) => break,
            Err(_) => continue,
        }
    }
    Ok(())
}

#[cfg(unix)]
fn main() {
    println!("This example is only for Windows");
}

================================================
FILE: crates/shim/src/args.rs
================================================
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

use std::ffi::OsStr;

use crate::error::{Error, Result};

/// Flags to be passed from containerd daemon to a shim binary.
/// Reflects the containerd runtime v2 shim command line protocol.
/// NOTE(review): the original doc comments linked upstream containerd URLs in
/// angle brackets that were lost in extraction — restore the exact links.
#[derive(Debug, Default)]
pub struct Flags {
    /// Enable debug output in logs.
    pub debug: bool,
    /// Namespace that owns the shim.
    pub namespace: String,
    /// Id of the task.
    pub id: String,
    /// Abstract socket path to serve.
    pub socket: String,
    /// Path to the bundle if not workdir.
    pub bundle: String,
    /// GRPC address back to main containerd.
    pub address: String,
    /// Path to publish binary (used for publishing events).
    pub publish_binary: String,
    /// Shim action (start / delete).
    /// See the containerd runtime v2 shim documentation for the lifecycle commands.
    pub action: String,
    /// Version of the shim.
    pub version: bool,
    /// get the option protobuf from stdin, print the shim info protobuf to stdout, and exit
    pub info: bool,
}

/// Parses command line arguments passed to the shim.
#[cfg_attr(feature = "tracing", tracing::instrument(skip_all, level = "Info"))] pub fn parse>(args: &[S]) -> Result { let mut flags = Flags::default(); let mut version_short = false; let mut version_long = false; let args: Vec = go_flag::parse_args(args, |f| { f.add_flag("debug", &mut flags.debug); f.add_flag("v", &mut version_short); f.add_flag("version", &mut version_long); f.add_flag("namespace", &mut flags.namespace); f.add_flag("id", &mut flags.id); f.add_flag("socket", &mut flags.socket); f.add_flag("bundle", &mut flags.bundle); f.add_flag("address", &mut flags.address); f.add_flag("publish-binary", &mut flags.publish_binary); f.add_flag("info", &mut flags.info); }) .map_err(|e| Error::InvalidArgument(e.to_string()))?; flags.version = version_short || version_long; if let Some(action) = args.first() { flags.action = action.into(); } Ok(flags) } #[cfg(test)] mod tests { use super::*; #[test] fn parse_all() { let args = [ "-debug", "-id", "123", "-namespace", "default", "-socket", "/path/to/socket", "-publish-binary", "/path/to/binary", "-bundle", "bundle", "-address", "address", "delete", ]; let flags = parse(&args).unwrap(); assert!(flags.debug); assert!(!flags.version); assert_eq!(flags.id, "123"); assert_eq!(flags.namespace, "default"); assert_eq!(flags.socket, "/path/to/socket"); assert_eq!(flags.publish_binary, "/path/to/binary"); assert_eq!(flags.bundle, "bundle"); assert_eq!(flags.address, "address"); assert_eq!(flags.action, "delete"); } #[test] fn parse_flags() { let args = ["-id", "123", "-namespace", "default"]; let flags = parse(&args).unwrap(); assert!(!flags.debug); assert_eq!(flags.id, "123"); assert_eq!(flags.namespace, "default"); assert_eq!(flags.action, ""); } #[test] fn parse_action() { let args = ["-namespace", "1", "start"]; let flags = parse(&args).unwrap(); assert_eq!(flags.action, "start"); assert_eq!(flags.id, ""); } #[test] fn parse_version_long_flag() { let flags = parse(&["-version"]).unwrap(); assert!(flags.version); } #[test] fn 
parse_version_short_flag() { let flags = parse(&["-v"]).unwrap(); assert!(flags.version); } } ================================================ FILE: crates/shim/src/asynchronous/mod.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ use std::{ env, io::Read, os::unix::{fs::FileTypeExt, net::UnixListener}, path::Path, process::{self, Command as StdCommand, Stdio}, sync::{ atomic::{AtomicBool, Ordering}, Arc, }, task::{ready, Poll}, }; use async_trait::async_trait; use containerd_shim_protos::{ api::DeleteResponse, protobuf::{well_known_types::any::Any, Message, MessageField}, shim::oci::Options, shim_async::{create_task, Client, Task}, ttrpc::r#async::Server, types::introspection::{self, RuntimeInfo}, }; use futures::stream::{poll_fn, BoxStream, SelectAll, StreamExt}; use libc::{SIGCHLD, SIGINT, SIGPIPE, SIGTERM}; use log::{debug, error, info, warn}; use nix::{ errno::Errno, sys::{ signal::Signal, wait::{self, WaitPidFlag, WaitStatus}, }, unistd::Pid, }; use oci_spec::runtime::Features; use tokio::{io::AsyncWriteExt, process::Command, sync::Notify}; use which::which; const DEFAULT_BINARY_NAME: &str = "runc"; use crate::{ args, asynchronous::{monitor::monitor_notify_by_pid, publisher::RemotePublisher}, error::{Error, Result}, logger, parse_sockaddr, reap, socket_address, util::{asyncify, read_file_to_str, write_str_to_file}, Config, Flags, StartOpts, TTRPC_ADDRESS, }; pub mod monitor; pub mod publisher; pub mod util; 
/// Asynchronous Main shim interface that must be implemented by all async shims.
///
/// Start and delete routines will be called to handle containerd's shim lifecycle requests.
#[async_trait]
pub trait Shim {
    /// Type to provide task service for the shim.
    type T: Task + Send + Sync;

    /// Create a new instance of async Shim.
    ///
    /// # Arguments
    /// - `runtime_id`: identifier of the container runtime.
    /// - `id`: identifier of the shim/container, passed in from Containerd.
    /// - `namespace`: namespace of the shim/container, passed in from Containerd.
    /// - `config`: for the shim to pass back configuration information
    async fn new(runtime_id: &str, args: &Flags, config: &mut Config) -> Self;

    /// Start shim will be called by containerd when launching new shim instance.
    ///
    /// It expected to return TTRPC address containerd daemon can use to communicate with
    /// the given shim instance.
    /// See <https://github.com/containerd/containerd/blob/main/runtime/v2/README.md#start>
    // NOTE(review): the doc link above was stripped by extraction (a bare "See"
    // with an empty angle-bracket span); confirm the exact URL against upstream.
    /// this is an asynchronous call
    async fn start_shim(&mut self, opts: StartOpts) -> Result<String>;

    /// Delete shim will be called by containerd after shim shutdown to cleanup any leftovers.
    /// this is an asynchronous call
    async fn delete_shim(&mut self) -> Result<DeleteResponse>;

    /// Wait for the shim to exit asynchronously.
    async fn wait(&mut self);

    /// Create the task service object asynchronously.
    async fn create_task_service(&self, publisher: RemotePublisher) -> Self::T;
}

/// Async Shim entry point that must be invoked from tokio `main`.
#[cfg_attr(feature = "tracing", tracing::instrument(level = "info"))] pub async fn run(runtime_id: &str, opts: Option) where T: Shim + Send + Sync + 'static, { if let Some(err) = bootstrap::(runtime_id, opts).await.err() { eprintln!("{}: {:?}", runtime_id, err); process::exit(1); } } /// get runtime info pub fn run_info() -> Result { let mut info = introspection::RuntimeInfo { name: "containerd-shim-runc-v2-rs".to_string(), version: MessageField::some(introspection::RuntimeVersion { version: env!("CARGO_PKG_VERSION").to_string(), revision: String::default(), ..Default::default() }), ..Default::default() }; let mut binary_name = DEFAULT_BINARY_NAME.to_string(); let mut data: Vec = Vec::new(); std::io::stdin() .read_to_end(&mut data) .map_err(io_error!(e, "read stdin"))?; // get BinaryName from stdin if !data.is_empty() { let opts = Any::parse_from_bytes(&data).and_then(|any| Options::parse_from_bytes(&any.value))?; if !opts.binary_name().is_empty() { binary_name = opts.binary_name().to_string(); } } let binary_path = which(binary_name).unwrap(); // get features let output = StdCommand::new(binary_path) .arg("features") .output() .unwrap(); let features: Features = serde_json::from_str(&String::from_utf8_lossy(&output.stdout))?; // set features let features_any = Any { type_url: "types.containerd.io/opencontainers/runtime-spec/1/features/Features".to_string(), // features to json value: serde_json::to_vec(&features)?, ..Default::default() }; info.features = MessageField::some(features_any); Ok(info) } #[cfg_attr(feature = "tracing", tracing::instrument(level = "info"))] async fn bootstrap(runtime_id: &str, opts: Option) -> Result<()> where T: Shim + Send + Sync + 'static, { // Parse command line let os_args: Vec<_> = env::args_os().collect(); let flags = args::parse(&os_args[1..])?; let ttrpc_address = env::var(TTRPC_ADDRESS)?; // Create shim instance let mut config = opts.unwrap_or_default(); // Setup signals let signals = setup_signals_tokio(&config); if 
!config.no_sub_reaper { reap::set_subreaper()?; } let mut shim = T::new(runtime_id, &flags, &mut config).await; match flags.action.as_str() { "start" => { let args = StartOpts { id: flags.id, publish_binary: flags.publish_binary, address: flags.address, ttrpc_address, namespace: flags.namespace, debug: flags.debug, }; let address = shim.start_shim(args).await?; let mut stdout = tokio::io::stdout(); stdout .write_all(address.as_bytes()) .await .map_err(io_error!(e, "write stdout"))?; // containerd occasionally read an empty string without flushing the stdout stdout.flush().await.map_err(io_error!(e, "flush stdout"))?; Ok(()) } "delete" => { tokio::spawn(async move { handle_signals(signals).await; }); let response = shim.delete_shim().await?; let resp_bytes = response.write_to_bytes()?; tokio::io::stdout() .write_all(resp_bytes.as_slice()) .await .map_err(io_error!(e, "failed to write response"))?; Ok(()) } _ => { if flags.socket.is_empty() { return Err(Error::InvalidArgument(String::from( "Shim socket cannot be empty", ))); } if !config.no_setup_logger { logger::init( flags.debug, &config.default_log_level, &flags.namespace, &flags.id, )?; } let publisher = RemotePublisher::new(&ttrpc_address).await?; let task = Box::new(shim.create_task_service(publisher).await) as Box; let task_service = create_task(Arc::from(task)); let Some(mut server) = create_server_with_retry(&flags).await? else { signal_server_started(); return Ok(()); }; server = server.register_service(task_service); server.start().await?; signal_server_started(); info!("Shim successfully started, waiting for exit signal..."); tokio::spawn(async move { handle_signals(signals).await; }); shim.wait().await; info!("Shutting down shim instance"); server.shutdown().await.unwrap_or_default(); // NOTE: If the shim server is down(like oom killer), the address // socket might be leaking. 
if let Ok(address) = read_file_to_str("address").await { remove_socket_silently(&address).await; } Ok(()) } } } /// Helper structure that wraps atomic bool to signal shim server when to shutdown the TTRPC server. /// /// Shim implementations are responsible for calling [`Self::signal`]. pub struct ExitSignal { notifier: Notify, exited: AtomicBool, } impl Default for ExitSignal { fn default() -> Self { ExitSignal { notifier: Notify::new(), exited: AtomicBool::new(false), } } } impl ExitSignal { /// Set exit signal to shutdown shim server. pub fn signal(&self) { self.exited.store(true, Ordering::SeqCst); self.notifier.notify_waiters(); } /// Wait for the exit signal to be set. pub async fn wait(&self) { loop { let notified = self.notifier.notified(); if self.exited.load(Ordering::SeqCst) { return; } notified.await; } } } /// Spawn is a helper func to launch shim process asynchronously. /// Typically this expected to be called from `StartShim`. #[cfg_attr(feature = "tracing", tracing::instrument(level = "info"))] pub async fn spawn(opts: StartOpts, grouping: &str, vars: Vec<(&str, &str)>) -> Result { let cmd = env::current_exe().map_err(io_error!(e, ""))?; let cwd = env::current_dir().map_err(io_error!(e, ""))?; let address = socket_address(&opts.address, &opts.namespace, grouping); // Activation pattern comes from the hcsshim: https://github.com/microsoft/hcsshim/blob/v0.10.0-rc.7/cmd/containerd-shim-runhcs-v1/serve.go#L57-L70 // another way to do it would to create named pipe and pass it to the child process through handle inheritence but that would require duplicating // the logic in Rust's 'command' for process creation. There is an issue in Rust to make it simplier to specify handle inheritence and this could // be revisited once https://github.com/rust-lang/rust/issues/54760 is implemented. 
let mut command = Command::new(cmd); command .current_dir(cwd) .stdout(Stdio::piped()) .stdin(Stdio::null()) .stderr(Stdio::null()) .envs(vars) .args([ "-namespace", &opts.namespace, "-id", &opts.id, "-address", &opts.address, "-socket", &address, ]); if opts.debug { command.arg("-debug"); } let mut child = command.spawn().map_err(io_error!(e, "spawn shim"))?; #[cfg(target_os = "linux")] crate::cgroup::set_cgroup_and_oom_score(child.id().unwrap())?; let mut reader = child.stdout.take().unwrap(); tokio::io::copy(&mut reader, &mut tokio::io::stderr()) .await .unwrap(); Ok(address) } #[cfg_attr(feature = "tracing", tracing::instrument(skip_all, level = "info"))] async fn create_server(flags: &args::Flags) -> Result { use containerd_shim_protos::ttrpc::r#async::transport::Listener; let listener = start_listener(&flags.socket).await?; let listener = Listener::try_from(listener).map_err(io_error!(e, "creating ttrpc listener"))?; let server = Server::new().add_listener(listener); Ok(server) } async fn create_server_with_retry(flags: &args::Flags) -> Result> { // Really try to create a server. let server = match create_server(flags).await { Ok(server) => server, Err(Error::IoError { err, .. }) if err.kind() == std::io::ErrorKind::AddrInUse => { // If the address is already in use then make sure it is up and running and return the address // This allows for running a single shim per container scenarios if let Ok(()) = wait_socket_working(&flags.socket, 5, 200).await { write_str_to_file("address", &flags.socket).await?; return Ok(None); } remove_socket(&flags.socket).await?; create_server(flags).await? 
} Err(e) => return Err(e), }; Ok(Some(server)) } fn signal_server_started() { use libc::{dup2, STDERR_FILENO, STDOUT_FILENO}; unsafe { if dup2(STDERR_FILENO, STDOUT_FILENO) < 0 { panic!("Error closing pipe: {}", std::io::Error::last_os_error()) } } } #[cfg(unix)] fn signal_stream(kind: i32) -> std::io::Result> { use tokio::signal::unix::{signal, SignalKind}; let kind = SignalKind::from_raw(kind); signal(kind).map(|mut sig| { // The object returned by `signal` is not a `Stream`. // The `poll_fn` function constructs a `Stream` based on a polling function. // We need to create a `Stream` so that we can use the `SelectAll` stream "merge" // all the signal streams. poll_fn(move |cx| { ready!(sig.poll_recv(cx)); Poll::Ready(Some(kind.as_raw_value())) }) .boxed() }) } #[cfg(windows)] fn signal_stream(kind: i32) -> std::io::Result> { use tokio::signal::windows::ctrl_c; // Windows doesn't have similar signal like SIGCHLD // We could implement something if required but for now // just implement support for SIGINT if kind != SIGINT { return Err(std::io::Error::new( std::io::ErrorKind::Other, format!("Invalid signal {kind}"), )); } ctrl_c().map(|mut sig| { // The object returned by `signal` is not a `Stream`. // The `poll_fn` function constructs a `Stream` based on a polling function. // We need to create a `Stream` so that we can use the `SelectAll` stream "merge" // all the signal streams. poll_fn(move |cx| { ready!(sig.poll_recv(cx)); Poll::Ready(Some(kind)) }) .boxed() }) } type Signals = SelectAll>; #[cfg_attr(feature = "tracing", tracing::instrument(skip_all, level = "info"))] fn setup_signals_tokio(config: &Config) -> Signals { #[cfg(unix)] let signals: &[i32] = if config.no_reaper { &[SIGTERM, SIGINT, SIGPIPE] } else { &[SIGTERM, SIGINT, SIGPIPE, SIGCHLD] }; // Windows doesn't have similar signal like SIGCHLD // We could implement something if required but for now // just listen for SIGINT // Note: see comment at the counterpart in synchronous/mod.rs for details. 
#[cfg(windows)] let signals: &[i32] = &[SIGINT]; let signals: Vec<_> = signals .iter() .copied() .map(signal_stream) .collect::>() .expect("signal setup failed"); SelectAll::from_iter(signals) } #[cfg_attr(feature = "tracing", tracing::instrument(skip_all, level = "info"))] async fn handle_signals(signals: Signals) { let mut signals = signals.fuse(); while let Some(sig) = signals.next().await { match sig { SIGPIPE => {} SIGTERM | SIGINT => { debug!("received {}", sig); } SIGCHLD => loop { // Note: see comment at the counterpart in synchronous/mod.rs for details. match wait::waitpid(Some(Pid::from_raw(-1)), Some(WaitPidFlag::WNOHANG)) { Ok(WaitStatus::Exited(pid, status)) => { monitor_notify_by_pid(pid.as_raw(), status) .await .unwrap_or_else(|e| error!("failed to send exit event {}", e)) } Ok(WaitStatus::Signaled(pid, sig, _)) => { debug!("child {} terminated({})", pid, sig); let exit_code = 128 + sig as i32; monitor_notify_by_pid(pid.as_raw(), exit_code) .await .unwrap_or_else(|e| error!("failed to send signal event {}", e)) } Ok(WaitStatus::StillAlive) => { break; } Err(Errno::ECHILD) => { break; } Err(e) => { warn!("error occurred in signal handler: {}", e); } _ => {} } }, _ => { if let Ok(sig) = Signal::try_from(sig) { debug!("received {}", sig); } else { warn!("received invalid signal {}", sig); } } } } } #[cfg_attr(feature = "tracing", tracing::instrument(level = "info"))] async fn remove_socket_silently(address: &str) { remove_socket(address) .await .unwrap_or_else(|e| warn!("failed to remove socket: {}", e)) } #[cfg_attr(feature = "tracing", tracing::instrument(level = "info"))] async fn remove_socket(address: &str) -> Result<()> { let path = parse_sockaddr(address); if let Ok(md) = Path::new(path).metadata() { if md.file_type().is_socket() { tokio::fs::remove_file(path).await.map_err(io_error!( e, "failed to remove socket {}", address ))?; } } Ok(()) } #[cfg_attr(feature = "tracing", tracing::instrument(skip_all, level = "info"))] async fn 
start_listener(address: &str) -> Result { let addr = address.to_string(); asyncify(move || -> Result { crate::start_listener(&addr).map_err(|e| Error::IoError { context: format!("failed to start listener {}", addr), err: e, }) }) .await } #[cfg_attr(feature = "tracing", tracing::instrument(level = "info"))] async fn wait_socket_working(address: &str, interval_in_ms: u64, count: u32) -> Result<()> { for _i in 0..count { match Client::connect(address).await { Ok(_) => { return Ok(()); } Err(_) => { tokio::time::sleep(std::time::Duration::from_millis(interval_in_ms)).await; } } } Err(other!("time out waiting for socket {}", address)) } #[cfg(test)] mod tests { use std::sync::Arc; use crate::asynchronous::{start_listener, ExitSignal}; #[tokio::test] async fn test_exit_signal() { let signal = Arc::new(ExitSignal::default()); let cloned = signal.clone(); let handle = tokio::spawn(async move { cloned.wait().await; }); signal.signal(); if let Err(err) = handle.await { panic!("{:?}", err); } } #[tokio::test] async fn test_start_listener() { let tmpdir = tempfile::tempdir().unwrap(); let path = tmpdir.path().to_str().unwrap().to_owned(); let socket = path + "/ns1/id1/socket"; let _listener = start_listener(&socket).await.unwrap(); let _listener2 = start_listener(&socket) .await .expect_err("socket should already in use"); let socket2 = socket + "/socket"; assert!(start_listener(&socket2).await.is_err()); let path = tmpdir.path().to_str().unwrap().to_owned(); let txt_file = path + "/demo.txt"; tokio::fs::write(&txt_file, "test").await.unwrap(); assert!(start_listener(&txt_file).await.is_err()); let context = tokio::fs::read_to_string(&txt_file).await.unwrap(); assert_eq!(context, "test"); } } ================================================ FILE: crates/shim/src/asynchronous/monitor.rs ================================================ /* Copyright The containerd Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ use std::{collections::HashMap, sync::LazyLock}; use log::error; use tokio::sync::{ mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}, Mutex, }; use crate::{ error::{Error, Result}, monitor::{ExitEvent, Subject, Topic}, }; pub static MONITOR: LazyLock> = LazyLock::new(|| { Mutex::new(Monitor { seq_id: 0, subscribers: HashMap::new(), topic_subs: HashMap::new(), }) }); pub async fn monitor_subscribe(topic: Topic) -> Result { let mut monitor = MONITOR.lock().await; let s = monitor.subscribe(topic)?; Ok(s) } pub async fn monitor_unsubscribe(sub_id: i64) -> Result<()> { let mut monitor = MONITOR.lock().await; monitor.unsubscribe(sub_id) } pub async fn monitor_notify_by_pid(pid: i32, exit_code: i32) -> Result<()> { let monitor = MONITOR.lock().await; monitor.notify_by_pid(pid, exit_code).await } pub async fn monitor_notify_by_exec(id: &str, exec_id: &str, exit_code: i32) -> Result<()> { let monitor = MONITOR.lock().await; monitor.notify_by_exec(id, exec_id, exit_code).await } pub struct Monitor { pub(crate) seq_id: i64, pub(crate) subscribers: HashMap, pub(crate) topic_subs: HashMap>, } pub(crate) struct Subscriber { pub(crate) topic: Topic, pub(crate) tx: UnboundedSender, } pub struct Subscription { pub id: i64, pub rx: UnboundedReceiver, } impl Monitor { pub fn subscribe(&mut self, topic: Topic) -> Result { let (tx, rx) = unbounded_channel::(); let id = self.seq_id; self.seq_id += 1; let subscriber = Subscriber { tx, topic: topic.clone(), }; 
self.subscribers.insert(id, subscriber); self.topic_subs.entry(topic).or_default().push(id); Ok(Subscription { id, rx }) } pub async fn notify_by_pid(&self, pid: i32, exit_code: i32) -> Result<()> { let subject = Subject::Pid(pid); self.notify_topic(&Topic::Pid, &subject, exit_code).await; self.notify_topic(&Topic::All, &subject, exit_code).await; Ok(()) } pub async fn notify_by_exec(&self, cid: &str, exec_id: &str, exit_code: i32) -> Result<()> { let subject = Subject::Exec(cid.into(), exec_id.into()); self.notify_topic(&Topic::Exec, &subject, exit_code).await; self.notify_topic(&Topic::All, &subject, exit_code).await; Ok(()) } // notify_topic try best to notify exit codes to all subscribers and log errors. async fn notify_topic(&self, topic: &Topic, subject: &Subject, exit_code: i32) { let mut results = Vec::new(); if let Some(subs) = self.topic_subs.get(topic) { let subscribers = subs.iter().filter_map(|x| self.subscribers.get(x)); for sub in subscribers { let res = sub .tx .send(ExitEvent { subject: subject.clone(), exit_code, }) .map_err(other_error!("failed to send exit code")); results.push(res); } } let mut result_iter = results.iter(); while let Some(Err(e)) = result_iter.next() { error!("failed to send exit code to subscriber {:?}", e) } } pub fn unsubscribe(&mut self, id: i64) -> Result<()> { let sub = self.subscribers.remove(&id); if let Some(s) = sub { self.topic_subs.get_mut(&s.topic).map(|v| { v.iter().position(|&x| x == id).map(|i| { v.remove(i); }) }); } Ok(()) } } #[cfg(test)] mod tests { use crate::{ asynchronous::monitor::{ monitor_notify_by_exec, monitor_notify_by_pid, monitor_subscribe, monitor_unsubscribe, }, monitor::{ExitEvent, Subject, Topic}, }; #[tokio::test] async fn test_monitor() { let mut s = monitor_subscribe(Topic::Pid).await.unwrap(); let mut s1 = monitor_subscribe(Topic::All).await.unwrap(); let mut s2 = monitor_subscribe(Topic::Exec).await.unwrap(); monitor_notify_by_pid(13, 128).await.unwrap(); 
monitor_notify_by_exec("test-container", "test-exec", 139) .await .unwrap(); // pid subscription receive only pid event if let Some(ExitEvent { subject: Subject::Pid(p), exit_code: ec, }) = s.rx.recv().await { assert_eq!(ec, 128); assert_eq!(p, 13); } else { panic!("can not receive the notified event"); } // topic all receive all events if let Some(ExitEvent { subject: Subject::Pid(p), exit_code: ec, }) = s1.rx.recv().await { assert_eq!(ec, 128); assert_eq!(p, 13); } else { panic!("can not receive the notified event"); } if let Some(ExitEvent { subject: Subject::Exec(cid, eid), exit_code: ec, }) = s1.rx.recv().await { assert_eq!(cid, "test-container"); assert_eq!(eid, "test-exec"); assert_eq!(ec, 139); } else { panic!("can not receive the notified event"); } // exec topic only receive exec exit event if let Some(ExitEvent { subject: Subject::Exec(cid, eid), exit_code: ec, }) = s2.rx.recv().await { assert_eq!(cid, "test-container"); assert_eq!(eid, "test-exec"); assert_eq!(ec, 139); } else { panic!("can not receive the notified event"); } monitor_unsubscribe(s.id).await.unwrap(); monitor_unsubscribe(s1.id).await.unwrap(); monitor_unsubscribe(s2.id).await.unwrap(); } } ================================================ FILE: crates/shim/src/asynchronous/publisher.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ use std::os::unix::io::RawFd; use async_trait::async_trait; use containerd_shim_protos::{ api::Empty, protobuf::MessageDyn, shim::{event::Envelope, events}, shim_async::{Client, Events, EventsClient}, ttrpc, ttrpc::{context::Context, r#async::TtrpcContext}, }; use log::{debug, error, warn}; use tokio::sync::mpsc; use crate::{ error::{self, Result}, util::{asyncify, connect, convert_to_any, timestamp}, }; /// The publisher reports events and uses a queue to retry the event reporting. /// The maximum number of attempts to report is 5 times. /// When the ttrpc client fails to report, it attempts to reconnect to the client and report. /// /// Max queue size const QUEUE_SIZE: i64 = 1024; /// Max try five times const MAX_REQUEUE: i64 = 5; /// Async Remote publisher connects to containerd's TTRPC endpoint to publish events from shim. pub struct RemotePublisher { pub address: String, sender: mpsc::Sender, } #[derive(Clone, Debug)] pub struct Item { ev: Envelope, ctx: Context, count: i64, } impl RemotePublisher { /// Connect to containerd's TTRPC endpoint asynchronously. 
/// /// containerd uses `/run/containerd/containerd.sock.ttrpc` by default pub async fn new(address: impl AsRef) -> Result { let client = Self::connect(address.as_ref()).await?; // Init the queue channel let (sender, receiver) = mpsc::channel::(QUEUE_SIZE as usize); let rt = RemotePublisher { address: address.as_ref().to_string(), sender, }; rt.process_queue(client, receiver).await; Ok(rt) } /// Process_queue for push events /// /// This is a loop task for dealing event tasks pub async fn process_queue(&self, ttrpc_client: Client, mut receiver: mpsc::Receiver) { let mut client = EventsClient::new(ttrpc_client); let sender = self.sender.clone(); let address = self.address.clone(); tokio::spawn(async move { // only this use receiver while let Some(item) = receiver.recv().await { // drop this event after MAX_REQUEUE try if item.count > MAX_REQUEUE { debug!("drop event {:?}", item); continue; } let mut req = events::ForwardRequest::new(); req.set_envelope(item.ev.clone()); let new_item = Item { ev: item.ev.clone(), ctx: item.ctx.clone(), count: item.count + 1, }; if let Err(e) = client.forward(new_item.ctx.clone(), &req).await { match e { ttrpc::error::Error::RemoteClosed | ttrpc::error::Error::LocalClosed => { warn!("publish fail because the server or client close {:?}", e); // reconnect client if let Ok(c) = Self::connect(address.as_str()).await.map_err(|e| { debug!("reconnect the ttrpc client {:?} fail", e); }) { client = EventsClient::new(c); } } _ => { // TODO! 
if it is other error , May we should deal with socket file error!("the client forward err is {:?}", e); } } let sender_ref = sender.clone(); // Take a another task requeue , for no blocking the recv task tokio::spawn(async move { // wait for few time and send for imporving the success ratio tokio::time::sleep(tokio::time::Duration::from_secs(new_item.count as u64)) .await; // if channel is full and send fail ,release it after 3 seconds let _ = sender_ref .send_timeout(new_item, tokio::time::Duration::from_secs(3)) .await; }); } } debug!("publisher 'process_queue' quit complete"); }); } async fn connect(address: impl AsRef) -> Result { let addr = address.as_ref().to_string(); let fd = asyncify(move || -> Result { let fd = connect(addr)?; Ok(fd) }) .await?; // Safety: `fd` is a unix socket returned by `connect()`. // `from_raw_unix_socket_fd` takes ownership of the RawFd. Ok(unsafe { Client::from_raw_unix_socket_fd(fd) }) } /// Publish a new event. /// /// Event object can be anything that Protobuf able serialize (e.g. implement `Message` trait). 
pub async fn publish(
    &self,
    ctx: Context,
    topic: &str,
    namespace: &str,
    event: Box<dyn MessageDyn>,
) -> Result<()> {
    let mut envelope = Envelope::new();
    envelope.set_topic(topic.to_owned());
    envelope.set_namespace(namespace.to_owned());
    envelope.set_timestamp(timestamp()?);
    envelope.set_event(convert_to_any(event)?);

    let item = Item {
        ev: envelope.clone(),
        ctx: ctx.clone(),
        count: 0,
    };
    // If the channel is full and the send fails, release it after 3 seconds.
    self.sender
        .send_timeout(item, tokio::time::Duration::from_secs(3))
        .await
        .map_err(|e| error::Error::Ttrpc(ttrpc::error::Error::Others(e.to_string())))?;
    Ok(())
}
}

#[async_trait]
impl Events for RemotePublisher {
    // Accepts an already-built envelope and enqueues it for delivery.
    async fn forward(
        &self,
        _ctx: &TtrpcContext,
        req: events::ForwardRequest,
    ) -> ttrpc::Result<Empty> {
        let item = Item {
            ev: req.envelope().clone(),
            ctx: Context::default(),
            count: 0,
        };
        // If the channel is full and the send fails, release it after 3 seconds.
        self.sender
            .send_timeout(item, tokio::time::Duration::from_secs(3))
            .await
            .map_err(|e| error::Error::Ttrpc(ttrpc::error::Error::Others(e.to_string())))?;
        Ok(Empty::default())
    }
}

#[cfg(test)]
mod tests {
    use std::{os::unix::net::UnixListener, sync::Arc};

    use async_trait::async_trait;
    use containerd_shim_protos::{
        api::{Empty, ForwardRequest},
        events::task::TaskOOM,
        shim_async::{create_events, Events},
        ttrpc::asynchronous::{transport::Listener, Server},
    };
    use tokio::sync::{
        mpsc::{channel, Sender},
        Barrier,
    };

    use super::*;
    use crate::publisher::ttrpc::r#async::TtrpcContext;

    struct FakeServer {
        tx: Sender<i32>,
    }

    #[async_trait]
    impl Events for FakeServer {
        async fn forward(&self, _ctx: &TtrpcContext, req: ForwardRequest) -> ttrpc::Result<Empty> {
            let env = req.envelope();
            if env.topic() == "/tasks/oom" {
                self.tx.send(0).await.unwrap();
            } else {
                self.tx.send(-1).await.unwrap();
            }
            Ok(Empty::default())
        }
    }

    #[tokio::test]
    async fn test_connect() {
        let tmpdir = tempfile::tempdir().unwrap();
        let path = format!("{}/socket", tmpdir.as_ref().to_str().unwrap());
        let path1 = path.clone();

        assert!(RemotePublisher::connect("a".repeat(16384)).await.is_err());
        assert!(RemotePublisher::connect(&path).await.is_err());

        let (tx, mut rx) = channel(1);
        let server = FakeServer { tx };
        let barrier = Arc::new(Barrier::new(2));
        let barrier2 = barrier.clone();
        let server_thread = tokio::spawn(async move {
            let listener = UnixListener::bind(&path1).unwrap();
            let listener = Listener::try_from(listener).unwrap();
            let service = create_events(Arc::new(server));
            let mut server = Server::new()
                .add_listener(listener)
                .register_service(service);
            server.start().await.unwrap();
            barrier2.wait().await;

            barrier2.wait().await;
            server.shutdown().await.unwrap();
        });

        barrier.wait().await;
        let client = RemotePublisher::new(&path).await.unwrap();
        let mut msg = TaskOOM::new();
        msg.set_container_id("test".to_string());
        client
            .publish(Context::default(), "/tasks/oom", "ns1", Box::new(msg))
            .await
            .unwrap();
        match rx.recv().await {
            Some(0) => {}
            _ => {
                panic!("the received event is not same as published")
            }
        }
        barrier.wait().await;
        server_thread.await.unwrap();
    }
}


================================================
FILE: crates/shim/src/asynchronous/util.rs
================================================
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/ use std::path::Path; use containerd_shim_protos::{api::Mount, shim::oci::Options}; use libc::mode_t; use nix::sys::stat::Mode; use oci_spec::runtime::Spec; use tokio::{ fs::OpenOptions, io::{AsyncReadExt, AsyncWriteExt}, task::spawn_blocking, }; use crate::{ error::{Error, Result}, util::{AsOption, JsonOptions, CONFIG_FILE_NAME, OPTIONS_FILE_NAME, RUNTIME_FILE_NAME}, }; pub async fn asyncify(f: F) -> Result where F: FnOnce() -> Result + Send + 'static, T: Send + 'static, { spawn_blocking(f) .await .map_err(other_error!("failed to spawn blocking task"))? } pub async fn read_file_to_str(path: impl AsRef) -> Result { let mut file = tokio::fs::File::open(&path).await.map_err(io_error!( e, "failed to open file {}", path.as_ref().display() ))?; let mut content = String::new(); file.read_to_string(&mut content).await.map_err(io_error!( e, "failed to read {}", path.as_ref().display() ))?; Ok(content) } pub async fn write_str_to_file(filename: impl AsRef, s: impl AsRef) -> Result<()> { let file = filename.as_ref().file_name().ok_or_else(|| { Error::InvalidArgument(format!("pid path illegal {}", filename.as_ref().display())) })?; let tmp_path = filename .as_ref() .parent() .map(|x| x.join(format!(".{}", file.to_str().unwrap_or("")))) .ok_or_else(|| Error::InvalidArgument(String::from("failed to create tmp path")))?; let mut f = OpenOptions::new() .write(true) .create_new(true) .open(&tmp_path) .await .map_err(io_error!(e, "open {}", tmp_path.display()))?; f.write_all(s.as_ref().as_bytes()).await.map_err(io_error!( e, "write tmp file {}", tmp_path.display() ))?; tokio::fs::rename(&tmp_path, &filename) .await .map_err(io_error!( e, "rename tmp file to {}", filename.as_ref().display() ))?; Ok(()) } pub async fn read_pid_from_file(pid_path: &Path) -> Result { let pid_str = read_file_to_str(pid_path).await?; let pid = pid_str.parse::()?; Ok(pid) } pub async fn read_spec(bundle: impl AsRef) -> Result { let path = bundle.as_ref().join(CONFIG_FILE_NAME); let content = 
read_file_to_str(&path).await?; serde_json::from_str::(content.as_str()).map_err(other_error!("read spec")) } // read_options reads the option information from the path. // When the file does not exist, read_options returns nil without an error. pub async fn read_options(bundle: impl AsRef) -> Result { let path = bundle.as_ref().join(OPTIONS_FILE_NAME); if !path.exists() { return Ok(Options::default()); } let opts_str = read_file_to_str(path).await?; let opts = serde_json::from_str::(&opts_str).map_err(other_error!("read options"))?; Ok(opts.into()) } pub async fn read_runtime(bundle: impl AsRef) -> Result { read_file_to_str(bundle.as_ref().join(RUNTIME_FILE_NAME)).await } pub async fn write_options(bundle: impl AsRef, opt: &Options) -> Result<()> { let json_opt = JsonOptions::from(opt.to_owned()); let opts_str = serde_json::to_string(&json_opt)?; let path = bundle.as_ref().join(OPTIONS_FILE_NAME); write_str_to_file(path.as_path(), opts_str.as_str()).await } pub async fn write_runtime(bundle: impl AsRef, binary_name: &str) -> Result<()> { write_str_to_file(bundle.as_ref().join(RUNTIME_FILE_NAME), binary_name).await } pub async fn mount_rootfs(m: &Mount, target: impl AsRef) -> Result<()> { let mount_type = m.type_.to_string(); let source = m.source.to_string(); let options = m.options.to_vec(); let rootfs = target.as_ref().to_owned(); asyncify(move || -> Result<()> { let mount_type = mount_type.as_option(); let source = source.as_option(); crate::mount::mount_rootfs(mount_type, source, options.as_slice(), &rootfs) }) .await } pub async fn mkdir(path: impl AsRef, mode: mode_t) -> Result<()> { let path_buf = path.as_ref().to_path_buf(); asyncify(move || -> Result<()> { if !path_buf.as_path().exists() { let mode = Mode::from_bits(mode).ok_or_else(|| other!("invalid dir mode {}", mode))?; nix::unistd::mkdir(path_buf.as_path(), mode)?; } Ok(()) }) .await } #[cfg(test)] mod tests { use crate::util::{read_file_to_str, write_str_to_file}; #[tokio::test] async fn 
test_read_write_str() {
        let tmpdir = tempfile::tempdir().unwrap();
        let tmp_file = tmpdir.path().join("test");
        let test_str = "this is a test";
        write_str_to_file(&tmp_file, test_str).await.unwrap();
        let read_str = read_file_to_str(&tmp_file).await.unwrap();
        assert_eq!(read_str, test_str);
    }
}


================================================
FILE: crates/shim/src/cgroup.rs
================================================
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

#![cfg(target_os = "linux")]

use std::{
    error::Error as StdError,
    fs,
    io::Read,
    path::{Path, PathBuf},
};

use cgroups_rs::{
    fs::{
        cgroup::get_cgroups_relative_paths_by_pid, error::Result as CgResult, hierarchies, Cgroup,
        MaxValue, Subsystem,
    },
    CgroupPid,
};
use containerd_shim_protos::{
    cgroups::metrics::{CPUStat, CPUUsage, MemoryEntry, MemoryStat, Metrics, PidsStat, Throttle},
    protobuf::{well_known_types::any::Any, Message},
    shim::oci::Options,
};
use oci_spec::runtime::LinuxResources;

use crate::error::{Error, Result};

// OOM_SCORE_ADJ_MAX is from https://github.com/torvalds/linux/blob/master/include/uapi/linux/oom.h#L10
const OOM_SCORE_ADJ_MAX: i64 = 1000;

/// Read shim options from stdin, move the shim into the requested cgroup (if any)
/// and adjust its OOM score. No-op when `pid` is 0.
#[cfg_attr(feature = "tracing", tracing::instrument(level = "Info"))]
pub fn set_cgroup_and_oom_score(pid: u32) -> Result<()> {
    if pid == 0 {
        return Ok(());
    }

    // set cgroup
    let mut data: Vec<u8> = Vec::new();
    std::io::stdin()
        .read_to_end(&mut data)
        .map_err(io_error!(e, "read stdin"))?;

    if !data.is_empty() {
        let opts =
            Any::parse_from_bytes(&data).and_then(|any| Options::parse_from_bytes(&any.value))?;

        if !opts.shim_cgroup.is_empty() {
            add_task_to_cgroup(opts.shim_cgroup.as_str(), pid)?;
        }
    }

    // set oom score
    adjust_oom_score(pid)
}

/// Add a process to the given relative cgroup path
#[cfg_attr(feature = "tracing", tracing::instrument(level = "Info"))]
pub fn add_task_to_cgroup(path: &str, pid: u32) -> Result<()> {
    let h = hierarchies::auto();
    // use relative path here, need to trim prefix '/'
    let path = path.trim_start_matches('/');

    Cgroup::load(h, path)
        .add_task_by_tgid(CgroupPid::from(pid as u64))
        .map_err(other_error!("add task to cgroup"))
}

/// Sets the OOM score for the process to the parent's OOM score + 1
/// to ensure that the parent has a lower score than the shim
#[cfg_attr(feature = "tracing", tracing::instrument(level = "Info"))]
pub fn adjust_oom_score(pid: u32) -> Result<()> {
    let score = read_process_oom_score(std::os::unix::process::parent_id())?;
    if score < OOM_SCORE_ADJ_MAX {
        write_process_oom_score(pid, score + 1)?;
    }
    Ok(())
}

// Read `/proc/<pid>/oom_score_adj` as an i64.
#[cfg_attr(feature = "tracing", tracing::instrument(level = "info"))]
fn read_process_oom_score(pid: u32) -> Result<i64> {
    let content = fs::read_to_string(format!("/proc/{}/oom_score_adj", pid))
        .map_err(io_error!(e, "read oom score"))?;
    let score = content
        .trim()
        .parse::<i64>()
        .map_err(other_error!("parse oom score"))?;
    Ok(score)
}

// Write `score` into `/proc/<pid>/oom_score_adj`.
#[cfg_attr(feature = "tracing", tracing::instrument(level = "info"))]
fn write_process_oom_score(pid: u32, score: i64) -> Result<()> {
    fs::write(format!("/proc/{}/oom_score_adj", pid), score.to_string())
        .map_err(io_error!(e, "write oom score"))
}

/// Collect process cgroup stats, return only necessary parts of it
#[cfg_attr(feature = "tracing", tracing::instrument(level = "info"))]
pub fn collect_metrics(cgroup: &Cgroup) -> Result<Metrics> {
    let mut metrics = Metrics::new();
    // to make it easy, fill the necessary metrics only.
    for sub_system in Cgroup::subsystems(cgroup) {
        match sub_system {
            Subsystem::Cpu(cpu_ctr) => {
                let mut cpu_usage = CPUUsage::new();
                let mut throttle = Throttle::new();
                let stat = cpu_ctr.cpu().stat;
                for line in stat.lines() {
                    let parts = line.split(' ').collect::<Vec<&str>>();
                    if parts.len() != 2 {
                        Err(Error::Other(format!("invalid cpu stat line: {}", line)))?;
                    }
                    // https://github.com/opencontainers/runc/blob/dbe8434359ca35af1c1e10df42b1f4391c1e1010/libcontainer/cgroups/fs2/cpu.go#L70
                    match parts[0] {
                        "usage_usec" => {
                            cpu_usage.set_total(parts[1].parse::<u64>().unwrap());
                        }
                        "user_usec" => {
                            cpu_usage.set_user(parts[1].parse::<u64>().unwrap());
                        }
                        "system_usec" => {
                            cpu_usage.set_kernel(parts[1].parse::<u64>().unwrap());
                        }
                        "nr_periods" => {
                            throttle.set_periods(parts[1].parse::<u64>().unwrap());
                        }
                        "nr_throttled" => {
                            throttle.set_throttled_periods(parts[1].parse::<u64>().unwrap());
                        }
                        "throttled_usec" => {
                            throttle.set_throttled_time(parts[1].parse::<u64>().unwrap());
                        }
                        _ => {}
                    }
                }
                let mut cpu_stats = CPUStat::new();
                cpu_stats.set_throttling(throttle);
                cpu_stats.set_usage(cpu_usage);
                metrics.set_cpu(cpu_stats);
            }
            Subsystem::Mem(mem_ctr) => {
                let mem = mem_ctr.memory_stat();
                let mut mem_entry = MemoryEntry::new();
                mem_entry.set_usage(mem.usage_in_bytes);
                let mut mem_stat = MemoryStat::new();
                mem_stat.set_usage(mem_entry);
                mem_stat.set_total_inactive_file(mem.stat.total_inactive_file);
                metrics.set_memory(mem_stat);
            }
            Subsystem::Pid(pid_ctr) => {
                // ignore cgroup NotFound error
                let ignore_err = |cr: CgResult<u64>| -> CgResult<u64> {
                    cr.or_else(|e| {
                        if e.source()
                            .and_then(<dyn StdError>::downcast_ref::<std::io::Error>)
                            .map(std::io::Error::kind)
                            == Some(std::io::ErrorKind::NotFound)
                        {
                            Ok(0)
                        } else {
                            Err(e)
                        }
                    })
                };
                let mut pid_stats = PidsStat::new();
                pid_stats.set_current(
                    ignore_err(pid_ctr.get_pid_current())
                        .map_err(other_error!("get current pid"))?,
                );
                pid_stats.set_limit(
                    ignore_err(pid_ctr.get_pid_max().map(|val| match val {
                        // See https://github.com/opencontainers/runc/blob/dbe8434359ca35af1c1e10df42b1f4391c1e1010/libcontainer/cgroups/fs/pids.go#L55
                        MaxValue::Max => 0,
                        MaxValue::Value(val) => val as u64,
                    }))
                    .map_err(other_error!("get pid limit"))?,
                );
                metrics.set_pids(pid_stats)
            }
            _ => {}
        }
    }
    Ok(metrics)
}

// get_cgroup will return either cgroup v1 or v2 depending on system configuration
#[cfg_attr(feature = "tracing", tracing::instrument(level = "info"))]
pub fn get_cgroup(pid: u32) -> Result<Cgroup> {
    let hierarchies = hierarchies::auto();
    let cgroup = if hierarchies.v2() {
        let path = get_cgroups_v2_path_by_pid(pid)?;
        Cgroup::load(hierarchies, path)
    } else {
        // get container main process cgroup
        let path =
            get_cgroups_relative_paths_by_pid(pid).map_err(other_error!("get process cgroup"))?;
        Cgroup::load_with_relative_paths(hierarchies::auto(), Path::new("."), path)
    };
    Ok(cgroup)
}

/// Get the cgroups v2 path given a PID
#[cfg_attr(feature = "tracing", tracing::instrument(level = "info"))]
pub fn get_cgroups_v2_path_by_pid(pid: u32) -> Result<PathBuf> {
    // todo: should upstream to cgroups-rs
    let path = format!("/proc/{}/cgroup", pid);
    let content = fs::read_to_string(path).map_err(io_error!(e, "read cgroup"))?;
    let content = content.lines().next().unwrap_or("");

    let Ok(path) = parse_cgroups_v2_path(content)?.canonicalize() else {
        return Err(Error::Other("cgroup path not found".to_string()));
    };
    Ok(path)
}

// https://github.com/opencontainers/runc/blob/1950892f69597aa844cbf000fbdf77610dda3a44/libcontainer/cgroups/fs2/defaultpath.go#L83
#[cfg_attr(feature = "tracing", tracing::instrument(level = "info"))]
fn parse_cgroups_v2_path(content: &str) -> Result<PathBuf> {
    // the entry for cgroup v2 is always in the format like `0::$PATH`
    // where 0 is the hierarchy ID, the controller name is omitted in cgroup v2
    // and $PATH is the cgroup path
    // see https://docs.kernel.org/admin-guide/cgroup-v2.html
    let Some(path) = content.strip_prefix("0::") else {
        return Err(Error::Other(format!("invalid cgroup path: {}", content)));
    };
    let path = path.trim_start_matches('/');
    Ok(PathBuf::from(format!("/sys/fs/cgroup/{}", path)))
}

/// Update process cgroup limits
#[cfg_attr(feature = "tracing", tracing::instrument(level = "info"))]
pub fn update_resources(cgroup: &Cgroup, resources: &LinuxResources) -> Result<()> {
    for sub_system in Cgroup::subsystems(cgroup) {
        match sub_system {
            Subsystem::Pid(pid_ctr) => {
                // set maximum number of PIDs
                if let Some(pids) = resources.pids() {
                    pid_ctr
                        .set_pid_max(MaxValue::Value(pids.limit()))
                        .map_err(other_error!("set pid max"))?;
                }
            }
            Subsystem::Mem(mem_ctr) => {
                if let Some(memory) = resources.memory() {
                    // if both the swap and memory limit settings are present
                    if let (Some(limit), Some(swap)) = (memory.limit(), memory.swap()) {
                        // get the current memory limit
                        let current = mem_ctr.memory_stat().limit_in_bytes;
                        // if the updated swap value is larger than the current memory limit set the swap changes first
                        // then set the memory limit as swap must always be larger than the current limit
                        if current < swap {
                            mem_ctr
                                .set_memswap_limit(swap)
                                .map_err(other_error!("set memsw limit"))?;
                            mem_ctr
                                .set_limit(limit)
                                .map_err(other_error!("set mem limit"))?;
                        }
                    }
                    // set memory limit in bytes
                    if let Some(limit) = memory.limit() {
                        mem_ctr
                            .set_limit(limit)
                            .map_err(other_error!("set mem limit"))?;
                    }
                    // set memory swap limit in bytes
                    if let Some(swap) = memory.swap() {
                        mem_ctr
                            .set_memswap_limit(swap)
                            .map_err(other_error!("set memsw limit"))?;
                    }
                }
            }
            Subsystem::CpuSet(cpuset_ctr) => {
                if let Some(cpu) = resources.cpu() {
                    // set CPUs to use within the cpuset
                    if let Some(cpus) = cpu.cpus() {
                        cpuset_ctr
                            .set_cpus(cpus)
                            .map_err(other_error!("set CPU sets"))?;
                    }
                    // set list of memory nodes in the cpuset
                    if let Some(mems) = cpu.mems() {
                        cpuset_ctr
                            .set_mems(mems)
                            .map_err(other_error!("set CPU memes"))?;
                    }
                }
            }
            Subsystem::Cpu(cpu_ctr) => {
                if let Some(cpu) = resources.cpu() {
                    // set CPU shares
                    if let Some(shares) = cpu.shares() {
                        cpu_ctr
                            .set_shares(shares)
                            .map_err(other_error!("set CPU share"))?;
                    }
                    // set CPU hardcap limit
                    if let Some(quota) = cpu.quota() {
                        cpu_ctr
                            .set_cfs_quota(quota)
                            .map_err(other_error!("set CPU quota"))?;
                    }
                    // set CPU hardcap period
                    if let Some(period) = cpu.period() {
                        cpu_ctr
                            .set_cfs_period(period)
                            .map_err(other_error!("set CPU period"))?;
                    }
                }
            }
            Subsystem::HugeTlb(ht_ctr) => {
                // set the limit of "pagesize" hugetlb usage
                if let Some(hp_limits) = resources.hugepage_limits() {
                    for limit in hp_limits {
                        ht_ctr
                            .set_limit_in_bytes(limit.page_size().as_str(), limit.limit() as u64)
                            .map_err(other_error!("set huge page limit"))?;
                    }
                }
            }
            _ => {}
        }
    }
    Ok(())
}

#[cfg(test)]
mod tests {
    use std::path::PathBuf;

    use cgroups_rs::{
        fs::{hierarchies, Cgroup},
        CgroupPid,
    };

    use super::parse_cgroups_v2_path;
    use crate::cgroup::{
        add_task_to_cgroup, adjust_oom_score, read_process_oom_score, OOM_SCORE_ADJ_MAX,
    };

    #[test]
    fn test_add_cgroup() {
        let path = "runc_shim_test_cgroup";
        let h = hierarchies::auto();
        // create cgroup path first
        let cg = Cgroup::new(h, path).unwrap();

        let pid = std::process::id();
        add_task_to_cgroup(path, pid).unwrap();
        let cg_id = CgroupPid::from(pid as u64);
        assert!(cg.tasks().contains(&cg_id));
        // remove cgroup as possible
        cg.remove_task_by_tgid(cg_id).unwrap();
        cg.delete().unwrap()
    }

    #[test]
    fn test_adjust_oom_score() {
        let pid = std::process::id();
        let score = read_process_oom_score(pid).unwrap();
        adjust_oom_score(pid).unwrap();
        let new = read_process_oom_score(pid).unwrap();
        if score < OOM_SCORE_ADJ_MAX {
            assert_eq!(new, score + 1)
        } else {
            assert_eq!(new, OOM_SCORE_ADJ_MAX)
        }
    }

    #[test]
    fn test_parse_cgroups_v2_path() {
        let path = "0::/user.slice/user-1000.slice/session-2.scope";
        assert_eq!(
            parse_cgroups_v2_path(path).unwrap(),
            PathBuf::from("/sys/fs/cgroup/user.slice/user-1000.slice/session-2.scope")
        );
    }

    #[test]
    fn test_parse_cgroups_v2_path_empty() {
        let path = "0::";
        assert_eq!(
            parse_cgroups_v2_path(path).unwrap(),
            PathBuf::from("/sys/fs/cgroup/")
        );
    }

    #[test]
    fn test_parse_cgroups_v2_path_kube() {
        let path =
"0::/kubepods-besteffort-pod8.slice:cri-containerd:8";
        assert_eq!(
            parse_cgroups_v2_path(path).unwrap(),
            PathBuf::from("/sys/fs/cgroup/kubepods-besteffort-pod8.slice:cri-containerd:8")
        );
    }
}


================================================
FILE: crates/shim/src/error.rs
================================================
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

use thiserror::Error;

use crate::{
    monitor::ExitEvent,
    protos::{protobuf, ttrpc},
};

/// Crate-wide result alias using [`Error`].
pub type Result<T> = std::result::Result<T, Error>;

/// All error variants produced by the shim crate.
#[derive(Debug, Error)]
pub enum Error {
    /// Invalid command line arguments.
    #[error("Failed to parse command line: {0}")]
    InvalidArgument(String),

    /// TTRPC specific error.
    #[error("TTRPC error: {0}")]
    Ttrpc(#[from] ttrpc::Error),

    #[error("Protobuf error: {0}")]
    Protobuf(#[from] protobuf::Error),

    #[error("{context} error: {err}")]
    IoError {
        context: String,
        #[source]
        err: std::io::Error,
    },

    #[error("Env error: {0}")]
    Env(#[from] std::env::VarError),

    #[error("Failed to setup logger: {0}")]
    Setup(#[from] log::SetLoggerError),

    #[cfg(unix)]
    #[error("Nix error: {0}")]
    Nix(#[from] nix::Error),

    #[error("Failed to get envelope timestamp: {0}")]
    Timestamp(#[from] std::time::SystemTimeError),

    #[error("Not Found: {0}")]
    NotFoundError(String),

    #[error("Failed pre condition: {0}")]
    FailedPreconditionError(String),

    #[cfg(unix)]
    #[error("{context} error: {err}")]
    MountError {
        context: String,
        #[source]
        err: nix::Error,
    },

    #[error("Failed to convert json object: {0}")]
    JSON(#[from] serde_json::Error),

    #[error("Failed to parse integer: {0}")]
    ParseInt(#[from] std::num::ParseIntError),

    #[error("Failed to send exit event: {0}")]
    Send(#[from] std::sync::mpsc::SendError<ExitEvent>),

    #[error("Deadline exceeded: {0}")]
    DeadlineExceeded(String),

    #[error("Other: {0}")]
    Other(String),

    #[error("Unimplemented method: {0}")]
    Unimplemented(String),
}

// Map shim errors onto ttrpc status codes so they travel back to containerd.
impl From<Error> for ttrpc::Error {
    fn from(e: Error) -> Self {
        match e {
            Error::InvalidArgument(ref s) => {
                ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INVALID_ARGUMENT, s))
            }
            Error::NotFoundError(ref s) => {
                ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::NOT_FOUND, s))
            }
            Error::FailedPreconditionError(ref s) => {
                ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::FAILED_PRECONDITION, s))
            }
            Error::Ttrpc(e) => e,
            _ => ttrpc::Error::Others(e.to_string()),
        }
    }
}

#[macro_export]
macro_rules! io_error {
    ($e:ident, $($args:tt)+) => {
        |$e| Error::IoError {
            context: format_args!($($args)+).to_string(),
            err: $e,
        }
    };
}

#[macro_export]
macro_rules! mount_error {
    ($e:ident, $($args:tt)+) => {
        |$e| Error::MountError {
            context: format_args!($($args)+).to_string(),
            err: $e,
        }
    };
}

#[macro_export]
macro_rules! other {
    ($($args:tt)*) => {
        Error::Other(format_args!($($args)*).to_string())
    };
}

#[macro_export]
macro_rules! other_error {
    ($s:expr) => {
        |e| Error::Other(format!("{}: {}", $s, e))
    };
}


================================================
FILE: crates/shim/src/event.rs
================================================
use containerd_shim_protos::{events::task::*, protobuf::MessageDyn};

/// A protobuf message that knows the containerd topic it is published under.
pub trait Event: MessageDyn {
    fn topic(&self) -> String;
}

impl Event for TaskCreate {
    fn topic(&self) -> String {
        "/tasks/create".to_string()
    }
}

impl Event for TaskStart {
    fn topic(&self) -> String {
        "/tasks/start".to_string()
    }
}

impl Event for TaskExecAdded {
    fn topic(&self) -> String {
        "/tasks/exec-added".to_string()
    }
}

impl Event for TaskExecStarted {
    fn topic(&self) -> String {
        "/tasks/exec-started".to_string()
    }
}

impl Event for TaskPaused {
    fn topic(&self) -> String {
        "/tasks/paused".to_string()
    }
}

impl Event for TaskResumed {
    fn topic(&self) -> String {
        "/tasks/resumed".to_string()
    }
}

impl Event for TaskExit {
    fn topic(&self) -> String {
        "/tasks/exit".to_string()
    }
}

impl Event for TaskDelete {
    fn topic(&self) -> String {
        "/tasks/delete".to_string()
    }
}

impl Event for TaskOOM {
    fn topic(&self) -> String {
        "/tasks/oom".to_string()
    }
}

impl Event for TaskCheckpointed {
    fn topic(&self) -> String {
        "/tasks/checkpointed".to_string()
    }
}


================================================
FILE: crates/shim/src/lib.rs
================================================
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License. */ #![cfg_attr(feature = "docs", doc = include_str!("../README.md"))] use std::{fs::File, path::PathBuf}; #[cfg(windows)] use std::{fs::OpenOptions, os::windows::prelude::OpenOptionsExt}; #[cfg(unix)] use std::{os::unix::net::UnixListener, path::Path}; pub use containerd_shim_protos as protos; pub use protos::{ shim::shim::DeleteResponse, ttrpc::{context::Context, Result as TtrpcResult}, }; use sha2::{Digest, Sha256}; #[cfg(windows)] use windows_sys::Win32::Storage::FileSystem::FILE_FLAG_OVERLAPPED; #[cfg(feature = "async")] pub use crate::asynchronous::*; pub use crate::error::{Error, Result}; #[cfg(not(feature = "async"))] pub use crate::synchronous::*; #[macro_use] pub mod error; mod args; pub use args::{parse, Flags}; #[cfg(feature = "async")] pub mod asynchronous; pub mod cgroup; pub mod event; pub mod logger; pub mod monitor; #[cfg(target_os = "linux")] pub mod mount_linux; #[cfg(not(target_os = "linux"))] pub mod mount_other; #[cfg(target_os = "linux")] pub use mount_linux as mount; #[cfg(not(target_os = "linux"))] pub use mount_other as mount; mod reap; #[cfg(not(feature = "async"))] pub mod synchronous; pub mod util; /// Generated request/response structures. pub mod api { pub use super::protos::{ api::Status, shim::{oci::Options, shim::*}, types::empty::Empty, }; } macro_rules! cfg_not_async { ($($item:item)*) => { $( #[cfg(not(feature = "async"))] #[cfg_attr(docsrs, doc(cfg(not(feature = "async"))))] $item )* } } macro_rules! cfg_async { ($($item:item)*) => { $( #[cfg(feature = "async")] #[cfg_attr(docsrs, doc(cfg(feature = "async")))] $item )* } } cfg_not_async! { pub use crate::synchronous::publisher; pub use protos::shim::shim_ttrpc::Task; pub use protos::ttrpc::TtrpcContext; } cfg_async! 
{ pub use crate::asynchronous::publisher; pub use protos::shim_async::Task; pub use protos::ttrpc::r#async::TtrpcContext; } const TTRPC_ADDRESS: &str = "TTRPC_ADDRESS"; /// Config of shim binary options provided by shim implementations #[derive(Debug)] pub struct Config { /// Disables automatic configuration of logrus to use the shim FIFO pub no_setup_logger: bool, // Sets the the default log level. Default is info pub default_log_level: String, /// Disables the shim binary from reaping any child process implicitly pub no_reaper: bool, /// Disables setting the shim as a child subreaper. pub no_sub_reaper: bool, } impl Default for Config { fn default() -> Self { Self { no_setup_logger: false, default_log_level: "info".to_string(), no_reaper: false, no_sub_reaper: false, } } } /// Startup options received from containerd to start new shim instance. /// /// These will be passed via [`Shim::start_shim`] to shim. #[derive(Debug, Default)] pub struct StartOpts { /// ID of the container. pub id: String, /// Binary path to publish events back to containerd. pub publish_binary: String, /// Address of the containerd's main socket. pub address: String, /// TTRPC socket address. pub ttrpc_address: String, /// Namespace for the container. pub namespace: String, pub debug: bool, } #[cfg(target_os = "linux")] pub const SOCKET_ROOT: &str = "/run/containerd"; #[cfg(target_os = "macos")] pub const SOCKET_ROOT: &str = "/var/run/containerd"; #[cfg(target_os = "windows")] pub const SOCKET_ROOT: &str = r"\\.\pipe\containerd-containerd"; /// Make socket path from containerd socket path, namespace and id. 
#[cfg_attr(feature = "tracing", tracing::instrument(level = "Info"))] pub fn socket_address(socket_path: &str, namespace: &str, id: &str) -> String { let path = PathBuf::from(socket_path) .join(namespace) .join(id) .display() .to_string(); let hash = { let mut hasher = Sha256::new(); hasher.update(path); hasher.finalize() }; if cfg!(unix) { format!("unix://{}/s/{:x}", SOCKET_ROOT, hash) } else if cfg!(windows) { format!(r"\\.\pipe\containerd-shim-{:x}-pipe", hash) } else { panic!("unsupported platform") } } #[cfg(unix)] fn parse_sockaddr(addr: &str) -> &str { if let Some(addr) = addr.strip_prefix("unix://") { return addr; } if let Some(addr) = addr.strip_prefix("vsock://") { return addr; } addr } #[cfg(windows)] fn start_listener(address: &str) -> std::io::Result<()> { let mut opts = OpenOptions::new(); opts.read(true) .write(true) .custom_flags(FILE_FLAG_OVERLAPPED); if let Ok(f) = opts.open(address) { info!("found existing named pipe: {}", address); drop(f); return Err(std::io::Error::new( std::io::ErrorKind::AddrInUse, "address already exists", )); } // windows starts the listener on the second invocation of the shim Ok(()) } #[cfg(unix)] fn start_listener(address: &str) -> std::io::Result { let path = parse_sockaddr(address); // Try to create the needed directory hierarchy. if let Some(parent) = Path::new(path).parent() { std::fs::create_dir_all(parent)?; } UnixListener::bind(path) } pub struct Console { pub file: File, } #[cfg(test)] mod tests { use crate::start_listener; #[test] #[cfg(unix)] fn test_start_listener() { let tmpdir = tempfile::tempdir().unwrap(); let path = tmpdir.path().to_str().unwrap().to_owned(); // A little dangerous, may be turned on under controlled environment. 
//assert!(start_listener("/").is_err()); //assert!(start_listener("/tmp").is_err()); let socket = path + "/ns1/id1/socket"; let _listener = start_listener(&socket).unwrap(); let _listener2 = start_listener(&socket).expect_err("socket should already in use"); let socket2 = socket + "/socket"; assert!(start_listener(&socket2).is_err()); let path = tmpdir.path().to_str().unwrap().to_owned(); let txt_file = path + "demo.txt"; std::fs::write(&txt_file, "test").unwrap(); assert!(start_listener(&txt_file).is_err()); let context = std::fs::read_to_string(&txt_file).unwrap(); assert_eq!(context, "test"); } #[test] #[cfg(windows)] fn test_start_listener_windows() { use mio::windows::NamedPipe; let named_pipe = "\\\\.\\pipe\\test-pipe-duplicate".to_string(); start_listener(&named_pipe).unwrap(); let _pipe_server = NamedPipe::new(named_pipe.clone()).unwrap(); start_listener(&named_pipe).expect_err("address already exists"); } } ================================================ FILE: crates/shim/src/logger.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ use std::{ borrow::BorrowMut, fmt::Write as fmtwrite, fs::{File, OpenOptions}, io::{self, Write}, path::Path, str::FromStr, sync::Mutex, }; use log::{ kv::{self, Visitor}, Metadata, Record, }; use time::{format_description::well_known::Rfc3339, OffsetDateTime}; use crate::error::Error; pub const LOG_ENV: &str = "RUST_LOG"; pub struct FifoLogger { file: Mutex, } impl FifoLogger { pub fn new(_namespace: &str, _id: &str) -> io::Result { #[cfg(unix)] let logger = Self::with_path("log")?; #[cfg(windows)] let logger = { let pipe_name = format!(r"\\.\pipe\containerd-shim-{_namespace}-{_id}-log"); Self::with_named_pipe(&pipe_name)? }; Ok(logger) } #[allow(dead_code)] pub fn with_path(path: impl AsRef) -> io::Result { let f = OpenOptions::new() .write(true) .read(false) .create(false) .open(path)?; Ok(FifoLogger::with_file(f)) } pub fn with_file(file: File) -> FifoLogger { let file = Mutex::new(file); FifoLogger { file } } #[cfg(windows)] pub fn with_named_pipe(name: &str) -> io::Result { // Containerd on windows expects the log to be a named pipe in the format of \\.\pipe\containerd---log // There is an assumption that there is always only one client connected which is containerd. // If there is a restart of containerd then logs during that time period will be lost. 
// // https://github.com/containerd/containerd/blob/v1.7.0/runtime/v2/shim_windows.go#L77 // https://github.com/microsoft/hcsshim/blob/5871d0c4436f131c377655a3eb09fc9b5065f11d/cmd/containerd-shim-runhcs-v1/serve.go#L132-L137 use std::os::windows::io::{AsRawHandle, BorrowedHandle}; use mio::{windows::NamedPipe, Events, Interest, Poll, Token}; let mut pipe_server = NamedPipe::new(name)?; let file = unsafe { BorrowedHandle::borrow_raw(pipe_server.as_raw_handle()) } .try_clone_to_owned()?; let file = File::from(file); let poll = Poll::new()?; poll.registry().register( &mut pipe_server, Token(0), Interest::READABLE | Interest::WRITABLE, )?; std::thread::spawn(move || { let pipe_server = pipe_server; let mut poll = poll; let mut events = Events::with_capacity(128); let _ = pipe_server.connect(); loop { poll.poll(&mut events, None).unwrap(); for event in events.iter() { if event.is_writable() { match pipe_server.connect() { Ok(()) => {} Err(e) if e.kind() == io::ErrorKind::Interrupted => { // this would block just keep processing } Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => { // this would block just keep processing } Err(e) => { panic!("Error connecting to client: {}", e); } }; } if event.is_readable() { pipe_server.disconnect().unwrap(); } } } }); Ok(FifoLogger::with_file(file)) } } pub(crate) struct SimpleWriteVistor { key_values: String, } impl<'kvs> Visitor<'kvs> for SimpleWriteVistor { fn visit_pair(&mut self, k: kv::Key<'kvs>, v: kv::Value<'kvs>) -> Result<(), kv::Error> { write!(&mut self.key_values, " {}=\"{}\"", k, v)?; Ok(()) } } impl SimpleWriteVistor { pub(crate) fn new() -> SimpleWriteVistor { SimpleWriteVistor { key_values: String::new(), } } pub(crate) fn as_str(&self) -> &str { &self.key_values } } impl log::Log for FifoLogger { fn enabled(&self, metadata: &Metadata) -> bool { metadata.level() <= log::max_level() } fn log(&self, record: &Record) { if self.enabled(record.metadata()) { let mut guard = self.file.lock().unwrap(); // collect 
key_values but don't fail if error parsing let mut writer = SimpleWriteVistor::new(); let _ = record.key_values().visit(&mut writer); // The logger server may have temporarily shutdown, ignore the error instead of panic. // // Manual for pipe/FIFO: https://man7.org/linux/man-pages/man7/pipe.7.html // If all file descriptors referring to the read end of a pipe have been closed, then // a write(2) will cause a SIGPIPE signal to be generated for the calling process. // If the calling process is ignoring this signal, then write(2) fails with the error // EPIPE. let _ = writeln!( guard.borrow_mut(), "time=\"{}\" level={}{} msg=\"{}\"\n", rfc3339_formated(), record.level().as_str().to_lowercase(), writer.as_str(), record.args() ); } } fn flush(&self) { // The logger server may have temporarily shutdown, ignore the error instead of panic. let _ = self.file.lock().unwrap().flush(); } } pub fn init(debug: bool, default_log_level: &str, namespace: &str, id: &str) -> Result<(), Error> { let logger = FifoLogger::new(namespace, id).map_err(io_error!(e, "failed to init logger"))?; configure_logging_level(debug, default_log_level); log::set_boxed_logger(Box::new(logger))?; Ok(()) } fn configure_logging_level(debug: bool, default_log_level: &str) { let debug_level = std::env::var(LOG_ENV).unwrap_or(default_log_level.to_string()); let debug_level = log::LevelFilter::from_str(&debug_level).unwrap_or(log::LevelFilter::Info); let level = if debug && log::LevelFilter::Debug > debug_level { log::LevelFilter::Debug } else { debug_level }; log::set_max_level(level); } pub(crate) fn rfc3339_formated() -> String { OffsetDateTime::now_utc() .format(&Rfc3339) .unwrap_or(OffsetDateTime::now_utc().to_string()) } #[cfg(test)] mod tests { use std::fs; use log::{Log, Record}; use super::*; use crate::Config; #[test] fn test_init_log_level() -> Result<(), Error> { let config = Config::default(); configure_logging_level(false, &config.default_log_level); assert_eq!(log::LevelFilter::Info, 
log::max_level()); // Default for debug flag from containerd configure_logging_level(true, &config.default_log_level); assert_eq!(log::LevelFilter::Debug, log::max_level()); // ENV different than default std::env::set_var(LOG_ENV, "error"); configure_logging_level(false, &config.default_log_level); assert_eq!(log::LevelFilter::Error, log::max_level()); std::env::set_var(LOG_ENV, "warn"); configure_logging_level(false, &config.default_log_level); assert_eq!(log::LevelFilter::Warn, log::max_level()); std::env::set_var(LOG_ENV, "off"); configure_logging_level(false, &config.default_log_level); assert_eq!(log::LevelFilter::Off, log::max_level()); std::env::set_var(LOG_ENV, "trace"); configure_logging_level(false, &config.default_log_level); assert_eq!(log::LevelFilter::Trace, log::max_level()); std::env::set_var(LOG_ENV, "debug"); configure_logging_level(false, &config.default_log_level); // ENV Different than default from debug flag configure_logging_level(true, &config.default_log_level); assert_eq!(log::LevelFilter::Debug, log::max_level()); std::env::set_var(LOG_ENV, "trace"); configure_logging_level(true, &config.default_log_level); assert_eq!(log::LevelFilter::Trace, log::max_level()); std::env::set_var(LOG_ENV, "info"); configure_logging_level(true, &config.default_log_level); assert_eq!(log::LevelFilter::Debug, log::max_level()); std::env::set_var(LOG_ENV, "off"); configure_logging_level(true, &config.default_log_level); assert_eq!(log::LevelFilter::Debug, log::max_level()); Ok(()) } #[test] fn test_fifo_log() { #[cfg(unix)] use nix::{sys::stat, unistd}; let tmpdir = tempfile::tempdir().unwrap(); let path = tmpdir.path().to_str().unwrap().to_owned() + "/log"; #[cfg(unix)] unistd::mkfifo(Path::new(&path), stat::Mode::S_IRWXU).unwrap(); #[cfg(windows)] File::create(path.clone()).unwrap(); let path1 = path.clone(); let thread = std::thread::spawn(move || { let _fifo = OpenOptions::new() .write(false) .read(true) .create(false) .open(path1) .unwrap(); }); let 
logger = FifoLogger::with_path(&path).unwrap(); //log::set_boxed_logger(Box::new(logger)).map_err(Error::Setup)?; log::set_max_level(log::LevelFilter::Info); thread.join().unwrap(); let kvs: &[(&str, i32)] = &[("a", 1), ("b", 2)]; let record = Record::builder() .level(log::Level::Error) .line(Some(1)) .file(Some("sample file")) .key_values(&kvs) .build(); logger.log(&record); logger.flush(); } #[test] fn test_supports_structured_logging() { let tmpdir = tempfile::tempdir().unwrap(); let path = tmpdir.path().to_str().unwrap().to_owned() + "/log"; File::create(path.clone()).unwrap(); let logger = FifoLogger::with_path(&path).unwrap(); log::set_max_level(log::LevelFilter::Info); let record = Record::builder() .level(log::Level::Info) .args(format_args!("no keys")) .build(); logger.log(&record); logger.flush(); let contents = fs::read_to_string(path.clone()).unwrap(); assert!(contents.contains("level=info msg=\"no keys\"")); let kvs: &[(&str, i32)] = &[("key", 1), ("b", 2)]; let record = Record::builder() .level(log::Level::Error) .key_values(&kvs) .args(format_args!("structured!")) .build(); logger.log(&record); logger.flush(); let contents = fs::read_to_string(path).unwrap(); assert!(contents.contains("level=error key=\"1\" b=\"2\" msg=\"structured!\"")); } } #[cfg(all(windows, test))] mod windows_tests { use std::{ fs::OpenOptions, io::Read, os::windows::{ fs::OpenOptionsExt, io::{FromRawHandle, IntoRawHandle}, prelude::AsRawHandle, }, time::Duration, }; use log::{Log, Record}; use mio::{windows::NamedPipe, Events, Interest, Poll, Token}; use windows_sys::Win32::{ Foundation::ERROR_PIPE_NOT_CONNECTED, Storage::FileSystem::FILE_FLAG_OVERLAPPED, }; use super::*; #[test] fn test_namedpipe_log_can_write_before_client_connected() { let ns = "test".to_string(); let id = "notconnected".to_string(); let logger = FifoLogger::new(&ns, &id).unwrap(); // test can write before a reader is connected (should succeed but the messages will be dropped) 
log::set_max_level(log::LevelFilter::Info); let record = Record::builder() .level(log::Level::Info) .line(Some(1)) .file(Some("sample file")) .args(format_args!("hello")) .build(); logger.log(&record); logger.flush(); } #[test] fn test_namedpipe_log() { use std::fs::File; let ns = "test".to_string(); let id = "clients".to_string(); let pipe_name = format!("\\\\.\\pipe\\containerd-shim-{}-{}-log", ns, id); let logger = FifoLogger::new(&ns, &id).unwrap(); let mut client = create_client(pipe_name.as_str()); log::set_max_level(log::LevelFilter::Info); let kvs: &[(&str, i32)] = &[("key", 1), ("b", 2)]; let record = Record::builder() .level(log::Level::Info) .line(Some(1)) .key_values(&kvs) .args(format_args!("hello")) .build(); logger.log(&record); logger.flush(); let buf = read_message(&mut client, 73); let message = std::str::from_utf8(&buf).unwrap(); assert!(message.starts_with("time=\""), "message was: {:?}", message); assert!( message.contains("level=info key=\"1\" b=\"2\" msg=\"hello\"\n"), "message was: {:?}", message ); // test that we can reconnect after a reader disconnects // we need to get the raw handle and drop that as well to force full disconnect // and give a few milliseconds for the disconnect to happen println!("dropping client"); let handle = client.as_raw_handle(); drop(client); let f = unsafe { File::from_raw_handle(handle) }; drop(f); std::thread::sleep(Duration::from_millis(100)); let mut client2 = create_client(pipe_name.as_str()); logger.log(&record); logger.flush(); read_message(&mut client2, 51); } fn read_message(client: &mut NamedPipe, length: usize) -> Vec { let mut poll = Poll::new().unwrap(); poll.registry() .register(client, Token(1), Interest::READABLE) .unwrap(); let mut events = Events::with_capacity(128); let mut buf = vec![0; length]; loop { poll.poll(&mut events, Some(Duration::from_millis(10))) .unwrap(); match client.read(&mut buf) { Ok(0) => { panic!("Read no bytes from pipe") } Ok(_) => { break; } Err(e) if e.raw_os_error() == 
Some(ERROR_PIPE_NOT_CONNECTED as i32) => { panic!("not connected to the pipe"); } Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => { continue; } Err(e) => panic!("Error reading from pipe: {}", e), } } buf.to_vec() } fn create_client(pipe_name: &str) -> mio::windows::NamedPipe { let mut opts = OpenOptions::new(); opts.read(true) .write(true) .custom_flags(FILE_FLAG_OVERLAPPED); let file = opts.open(pipe_name).unwrap(); unsafe { NamedPipe::from_raw_handle(file.into_raw_handle()) } } } ================================================ FILE: crates/shim/src/monitor.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/

use std::fmt;

#[cfg(feature = "async")]
pub use crate::asynchronous::monitor::*;
#[cfg(not(feature = "async"))]
pub use crate::synchronous::monitor::*;

/// Topic a monitor subscriber can filter exit events by.
#[derive(Clone, Eq, Hash, PartialEq)]
pub enum Topic {
    /// Only plain process (PID) exits.
    Pid,
    /// Only exec-process exits.
    Exec,
    /// All exit events.
    All,
}

/// Notification that a monitored subject exited.
#[derive(Debug)]
pub struct ExitEvent {
    // What kind of thing exited.
    pub subject: Subject,
    pub exit_code: i32,
}

impl fmt::Display for ExitEvent {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match &self.subject {
            Subject::Pid(pid) => {
                write!(f, "PID {} exit with code {}", pid, self.exit_code)
            }
            Subject::Exec(cid, eid) => {
                write!(
                    f,
                    "EXEC process {} inside {} exit with code {}",
                    eid, cid, self.exit_code
                )
            }
        }
    }
}

/// What exited: a raw PID or an exec process identified by container/exec id.
#[derive(Clone, Debug)]
pub enum Subject {
    // process pid
    Pid(i32),
    // exec with containerd id and exec id for vm container,
    // if exec is empty, then the event is for the container
    Exec(String, String),
}

================================================
FILE: crates/shim/src/mount_linux.rs
================================================

/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/ use std::{ collections::HashMap, env, fs::File, io::BufRead, ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, Not}, os::fd::AsRawFd, path::Path, sync::LazyLock, }; #[cfg(not(feature = "async"))] use log::error; use nix::mount::{mount, MntFlags, MsFlags}; #[cfg(feature = "async")] use nix::sched::{unshare, CloneFlags}; #[cfg(not(feature = "async"))] use nix::unistd::{fork, ForkResult}; use crate::error::{Error, Result}; #[cfg(not(feature = "async"))] use crate::monitor::{monitor_subscribe, wait_pid, Topic}; struct Flag { clear: bool, flags: MsFlags, } #[cfg(target_os = "linux")] #[derive(Debug, Default)] pub struct LoopParams { readonly: bool, auto_clear: bool, direct: bool, } #[repr(C)] #[derive(Debug)] pub struct LoopInfo { device: u64, inode: u64, rdevice: u64, offset: u64, size_limit: u64, number: u32, encrypt_type: u32, encrypt_key_size: u32, flags: u32, file_name: [u8; 64], crypt_name: [u8; 64], encrypt_key: [u8; 32], init: [u64; 2], } impl Default for LoopInfo { fn default() -> Self { LoopInfo { device: 0, inode: 0, rdevice: 0, offset: 0, size_limit: 0, number: 0, encrypt_type: 0, encrypt_key_size: 0, flags: 0, file_name: [0; 64], crypt_name: [0; 64], encrypt_key: [0; 32], init: [0; 2], } } } const LOOP_CONTROL_PATH: &str = "/dev/loop-control"; #[cfg(feature = "async")] const LOOP_DEV_FORMAT: &str = "/dev/loop"; #[cfg(feature = "async")] const EBUSY_STRING: &str = "device or resource busy"; const OVERLAY_LOWERDIR_PREFIX: &str = "lowerdir="; #[allow(dead_code)] #[derive(Debug, Default, Clone)] struct MountInfo { /// id is a unique identifier of the mount (may be reused after umount). pub id: u32, /// parent is the ID of the parent mount (or of self for the root /// of this mount namespace's mount tree). pub parent: u32, /// major and minor are the major and the minor components of the Dev /// field of unix.Stat_t structure returned by unix.*Stat calls for /// files on this filesystem. 
pub major: u32, pub minor: u32, /// root is the pathname of the directory in the filesystem which forms /// the root of this mount. pub root: String, /// mountpoint is the pathname of the mount point relative to the /// process's root directory. pub mountpoint: String, /// options is a comma-separated list of mount options. pub options: String, /// optional are zero or more fields of the form "tag[:value]", /// separated by a space. Currently, the possible optional fields are /// "shared", "master", "propagate_from", and "unbindable". For more /// information, see mount_namespaces(7) Linux man page. pub optional: String, /// fs_type is the filesystem type in the form "type[.subtype]". pub fs_type: String, /// source is filesystem-specific information, or "none". pub source: String, /// vfs_options is a comma-separated list of superblock options. pub vfs_options: String, } static MOUNT_FLAGS: LazyLock> = LazyLock::new(|| { let mut mf = HashMap::new(); let zero: MsFlags = MsFlags::empty(); mf.insert( "async", Flag { clear: true, flags: MsFlags::MS_SYNCHRONOUS, }, ); mf.insert( "atime", Flag { clear: true, flags: MsFlags::MS_NOATIME, }, ); mf.insert( "bind", Flag { clear: false, flags: MsFlags::MS_BIND, }, ); mf.insert( "defaults", Flag { clear: false, flags: zero, }, ); mf.insert( "dev", Flag { clear: true, flags: MsFlags::MS_NODEV, }, ); mf.insert( "diratime", Flag { clear: true, flags: MsFlags::MS_NODIRATIME, }, ); mf.insert( "dirsync", Flag { clear: false, flags: MsFlags::MS_DIRSYNC, }, ); mf.insert( "exec", Flag { clear: true, flags: MsFlags::MS_NOEXEC, }, ); mf.insert( "mand", Flag { clear: false, flags: MsFlags::MS_MANDLOCK, }, ); mf.insert( "noatime", Flag { clear: false, flags: MsFlags::MS_NOATIME, }, ); mf.insert( "nodev", Flag { clear: false, flags: MsFlags::MS_NODEV, }, ); mf.insert( "nodiratime", Flag { clear: false, flags: MsFlags::MS_NODIRATIME, }, ); mf.insert( "noexec", Flag { clear: false, flags: MsFlags::MS_NOEXEC, }, ); mf.insert( "nomand", Flag { 
clear: true, flags: MsFlags::MS_MANDLOCK, }, ); mf.insert( "norelatime", Flag { clear: true, flags: MsFlags::MS_RELATIME, }, ); mf.insert( "nostrictatime", Flag { clear: true, flags: MsFlags::MS_STRICTATIME, }, ); mf.insert( "nosuid", Flag { clear: false, flags: MsFlags::MS_NOSUID, }, ); mf.insert( "rbind", Flag { clear: false, flags: MsFlags::MS_BIND.union(MsFlags::MS_REC), }, ); mf.insert( "relatime", Flag { clear: false, flags: MsFlags::MS_RELATIME, }, ); mf.insert( "remount", Flag { clear: false, flags: MsFlags::MS_REMOUNT, }, ); mf.insert( "ro", Flag { clear: false, flags: MsFlags::MS_RDONLY, }, ); mf.insert( "rw", Flag { clear: true, flags: MsFlags::MS_RDONLY, }, ); mf.insert( "strictatime", Flag { clear: false, flags: MsFlags::MS_STRICTATIME, }, ); mf.insert( "suid", Flag { clear: true, flags: MsFlags::MS_NOSUID, }, ); mf.insert( "sync", Flag { clear: false, flags: MsFlags::MS_SYNCHRONOUS, }, ); mf }); const PROPAGATION_TYPES: MsFlags = MsFlags::MS_SHARED .union(MsFlags::MS_PRIVATE) .union(MsFlags::MS_SLAVE) .union(MsFlags::MS_UNBINDABLE); const MS_PROPAGATION: MsFlags = PROPAGATION_TYPES .union(MsFlags::MS_REC) .union(MsFlags::MS_SILENT); const MS_BIND_RO: MsFlags = MsFlags::MS_BIND.union(MsFlags::MS_RDONLY); fn page_size() -> usize { let ret = unsafe { libc::sysconf(libc::_SC_PAGESIZE) }; assert!(ret > 0, "sysconf(_SC_PAGESIZE) failed"); ret as usize } fn options_size(options: &[String]) -> usize { options.iter().fold(0, |sum, x| sum + x.len()) } fn longest_common_prefix(dirs: &[String]) -> &str { if dirs.is_empty() { return ""; } let first_dir = &dirs[0]; for (i, byte) in first_dir.as_bytes().iter().enumerate() { for dir in dirs { if dir.as_bytes().get(i) != Some(byte) { let mut end = i; // guaranteed not to underflow since is_char_boundary(0) is always true while !first_dir.is_char_boundary(end) { end -= 1; } return &first_dir[0..end]; } } } first_dir } // NOTE: the snapshot id is based on digits. 
// in order to avoid to get snapshots/x, should be back to parent dir. // however, there is assumption that the common dir is ${root}/io.containerd.v1.overlayfs/snapshots. fn trim_flawed_dir(s: &str) -> String { s[0..s.rfind('/').unwrap_or(0) + 1].to_owned() } #[derive(Default)] struct LowerdirCompactor { options: Vec, lowerdirs: Option>, lowerdir_prefix: Option, } impl LowerdirCompactor { fn new(options: &[String]) -> Self { Self { options: options.to_vec(), ..Self::default() } } fn lowerdirs(&mut self) -> &mut Self { self.lowerdirs = Some( self.options .iter() .filter(|x| x.starts_with(OVERLAY_LOWERDIR_PREFIX)) .map(|x| x.strip_prefix(OVERLAY_LOWERDIR_PREFIX).unwrap_or(x)) .flat_map(|x| x.split(':')) .map(str::to_string) .collect(), ); self } fn lowerdir_prefix(&mut self) -> &mut Self { self.lowerdir_prefix = self .lowerdirs .as_ref() .filter(|x| x.len() > 1) .map(|x| longest_common_prefix(x)) .map(trim_flawed_dir) .filter(|x| !x.is_empty() && x != "/"); self } fn compact(&mut self) -> (Option, Vec) { self.lowerdirs().lowerdir_prefix(); if let Some(chdir) = &self.lowerdir_prefix { let lowerdir_str = self .lowerdirs .as_ref() .unwrap_or(&Vec::new()) .iter() .map(|x| x.strip_prefix(chdir).unwrap_or(x)) .collect::>() .join(":"); let replace = |x: &str| -> String { if x.starts_with(OVERLAY_LOWERDIR_PREFIX) { format!("{}{}", OVERLAY_LOWERDIR_PREFIX, lowerdir_str) } else { x.to_string() } }; ( self.lowerdir_prefix.clone(), self.options .iter() .map(|x| replace(x)) .collect::>(), ) } else { (None, self.options.to_vec()) } } } enum MountExitCode { NixUnknownErr, ChdirErr, Success, NixOtherErr(i32), } impl From for MountExitCode { fn from(code: i32) -> Self { match code { -2 => MountExitCode::NixUnknownErr, -1 => MountExitCode::ChdirErr, 0 => MountExitCode::Success, _ => MountExitCode::NixOtherErr(code), } } } impl From for i32 { fn from(code: MountExitCode) -> Self { match code { MountExitCode::NixUnknownErr => -2, MountExitCode::ChdirErr => -1, MountExitCode::Success => 
0, MountExitCode::NixOtherErr(errno) => errno, } } } impl From for MountExitCode { fn from(err: nix::errno::Errno) -> Self { match err { nix::errno::Errno::UnknownErrno => MountExitCode::NixUnknownErr, _ => MountExitCode::NixOtherErr(err as i32), } } } impl From for nix::errno::Errno { fn from(code: MountExitCode) -> Self { match code { MountExitCode::NixOtherErr(errno) => nix::errno::Errno::from_raw(errno), _ => nix::errno::Errno::UnknownErrno, } } } impl From for Result<()> { fn from(code: MountExitCode) -> Self { match code { MountExitCode::NixUnknownErr => Err(other!( "mount process exit unexpectedly, exit code: {}", nix::errno::Errno::from(code) )), MountExitCode::ChdirErr => Err(other!("mount process exit unexpectedly: chdir failed")), MountExitCode::Success => Ok(()), MountExitCode::NixOtherErr(errno) => Err(other!( "mount process exit unexpectedly, exit code: {}", nix::errno::Errno::from_raw(errno) )), } } } #[cfg(not(feature = "async"))] pub fn mount_rootfs( fs_type: Option<&str>, source: Option<&str>, options: &[String], target: impl AsRef, ) -> Result<()> { //TODO add helper to mount fuse let max_size = page_size(); // avoid hitting one page limit of mount argument buffer // // NOTE: 512 id a buffer during pagesize check. let (chdir, options) = if fs_type.unwrap_or("") == "overlay" && options_size(options) >= max_size - 512 { LowerdirCompactor::new(options).compact() } else { (None, options.to_vec()) }; let mut flags: MsFlags = MsFlags::empty(); let mut data = Vec::new(); options.iter().for_each(|x| { if let Some(f) = MOUNT_FLAGS.get(x.as_str()) { if f.clear { flags.bitand_assign(f.flags.not()); } else { flags.bitor_assign(f.flags) } } else { data.push(x.as_str()) } }); let opt = data.join(","); if opt.len() > max_size { return Err(other!("mount option is too long")); } let data = if !data.is_empty() { Some(opt.as_str()) } else { None }; let s = monitor_subscribe(Topic::All)?; match unsafe { fork() } { Ok(ForkResult::Parent { child, .. 
}) => { let code: MountExitCode = wait_pid(i32::from(child), s).into(); code.into() } Ok(ForkResult::Child) => { if let Some(workdir) = chdir { env::set_current_dir(Path::new(&workdir)).unwrap_or_else(|_| { unsafe { libc::_exit(i32::from(MountExitCode::ChdirErr)) }; }); } // mount with non-propagation first, or remount with changed data let oflags = flags.bitand(PROPAGATION_TYPES.not()); let zero: MsFlags = MsFlags::empty(); if flags.bitand(MsFlags::MS_REMOUNT).eq(&zero) || data.is_some() { mount(source, target.as_ref(), fs_type, oflags, data).unwrap_or_else(|err| { error!( "Mount {:?} to {} failed: {}", source, target.as_ref().display(), err ); let code: MountExitCode = err.into(); unsafe { libc::_exit(code.into()) }; }); } // change the propagation type if flags.bitand(PROPAGATION_TYPES).ne(&zero) { mount::( None, target.as_ref(), None, flags.bitand(MS_PROPAGATION), None, ) .unwrap_or_else(|err| { error!( "Change {} mount propagation faied: {}", target.as_ref().display(), err ); let code: MountExitCode = err.into(); unsafe { libc::_exit(code.into()) }; }); } if oflags.bitand(MS_BIND_RO).eq(&MS_BIND_RO) { mount::( None, target.as_ref(), None, oflags.bitor(MsFlags::MS_REMOUNT), None, ) .unwrap_or_else(|err| { error!( "Change {} read-only failed: {}", target.as_ref().display(), err ); let code: MountExitCode = err.into(); unsafe { libc::_exit(code.into()) }; }); } unsafe { libc::_exit(i32::from(MountExitCode::Success)) }; } Err(_) => Err(other!("fork mount process failed")), } } #[cfg(feature = "async")] pub fn mount_rootfs( fs_type: Option<&str>, source: Option<&str>, options: &[String], target: impl AsRef, ) -> Result<()> { //TODO add helper to mount fuse let max_size = page_size(); // NOTE: 512 id a buffer during pagesize check. 
let (chdir, options) = if fs_type.unwrap_or("") == "overlay" && options_size(options) >= max_size - 512 { LowerdirCompactor::new(options).compact() } else { (None, options.to_vec()) }; let mut flags: MsFlags = MsFlags::empty(); let mut data = Vec::new(); let mut lo_setup = false; let mut loop_params = LoopParams::default(); options.iter().for_each(|x| { if let Some(f) = MOUNT_FLAGS.get(x.as_str()) { if f.clear { flags.bitand_assign(f.flags.not()); } else { flags.bitor_assign(f.flags) } } else if x.as_str() == "loop" { lo_setup = true; } else { data.push(x.as_str()) } }); let opt = data.join(","); let data = if !data.is_empty() { Some(opt.as_str()) } else { None }; if let Some(workdir) = chdir { unshare(CloneFlags::CLONE_FS)?; env::set_current_dir(Path::new(&workdir)).unwrap_or_else(|_| { unsafe { libc::_exit(i32::from(MountExitCode::ChdirErr)) }; }); } // mount with non-propagation first, or remount with changed data let oflags = flags.bitand(PROPAGATION_TYPES.not()); if lo_setup { loop_params = LoopParams { readonly: oflags.bitand(MsFlags::MS_RDONLY) == MsFlags::MS_RDONLY, auto_clear: true, direct: false, }; } let zero: MsFlags = MsFlags::empty(); if flags.bitand(MsFlags::MS_REMOUNT).eq(&zero) || data.is_some() { let lo_file: String; let s = if lo_setup { lo_file = setup_loop(source, loop_params)?; Some(lo_file.as_str()) } else { source }; mount(s, target.as_ref(), fs_type, oflags, data).map_err(mount_error!( e, "Mount {:?} to {}", source, target.as_ref().display() ))?; } // change the propagation type if flags.bitand(PROPAGATION_TYPES).ne(&zero) { mount::(None, target.as_ref(), None, MS_PROPAGATION, None).map_err( mount_error!(e, "Change {} mount propagation", target.as_ref().display()), )?; } if oflags.bitand(MS_BIND_RO).eq(&MS_BIND_RO) { mount::( None, target.as_ref(), None, oflags.bitor(MsFlags::MS_REMOUNT), None, ) .map_err(mount_error!( e, "Change {} read-only", target.as_ref().display() ))?; } Ok(()) } #[cfg(feature = "async")] fn setup_loop(source: 
Option<&str>, params: LoopParams) -> Result { let src = source.ok_or(other!("loop source is None"))?; for _ in 0..100 { let num = get_free_loop_dev()?; let loop_dev = format!("{}{}", LOOP_DEV_FORMAT, num); match setup_loop_dev(src, loop_dev.as_str(), ¶ms) { Ok(_) => return Ok(loop_dev), Err(e) => { if e.to_string().contains(EBUSY_STRING) { continue; } else { return Err(e); } } } } Err(Error::Other( "creating new loopback device after 100 times".to_string(), )) } pub fn get_free_loop_dev() -> Result { const LOOP_CTL_GET_FREE: i32 = 0x4c82; let loop_control = File::options() .read(true) .write(true) .open(LOOP_CONTROL_PATH) .map_err(|e| Error::IoError { context: format!("open {} error: ", LOOP_CONTROL_PATH), err: e, })?; unsafe { #[cfg(target_env = "gnu")] let ret = libc::ioctl( loop_control.as_raw_fd() as libc::c_int, LOOP_CTL_GET_FREE as libc::c_ulong, ) as i32; #[cfg(target_env = "musl")] let ret = libc::ioctl( loop_control.as_raw_fd() as libc::c_int, LOOP_CTL_GET_FREE as libc::c_int, ) as i32; match nix::errno::Errno::result(ret) { Ok(ret) => Ok(ret), Err(e) => Err(Error::Nix(e)), } } } pub fn setup_loop_dev(backing_file: &str, loop_dev: &str, params: &LoopParams) -> Result { const LOOP_SET_FD: u32 = 0x4c00; const LOOP_CLR_FD: u32 = 0x4c01; const LOOP_SET_STATUS64: u32 = 0x4c04; const LOOP_SET_DIRECT_IO: u32 = 0x4c08; const LO_FLAGS_READ_ONLY: u32 = 0x1; const LO_FLAGS_AUTOCLEAR: u32 = 0x4; let mut open_options = File::options(); open_options.read(true); if !params.readonly { open_options.write(true); } // 1. open backing file let back = open_options .open(backing_file) .map_err(|e| Error::IoError { context: format!("open {} error: ", backing_file), err: e, })?; let loop_dev = open_options.open(loop_dev).map_err(|e| Error::IoError { context: format!("open {} error: ", loop_dev), err: e, })?; // 2. 
set FD unsafe { #[cfg(target_env = "gnu")] let ret = libc::ioctl( loop_dev.as_raw_fd() as libc::c_int, LOOP_SET_FD as libc::c_ulong, back.as_raw_fd() as libc::c_int, ); #[cfg(target_env = "musl")] let ret = libc::ioctl( loop_dev.as_raw_fd() as libc::c_int, LOOP_SET_FD as libc::c_int, back.as_raw_fd() as libc::c_int, ); if let Err(e) = nix::errno::Errno::result(ret) { return Err(Error::Nix(e)); } } // 3. set info let mut info = LoopInfo::default(); let backing_file_truncated = if backing_file.len() > info.file_name.len() { &backing_file[0..info.file_name.len()] } else { backing_file }; info.file_name[..backing_file_truncated.len()] .copy_from_slice(backing_file_truncated.as_bytes()); if params.readonly { info.flags |= LO_FLAGS_READ_ONLY; } if params.auto_clear { info.flags |= LO_FLAGS_AUTOCLEAR; } unsafe { #[cfg(target_env = "gnu")] let ret = libc::ioctl( loop_dev.as_raw_fd() as libc::c_int, LOOP_SET_STATUS64 as libc::c_ulong, &info, ); #[cfg(target_env = "musl")] let ret = libc::ioctl( loop_dev.as_raw_fd() as libc::c_int, LOOP_SET_STATUS64 as libc::c_int, &info, ); #[cfg(target_env = "gnu")] if let Err(e) = nix::errno::Errno::result(ret) { libc::ioctl( loop_dev.as_raw_fd() as libc::c_int, LOOP_CLR_FD as libc::c_ulong, 0, ); return Err(Error::Nix(e)); } #[cfg(target_env = "musl")] if let Err(e) = nix::errno::Errno::result(ret) { libc::ioctl( loop_dev.as_raw_fd() as libc::c_int, LOOP_CLR_FD as libc::c_int, 0, ); return Err(Error::Nix(e)); } } // 4. 
// Set Direct IO
    if params.direct {
        unsafe {
            // ioctl request type differs between glibc (c_ulong) and musl (c_int).
            #[cfg(target_env = "gnu")]
            let ret = libc::ioctl(
                loop_dev.as_raw_fd() as libc::c_int,
                LOOP_SET_DIRECT_IO as libc::c_ulong,
                1,
            );
            #[cfg(target_env = "musl")]
            let ret = libc::ioctl(
                loop_dev.as_raw_fd() as libc::c_int,
                LOOP_SET_DIRECT_IO as libc::c_int,
                1,
            );
            if let Err(e) = nix::errno::Errno::result(ret) {
                // Enabling direct IO failed: detach the loop device before
                // returning so the device is not leaked.
                #[cfg(target_env = "gnu")]
                libc::ioctl(
                    loop_dev.as_raw_fd() as libc::c_int,
                    LOOP_CLR_FD as libc::c_ulong,
                    0,
                );
                #[cfg(target_env = "musl")]
                libc::ioctl(
                    loop_dev.as_raw_fd() as libc::c_int,
                    LOOP_CLR_FD as libc::c_int,
                    0,
                );
                return Err(Error::Nix(e));
            }
        }
    }

    Ok(loop_dev)
}

/// Unmount all mounts at or below `target`, deepest mountpoint first.
///
/// A `None` target is a no-op. `flags` are raw `MntFlags` bits passed
/// through to `umount2(2)`.
pub fn umount_recursive(target: Option<&str>, flags: i32) -> Result<()> {
    if let Some(target) = target {
        let mut mounts = get_mounts(Some(prefix_filter(target.to_string())))?;
        // Sort by mountpoint length, descending, so nested mounts are
        // unmounted before their parents.
        mounts.sort_by(|a, b| b.mountpoint.len().cmp(&a.mountpoint.len()));
        for target in &mounts {
            umount_all(Some(target.mountpoint.clone()), flags)?;
        }
    };
    Ok(())
}

/// Repeatedly unmount `target` until nothing is mounted there, which
/// handles stacked mounts on the same mountpoint.
///
/// `EINVAL` from `umount2` means "not a mount point" and terminates the
/// loop successfully; a missing path is also treated as success.
fn umount_all(target: Option<String>, flags: i32) -> Result<()> {
    if let Some(target) = target {
        if let Err(e) = std::fs::metadata(target.clone()) {
            if e.kind() == std::io::ErrorKind::NotFound {
                return Ok(());
            }
        }
        loop {
            if let Err(e) = nix::mount::umount2(
                &std::path::PathBuf::from(&target),
                MntFlags::from_bits(flags).unwrap_or(MntFlags::empty()),
            ) {
                if e == nix::errno::Errno::EINVAL {
                    return Ok(());
                }
                return Err(Error::from(e));
            }
        }
    };
    Ok(())
}

/// Returns a filter that is true for mountpoints NOT under `prefix`.
/// `get_mounts` skips every entry for which the filter returns true, so
/// this keeps only mounts at or below `prefix`.
fn prefix_filter(prefix: String) -> impl Fn(MountInfo) -> bool {
    move |m: MountInfo| !(m.mountpoint.clone() + "/").starts_with(&(prefix.clone() + "/"))
}

/// Parse `/proc/self/mountinfo`, optionally skipping entries for which
/// the filter `f` returns true. Lines that fail to parse are silently
/// dropped rather than failing the whole listing.
fn get_mounts<F>(f: Option<F>) -> Result<Vec<MountInfo>>
where
    F: Fn(MountInfo) -> bool,
{
    let mountinfo_path = "/proc/self/mountinfo";
    let file = std::fs::File::open(mountinfo_path).map_err(io_error!(e, "io_error"))?;
    let reader = std::io::BufReader::new(file);
    let lines: Vec<String> = reader.lines().map_while(|line| line.ok()).collect();
    let mount_points = lines
        .into_iter()
        .filter_map(|line| {
            /*
               See http://man7.org/linux/man-pages/man5/proc.5.html

               36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
               (1)(2)(3)   (4)   (5)      (6)      (7)   (8) (9)   (10)         (11)

               (1) mount ID: unique identifier of the mount (may be reused after umount)
               (2) parent ID: ID of parent (or of self for the top of the mount tree)
               (3) major:minor: value of st_dev for files on filesystem
               (4) root: root of the mount within the filesystem
               (5) mount point: mount point relative to the process's root
               (6) mount options: per mount options
               (7) optional fields: zero or more fields of the form "tag[:value]"
               (8) separator: marks the end of the optional fields
               (9) filesystem type: name of filesystem of the form "type[.subtype]"
               (10) mount source: filesystem specific information or "none"
               (11) super options: per super block options

               In other words, we have:
                * 6 mandatory fields (1)..(6)
                * 0 or more optional fields (7)
                * a separator field (8)
                * 3 mandatory fields (9)..(11)
            */
            let parts: Vec<&str> = line.split_whitespace().collect();
            if parts.len() < 10 {
                // mountpoint parse error.
                return None;
            }
            // separator field
            let mut sep_idx = parts.len() - 4;
            // In Linux <= 3.9 mounting a cifs with spaces in a share
            // name (like "//srv/My Docs") _may_ end up having a space
            // in the last field of mountinfo (like "unc=//serv/My Docs").
            // Since kernel 3.10-rc1, cifs option "unc=" is ignored,
            // so spaces should not appear.
            //
            // Check for a separator, and work around the spaces bug
            for i in (0..sep_idx).rev() {
                if parts[i] == "-" {
                    sep_idx = i;
                    break;
                }
                if sep_idx == 5 {
                    // mountpoint parse error
                    return None;
                }
            }
            let mut mount_info = MountInfo {
                // NOTE(review): numeric field widths assumed to match the
                // MountInfo definition (u32) — confirm against its declaration.
                id: str::parse::<u32>(parts[0]).ok()?,
                parent: str::parse::<u32>(parts[1]).ok()?,
                major: 0,
                minor: 0,
                root: parts[3].to_string(),
                mountpoint: parts[4].to_string(),
                options: parts[5].to_string(),
                optional: parts[6..sep_idx].join(" "),
                fs_type: parts[sep_idx + 1].to_string(),
                source: parts[sep_idx + 2].to_string(),
                vfs_options: parts[sep_idx + 3].to_string(),
            };
            let major_minor = parts[2].splitn(3, ':').collect::<Vec<&str>>();
            if major_minor.len() != 2 {
                // mountpoint parse error.
                return None;
            }
            mount_info.major = str::parse::<u32>(major_minor[0]).ok()?;
            mount_info.minor = str::parse::<u32>(major_minor[1]).ok()?;
            if let Some(f) = &f {
                if f(mount_info.clone()) {
                    // skip this mountpoint. This mountpoint is not the container's mountpoint
                    return None;
                }
            }
            Some(mount_info)
        })
        .collect::<Vec<MountInfo>>();
    Ok(mount_points)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_trim_flawed_dir() {
        let mut tcases: Vec<(&str, String)> = Vec::new();
        tcases.push(("/", "/".to_string()));
        tcases.push(("/foo", "/".to_string()));
        tcases.push(("/.foo-_bar/foo", "/.foo-_bar/".to_string()));
        tcases.push(("/.foo-_bar/foo/", "/.foo-_bar/foo/".to_string()));
        tcases.push(("/.foo-_bar/foo/bar", "/.foo-_bar/foo/".to_string()));
        tcases.push(("/.foo-_bar/foo/bar/", "/.foo-_bar/foo/bar/".to_string()));
        for (case, expected) in tcases {
            let res = trim_flawed_dir(case);
            assert_eq!(res, expected);
        }
    }

    #[test]
    fn test_longest_common_prefix() {
        let mut tcases: Vec<(Vec<String>, String)> = Vec::new();
        tcases.push((vec![], "".to_string()));
        tcases.push((vec!["foo".to_string()], "foo".to_string()));
        tcases.push((vec!["foo".to_string(), "bar".to_string()], "".to_string()));
        tcases.push((
            vec!["foo".to_string(), "foo".to_string()],
            "foo".to_string(),
        ));
        tcases.push((
            vec!["foo".to_string(), "foobar".to_string()],
            "foo".to_string(),
        ));
        tcases.push((
            vec!["foo".to_string(), "".to_string(), "foobar".to_string()],
            "".to_string(),
        ));
        for (case, expected) in tcases {
            let res = longest_common_prefix(&case);
            assert_eq!(res, expected);
        }
    }

    #[test]
    fn test_compact_lowerdir_option() {
        let mut tcases: Vec<(Vec<String>, Option<String>, Vec<String>)> = Vec::new();
        tcases.push((
            vec!["workdir=a".to_string()],
            None,
            vec!["workdir=a".to_string()],
        ));
        tcases.push((
            vec!["workdir=a".to_string(), "lowerdir=b".to_string()],
            None,
            vec!["workdir=a".to_string(), "lowerdir=b".to_string()],
        ));
        tcases.push((
            vec!["lowerdir=/snapshots/1/fs:/snapshots/10/fs".to_string()],
            Some("/snapshots/".to_string()),
            vec!["lowerdir=1/fs:10/fs".to_string()],
        ));
        tcases.push((
            vec![
                "workdir=a".to_string(),
                "lowerdir=/snapshots/1/fs:/snapshots/10/fs".to_string(),
            ],
            Some("/snapshots/".to_string()),
            vec!["workdir=a".to_string(), "lowerdir=1/fs:10/fs".to_string()],
        ));
        tcases.push((
            vec!["lowerdir=/snapshots/1/fs:/snapshots/10/fs:/snapshots/2/fs".to_string()],
            Some("/snapshots/".to_string()),
            vec!["lowerdir=1/fs:10/fs:2/fs".to_string()],
        ));
        tcases.push((
            vec![
                "workdir=a".to_string(),
                "lowerdir=/snapshots/1/fs:/snapshots/10/fs:/snapshots/2/fs".to_string(),
            ],
            Some("/snapshots/".to_string()),
            vec![
                "workdir=a".to_string(),
                "lowerdir=1/fs:10/fs:2/fs".to_string(),
            ],
        ));
        tcases.push((
            vec!["lowerdir=/snapshots/1/fs:/other_snapshots/1/fs".to_string()],
            None,
            vec!["lowerdir=/snapshots/1/fs:/other_snapshots/1/fs".to_string()],
        ));
        tcases.push((
            vec![
                "workdir=a".to_string(),
                "lowerdir=/snapshots/1/fs:/other_snapshots/1/fs".to_string(),
            ],
            None,
            vec![
                "workdir=a".to_string(),
                "lowerdir=/snapshots/1/fs:/other_snapshots/1/fs".to_string(),
            ],
        ));
        for (case, expected_chdir, expected_options) in tcases {
            let (chdir, options) = LowerdirCompactor::new(&case).compact();
            assert_eq!(chdir, expected_chdir);
            assert_eq!(options, expected_options);
        }
    }

    #[cfg(feature = "async")]
    #[test]
    fn test_mount_rootfs_umount_recursive() {
        let target = tempfile::tempdir().expect("create target dir error");
        let lower1 = tempfile::tempdir().expect("create lower1 dir error");
        let lower2 = tempfile::tempdir().expect("create lower2 dir error");
        let upperdir = tempfile::tempdir().expect("create upperdir dir error");
        let workdir = tempfile::tempdir().expect("create workdir dir error");
        let options = vec![
            "lowerdir=".to_string()
                + lower1.path().to_str().expect("lower1 path to str error")
                + ":"
                + lower2.path().to_str().expect("lower2 path to str error"),
            "upperdir=".to_string()
                + upperdir
                    .path()
                    .to_str()
                    .expect("upperdir path to str error"),
            "workdir=".to_string()
                + workdir.path().to_str().expect("workdir path to str error"),
        ];
        // mount target.
        let result = mount_rootfs(Some("overlay"), Some("overlay"), &options, &target);
        assert!(result.is_ok());
        let mut mountinfo = get_mounts(Some(prefix_filter(
            target
                .path()
                .to_str()
                .expect("target path to str error")
                .to_string(),
        )))
        .expect("get_mounts error");
        // make sure the target has been mounted.
        assert_ne!(0, mountinfo.len());
        // umount target.
        let result = umount_recursive(target.path().to_str(), 0);
        assert!(result.is_ok());
        mountinfo = get_mounts(Some(prefix_filter(
            target
                .path()
                .to_str()
                .expect("target path to str error")
                .to_string(),
        )))
        .expect("get_mounts error");
        // make sure the target has been unmounted.
        assert_eq!(0, mountinfo.len());
    }

    #[cfg(feature = "async")]
    #[test]
    fn test_setup_loop_dev() {
        let path = tempfile::NamedTempFile::new().expect("cannot create tempfile");
        let backing_file = path.path().to_str();
        let params = LoopParams {
            readonly: false,
            auto_clear: true,
            direct: true,
        };
        let result = setup_loop(backing_file, params);
        assert!(result.is_ok());
    }
}

================================================
FILE: crates/shim/src/mount_other.rs
================================================
/*
   Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #![allow(unused)] use std::path::Path; use crate::error::{Error, Result}; pub fn mount_rootfs( fs_type: Option<&str>, source: Option<&str>, options: &[String], target: impl AsRef, ) -> Result<()> { // On on-Linux systems, we should return OK // instead of exiting with an error. Ok(()) } pub fn umount_recursive(target: Option<&str>, flags: i32) -> Result<()> { Ok(()) } ================================================ FILE: crates/shim/src/reap.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ use crate::error::Result; #[cfg(target_os = "linux")] /// Set current process as subreaper for child processes. /// /// A subreaper fulfills the role of `init` for its descendant processes. When a process becomes /// orphaned (i.e., its immediate parent terminates), then that process will be reparented to the /// nearest still living ancestor subreaper. 
Subsequently, calls to `getppid()` in the orphaned /// process will now return the PID of the subreaper process, and when the orphan terminates, /// it is the subreaper process that will receive a SIGCHLD signal and will be able to `wait()` /// on the process to discover its termination status. pub fn set_subreaper() -> Result<()> { use crate::error::Error; let ret = unsafe { libc::prctl(libc::PR_SET_CHILD_SUBREAPER, 1, 0, 0, 0) }; if ret < 0 { return Err(other!( "linux prctl returned: {}", std::io::Error::last_os_error() )); } Ok(()) } #[cfg(not(target_os = "linux"))] pub fn set_subreaper() -> Result<()> { Ok(()) } #[cfg(test)] #[cfg(target_os = "linux")] mod tests { use crate::reap::set_subreaper; #[test] fn test_set_subreaper() { set_subreaper().unwrap(); let mut val: libc::c_int = 0; let ret = unsafe { libc::prctl( libc::PR_GET_CHILD_SUBREAPER, &mut val as *mut libc::c_int as libc::c_ulong, 0, 0, 0, ) }; assert!(ret >= 0); assert!(val != 0); } } ================================================ FILE: crates/shim/src/synchronous/mod.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ //! A library to implement custom runtime v2 shims for containerd. //! //! # Runtime //! Runtime v2 introduces a first class shim API for runtime authors to integrate with containerd. //! The shim API is minimal and scoped to the execution lifecycle of a container. //! //! 
This crate simplifies shim v2 runtime development for containerd. It handles common tasks such //! as command line parsing, setting up shim's TTRPC server, logging, events, etc. //! //! Clients are expected to implement [Shim] and [Task] traits with task handling routines. //! This generally replicates same API as in Go [version](https://github.com/containerd/containerd/blob/main/runtime/v2/example/cmd/main.go). //! //! Once implemented, shim's bootstrap code is as easy as: //! ```text //! shim::run::("io.containerd.empty.v1") //! ``` //! macro_rules! cfg_unix { ($($item:item)*) => { $( #[cfg(unix)] $item )* } } macro_rules! cfg_windows { ($($item:item)*) => { $( #[cfg(windows)] $item )* } } use std::{ env, io::Write, process::{self, Command, Stdio}, sync::{Arc, Condvar, Mutex}, }; pub use log::{debug, error, info, warn}; use util::{read_address, write_address}; use crate::{ api::DeleteResponse, args::{self, Flags}, logger, protos::{ protobuf::Message, shim::shim_ttrpc::{create_task, Task}, ttrpc::{Client, Server}, }, reap, socket_address, start_listener, synchronous::publisher::RemotePublisher, Config, Error, Result, StartOpts, TTRPC_ADDRESS, }; cfg_unix! { use crate::parse_sockaddr; use libc::{SIGCHLD, SIGINT, SIGPIPE, SIGTERM}; use nix::{ errno::Errno, sys::{ signal::Signal, wait::{self, WaitPidFlag, WaitStatus}, }, unistd::Pid, }; use signal_hook::iterator::Signals; use std::os::unix::fs::FileTypeExt; use std::{convert::TryFrom, fs, path::Path}; } cfg_windows! 
{ use std::{ io, ptr, fs::OpenOptions, os::windows::prelude::{AsRawHandle, OpenOptionsExt}, }; use windows_sys::Win32::{ Foundation::{CloseHandle, HANDLE}, System::{ Console::SetConsoleCtrlHandler, Threading::{CreateSemaphoreA, ReleaseSemaphore, WaitForSingleObject, INFINITE}, }, Storage::FileSystem::FILE_FLAG_OVERLAPPED }; static mut SEMAPHORE: HANDLE = 0 as HANDLE; const MAX_SEM_COUNT: i32 = 255; } pub mod monitor; pub mod publisher; pub mod util; /// Helper structure that wraps atomic bool to signal shim server when to shutdown the TTRPC server. /// /// Shim implementations are responsible for calling [`Self::signal`]. #[allow(clippy::mutex_atomic)] // Condvar expected to be used with Mutex, not AtomicBool. #[derive(Default)] pub struct ExitSignal(Mutex, Condvar); // Wrapper type to help hide platform specific signal handling. struct AppSignals { #[cfg(unix)] signals: Signals, } #[allow(clippy::mutex_atomic)] impl ExitSignal { /// Set exit signal to shutdown shim server. pub fn signal(&self) { let (lock, cvar) = (&self.0, &self.1); let mut exit = lock.lock().unwrap(); *exit = true; cvar.notify_all(); } /// Wait for the exit signal to be set. pub fn wait(&self) { let (lock, cvar) = (&self.0, &self.1); let mut started = lock.lock().unwrap(); while !*started { started = cvar.wait(started).unwrap(); } } } /// Main shim interface that must be implemented by all shims. /// /// Start and delete routines will be called to handle containerd's shim lifecycle requests. pub trait Shim { /// Type to provide task service for the shim. type T: Task + Send + Sync; /// Create a new instance of Shim. /// /// # Arguments /// - `runtime_id`: identifier of the container runtime. /// - `args`: command line arguments passed to the shim which includes namespace and id /// - `config`: for the shim to pass back configuration information fn new(runtime_id: &str, args: &Flags, config: &mut Config) -> Self; /// Start shim will be called by containerd when launching new shim instance. 
/// /// It expected to return TTRPC address containerd daemon can use to communicate with /// the given shim instance. /// /// See fn start_shim(&mut self, opts: StartOpts) -> Result; /// Delete shim will be called by containerd after shim shutdown to cleanup any leftovers. fn delete_shim(&mut self) -> Result; /// Wait for the shim to exit. fn wait(&mut self); /// Create the task service object. fn create_task_service(&self, publisher: RemotePublisher) -> Self::T; } /// Shim entry point that must be invoked from `main`. #[cfg_attr(feature = "tracing", tracing::instrument(level = "info"))] pub fn run(runtime_id: &str, opts: Option) where T: Shim + Send + Sync + 'static, { if let Some(err) = bootstrap::(runtime_id, opts).err() { eprintln!("{}: {:?}", runtime_id, err); process::exit(1); } } #[cfg_attr(feature = "tracing", tracing::instrument(level = "info"))] fn bootstrap(runtime_id: &str, opts: Option) -> Result<()> where T: Shim + Send + Sync + 'static, { // Parse command line let os_args: Vec<_> = env::args_os().collect(); let flags = args::parse(&os_args[1..])?; if flags.namespace.is_empty() { return Err(Error::InvalidArgument(String::from( "Shim namespace cannot be empty", ))); } let ttrpc_address = env::var(TTRPC_ADDRESS)?; // Create shim instance let mut config = opts.unwrap_or_default(); // Setup signals (On Linux need register signals before start main app according to signal_hook docs) let signals = setup_signals(&config); if !config.no_sub_reaper { reap::set_subreaper()?; } let mut shim = T::new(runtime_id, &flags, &mut config); match flags.action.as_str() { "start" => { let args = StartOpts { id: flags.id, publish_binary: flags.publish_binary, address: flags.address, ttrpc_address, namespace: flags.namespace, debug: flags.debug, }; let address = shim.start_shim(args)?; std::io::stdout() .lock() .write_fmt(format_args!("{}", address)) .map_err(io_error!(e, "write stdout"))?; Ok(()) } "delete" => { std::thread::spawn(move || handle_signals(signals)); let 
response = shim.delete_shim()?; let stdout = std::io::stdout(); let mut locked = stdout.lock(); response.write_to_writer(&mut locked)?; Ok(()) } _ => { if flags.socket.is_empty() { return Err(Error::InvalidArgument(String::from( "Shim socket cannot be empty", ))); } #[cfg(windows)] util::setup_debugger_event(); if !config.no_setup_logger { logger::init( flags.debug, &config.default_log_level, &flags.namespace, &flags.id, )?; } let publisher = publisher::RemotePublisher::new(&ttrpc_address)?; let task = Box::new(shim.create_task_service(publisher)) as Box; let task_service = create_task(Arc::from(task)); let Some(mut server) = create_server_with_retry(&flags)? else { signal_server_started(); return Ok(()); }; server = server.register_service(task_service); server.start()?; signal_server_started(); info!("Shim successfully started, waiting for exit signal..."); #[cfg(unix)] std::thread::spawn(move || handle_signals(signals)); shim.wait(); info!("Shutting down shim instance"); server.shutdown(); // NOTE: If the shim server is down(like oom killer), the address // socket might be leaking. 
let address = read_address()?; remove_socket_silently(&address); Ok(()) } } } #[cfg(windows)] #[cfg_attr(feature = "tracing", tracing::instrument(skip_all, level = "info"))] fn create_server(flags: &args::Flags) -> Result { start_listener(&flags.socket).map_err(io_error!(e, "starting listener"))?; let mut server = Server::new(); server = server.bind(&flags.socket)?; Ok(server) } #[cfg(unix)] #[cfg_attr(feature = "tracing", tracing::instrument(skip_all, level = "info"))] fn create_server(flags: &args::Flags) -> Result { use std::os::fd::IntoRawFd; let listener = start_listener(&flags.socket).map_err(io_error!(e, "starting listener"))?; let mut server = Server::new(); server = server.add_listener(listener.into_raw_fd())?; Ok(server) } #[cfg_attr(feature = "tracing", tracing::instrument(skip_all, level = "info"))] fn create_server_with_retry(flags: &args::Flags) -> Result> { // Really try to create a server. let server = match create_server(flags) { Ok(server) => server, Err(Error::IoError { err, .. }) if err.kind() == std::io::ErrorKind::AddrInUse => { // If the address is already in use then make sure it is up and running and return the address // This allows for running a single shim per container scenarios if let Ok(()) = wait_socket_working(&flags.socket, 5, 200) { write_address(&flags.socket)?; return Ok(None); } remove_socket(&flags.socket)?; create_server(flags)? 
} Err(e) => return Err(e), }; Ok(Some(server)) } #[cfg_attr(feature = "tracing", tracing::instrument(skip_all, level = "info"))] fn setup_signals(_config: &Config) -> Option { #[cfg(unix)] { let signals = Signals::new([SIGTERM, SIGINT, SIGPIPE]).expect("new signal failed"); if !_config.no_reaper { signals.add_signal(SIGCHLD).expect("add signal failed"); } Some(AppSignals { signals }) } #[cfg(windows)] { unsafe { SEMAPHORE = CreateSemaphoreA(ptr::null_mut(), 0, MAX_SEM_COUNT, ptr::null()); if SEMAPHORE == 0 { panic!("Failed to create semaphore: {}", io::Error::last_os_error()); } if SetConsoleCtrlHandler(Some(signal_handler), 1) == 0 { let e = io::Error::last_os_error(); CloseHandle(SEMAPHORE); SEMAPHORE = 0 as HANDLE; panic!("Failed to set console handler: {}", e); } } None } } #[cfg(windows)] unsafe extern "system" fn signal_handler(_: u32) -> i32 { ReleaseSemaphore(SEMAPHORE, 1, ptr::null_mut()); 1 } #[cfg_attr(feature = "tracing", tracing::instrument(skip_all, level = "info"))] fn handle_signals(mut _signals: Option) { #[cfg(unix)] { let mut app_signals = _signals.take().unwrap(); loop { for sig in app_signals.signals.wait() { match sig { SIGTERM | SIGINT => { debug!("received {}", sig); } SIGCHLD => loop { // Note that this thread sticks to child even it is suspended. match wait::waitpid(Some(Pid::from_raw(-1)), Some(WaitPidFlag::WNOHANG)) { Ok(WaitStatus::Exited(pid, status)) => { monitor::monitor_notify_by_pid(pid.as_raw(), status) .unwrap_or_else(|e| error!("failed to send exit event {}", e)) } Ok(WaitStatus::Signaled(pid, sig, _)) => { debug!("child {} terminated({})", pid, sig); let exit_code = 128 + sig as i32; monitor::monitor_notify_by_pid(pid.as_raw(), exit_code) .unwrap_or_else(|e| error!("failed to send signal event {}", e)) } Ok(WaitStatus::StillAlive) => { break; } Err(Errno::ECHILD) => { break; } Err(e) => { // stick until all children will be successfully waited, even some unexpected error occurs. 
warn!("error occurred in signal handler: {}", e); } _ => {} // stick until exit } }, _ => { if let Ok(sig) = Signal::try_from(sig) { debug!("received {}", sig); } else { warn!("received invalid signal {}", sig); } } } } } } #[cfg(windows)] { // must start on thread as waitforSingleObject puts the current thread to sleep loop { unsafe { WaitForSingleObject(SEMAPHORE, INFINITE); //Windows doesn't have similar signal like SIGCHLD // We could implement something if required but for now } } } } #[cfg_attr(feature = "tracing", tracing::instrument(level = "info"))] fn wait_socket_working(address: &str, interval_in_ms: u64, count: u32) -> Result<()> { for _i in 0..count { match Client::connect(address) { Ok(_) => { return Ok(()); } Err(_) => { std::thread::sleep(std::time::Duration::from_millis(interval_in_ms)); } } } Err(other!("time out waiting for socket {}", address)) } #[cfg_attr(feature = "tracing", tracing::instrument(level = "info"))] fn remove_socket_silently(address: &str) { remove_socket(address).unwrap_or_else(|e| warn!("failed to remove file {} {:?}", address, e)) } #[cfg_attr(feature = "tracing", tracing::instrument(level = "info"))] fn remove_socket(address: &str) -> Result<()> { #[cfg(unix)] { let path = parse_sockaddr(address); if let Ok(md) = Path::new(path).metadata() { if md.file_type().is_socket() { fs::remove_file(path).map_err(io_error!(e, "remove socket"))?; } } } #[cfg(windows)] { let mut opts = OpenOptions::new(); opts.read(true) .write(true) .custom_flags(FILE_FLAG_OVERLAPPED); if let Ok(f) = opts.open(address) { info!("attempting to remove existing named pipe: {}", address); unsafe { CloseHandle(f.as_raw_handle() as isize) }; } } Ok(()) } /// Spawn is a helper func to launch shim process. /// Typically this expected to be called from `StartShim`. 
#[cfg_attr(feature = "tracing", tracing::instrument(level = "info"))] pub fn spawn(opts: StartOpts, grouping: &str, vars: Vec<(&str, &str)>) -> Result<(u32, String)> { let cmd = env::current_exe().map_err(io_error!(e, ""))?; let cwd = env::current_dir().map_err(io_error!(e, ""))?; let address = socket_address(&opts.address, &opts.namespace, grouping); // Activation pattern comes from the hcsshim: https://github.com/microsoft/hcsshim/blob/v0.10.0-rc.7/cmd/containerd-shim-runhcs-v1/serve.go#L57-L70 // another way to do it would to create named pipe and pass it to the child process through handle inheritence but that would require duplicating // the logic in Rust's 'command' for process creation. There is an issue in Rust to make it simplier to specify handle inheritence and this could // be revisited once https://github.com/rust-lang/rust/issues/54760 is implemented. let mut command = Command::new(cmd); command .current_dir(cwd) .stdout(Stdio::piped()) .stdin(Stdio::null()) .stderr(Stdio::null()) .envs(vars) .args([ "-namespace", &opts.namespace, "-id", &opts.id, "-address", &opts.address, "-socket", &address, ]); if opts.debug { command.arg("-debug"); } // On Windows Rust currently sets the `HANDLE_FLAG_INHERIT` flag to true when using Command::spawn. // When a child process is spawned by another process (containerd) the child process inherits the parent's stdin, stdout, and stderr handles. // Due to the HANDLE_FLAG_INHERIT flag being set to true this will cause containerd to hand until the child process closes the handles. // As a workaround we can Disables inheritance on the io pipe handles. 
// This workaround comes from https://github.com/rust-lang/rust/issues/54760#issuecomment-1045940560 #[cfg(windows)] disable_handle_inheritance(); let mut child = command.spawn().map_err(io_error!(e, "spawn shim"))?; let mut reader = child.stdout.take().unwrap(); std::io::copy(&mut reader, &mut std::io::stderr()).unwrap(); Ok((child.id(), address)) } #[cfg(windows)] fn disable_handle_inheritance() { use windows_sys::Win32::{ Foundation::{SetHandleInformation, HANDLE_FLAG_INHERIT}, System::Console::{GetStdHandle, STD_ERROR_HANDLE, STD_INPUT_HANDLE, STD_OUTPUT_HANDLE}, }; unsafe { let std_err = GetStdHandle(STD_ERROR_HANDLE); let std_in = GetStdHandle(STD_INPUT_HANDLE); let std_out = GetStdHandle(STD_OUTPUT_HANDLE); for handle in [std_err, std_in, std_out] { SetHandleInformation(handle, HANDLE_FLAG_INHERIT, 0); //info!(" handle for... {:?}", handle); //CloseHandle(handle); } } } // This closes the stdout handle which was mapped to the stderr on the first invocation of the shim. // This releases first process which will give containerd the address of the namedpipe. #[cfg(windows)] fn signal_server_started() { use windows_sys::Win32::System::Console::{GetStdHandle, STD_OUTPUT_HANDLE}; unsafe { let std_out = GetStdHandle(STD_OUTPUT_HANDLE); { let handle = std_out; CloseHandle(handle); } } } // This closes the stdout handle which was mapped to the stderr on the first invocation of the shim. // This releases first process which will give containerd the address of the namedpipe. 
#[cfg(unix)] fn signal_server_started() { use libc::{dup2, STDERR_FILENO, STDOUT_FILENO}; unsafe { if dup2(STDERR_FILENO, STDOUT_FILENO) < 0 { panic!("Error closing pipe: {}", std::io::Error::last_os_error()) } } } #[cfg(test)] mod tests { use std::thread; use super::*; #[test] fn exit_signal() { let signal = Arc::new(ExitSignal::default()); let cloned = Arc::clone(&signal); let handle = thread::spawn(move || { cloned.signal(); }); signal.wait(); if let Err(err) = handle.join() { panic!("{:?}", err); } } struct Nop {} struct NopTask {} impl Task for NopTask {} impl Shim for Nop { type T = NopTask; fn new(_runtime_id: &str, _args: &Flags, _config: &mut Config) -> Self { Nop {} } fn start_shim(&mut self, _opts: StartOpts) -> Result { Ok("".to_string()) } fn delete_shim(&mut self) -> Result { Ok(DeleteResponse::default()) } fn wait(&mut self) {} fn create_task_service(&self, _publisher: RemotePublisher) -> Self::T { NopTask {} } } #[test] fn no_namespace() { let runtime_id = "test"; let res = bootstrap::(runtime_id, None); assert!(res.is_err()); assert!(res .unwrap_err() .to_string() .contains("Shim namespace cannot be empty")); } } ================================================ FILE: crates/shim/src/synchronous/monitor.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/

use std::{
    collections::HashMap,
    sync::{
        mpsc::{channel, Receiver, Sender},
        LazyLock, Mutex,
    },
};

use log::{error, warn};

use crate::{
    monitor::{ExitEvent, Subject, Topic},
    Result,
};

/// Process-wide exit-event monitor, lazily initialized on first use.
pub static MONITOR: LazyLock<Mutex<Monitor>> = LazyLock::new(|| {
    Mutex::new(Monitor {
        seq_id: 0,
        subscribers: HashMap::new(),
        topic_subs: HashMap::new(),
    })
});

/// Subscribe to exit events published on `topic`.
pub fn monitor_subscribe(topic: Topic) -> Result<Subscription> {
    let mut monitor = MONITOR.lock().unwrap();
    let s = monitor.subscribe(topic)?;
    Ok(s)
}

/// Notify subscribers that process `pid` exited with `exit_code`.
pub fn monitor_notify_by_pid(pid: i32, exit_code: i32) -> Result<()> {
    let monitor = MONITOR.lock().unwrap();
    monitor.notify_by_pid(pid, exit_code)
}

/// Notify subscribers that exec `exec_id` in container `id` exited with `exit_code`.
pub fn monitor_notify_by_exec(id: &str, exec_id: &str, exit_code: i32) -> Result<()> {
    let monitor = MONITOR.lock().unwrap();
    monitor.notify_by_exec(id, exec_id, exit_code)
}

/// Fan-out dispatcher from exit notifications to per-topic subscribers.
pub struct Monitor {
    // Monotonically increasing subscription id generator.
    pub(crate) seq_id: i64,
    pub(crate) subscribers: HashMap<i64, Subscriber>,
    pub(crate) topic_subs: HashMap<Topic, Vec<i64>>,
}

pub(crate) struct Subscriber {
    pub(crate) topic: Topic,
    pub(crate) tx: Sender<ExitEvent>,
}

/// Handle to a subscription; unsubscribes automatically on drop.
pub struct Subscription {
    pub id: i64,
    pub rx: Receiver<ExitEvent>,
}

impl Monitor {
    pub fn subscribe(&mut self, topic: Topic) -> Result<Subscription> {
        let (tx, rx) = channel::<ExitEvent>();
        let id = self.seq_id;
        self.seq_id += 1;

        let subscriber = Subscriber {
            tx,
            topic: topic.clone(),
        };

        self.subscribers.insert(id, subscriber);
        self.topic_subs.entry(topic).or_default().push(id);
        Ok(Subscription { id, rx })
    }

    pub fn notify_by_pid(&self, pid: i32, exit_code: i32) -> Result<()> {
        let subject = Subject::Pid(pid);
        self.notify_topic(&Topic::Pid, &subject, exit_code);
        // Events are delivered to both the specific topic and Topic::All.
        self.notify_topic(&Topic::All, &subject, exit_code);
        Ok(())
    }

    pub fn notify_by_exec(&self, cid: &str, exec_id: &str, exit_code: i32) -> Result<()> {
        let subject = Subject::Exec(cid.into(), exec_id.into());
        self.notify_topic(&Topic::Exec, &subject, exit_code);
        self.notify_topic(&Topic::All, &subject, exit_code);
        Ok(())
    }

    // Send the event to every subscriber of `topic`; a failed send is
    // logged and skipped so one dead receiver can't block the rest.
    fn notify_topic(&self, topic: &Topic, subject: &Subject, exit_code: i32) {
        self.topic_subs.get(topic).map_or((), |subs| {
            for i in subs {
                self.subscribers.get(i).and_then(|sub| {
                    sub.tx
                        .send(ExitEvent {
                            subject: subject.clone(),
                            exit_code,
                        })
                        .map_err(|e| warn!("failed to send {}", e))
                        .ok()
                });
            }
        })
    }

    pub fn unsubscribe(&mut self, id: i64) -> Result<()> {
        let sub = self.subscribers.remove(&id);
        if let Some(s) = sub {
            self.topic_subs.get_mut(&s.topic).map(|v| {
                v.iter().position(|&x| x == id).map(|i| {
                    v.remove(i);
                })
            });
        }
        Ok(())
    }
}

impl Drop for Subscription {
    fn drop(&mut self) {
        let mut monitor = MONITOR.lock().unwrap();
        monitor.unsubscribe(self.id).unwrap_or_else(|e| {
            error!("failed to unsubscribe the subscription {}, {}", self.id, e);
        });
    }
}

/// Block until an exit event for `pid` arrives and return its exit code.
///
/// NOTE(review): if the sending side is dropped, `recv()` keeps returning
/// `Err` and this spins forever — callers must keep the monitor alive.
pub fn wait_pid(pid: i32, s: Subscription) -> i32 {
    loop {
        if let Ok(ExitEvent {
            subject: Subject::Pid(epid),
            exit_code: code,
        }) = s.rx.recv()
        {
            if pid == epid {
                return code;
            }
        }
    }
}

================================================
FILE: crates/shim/src/synchronous/publisher.rs
================================================
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

//! Implements a client to publish events from the shim back to containerd.

use client::{
    protobuf::MessageDyn,
    shim::{event::Envelope, events},
    ttrpc::{self, context::Context},
    types::empty,
    Client, Events, EventsClient,
};
use containerd_shim_protos as client;

#[cfg(unix)]
use crate::util::connect;
#[cfg(target_os = "windows")]
// Prevent unused warning.
use crate::Error; use crate::{ error::Result, util::{convert_to_any, timestamp}, }; #[cfg(windows)] const RETRY_COUNT: i32 = 3; /// Remote publisher connects to containerd's TTRPC endpoint to publish events from shim. pub struct RemotePublisher { client: EventsClient, } impl RemotePublisher { /// Connect to containerd's TTRPC endpoint. /// /// containerd uses `/run/containerd/containerd.sock.ttrpc` by default pub fn new(address: impl AsRef) -> Result { let client = Self::connect(address)?; Ok(RemotePublisher { client: EventsClient::new(client), }) } fn connect(address: impl AsRef) -> Result { #[cfg(unix)] { let fd = connect(address)?; // Client::new() takes ownership of the RawFd. Client::new(fd).map_err(|err| err.into()) } #[cfg(windows)] { for i in 0..RETRY_COUNT { match Client::connect(address.as_ref()) { Ok(client) => return Ok(client), Err(e) => match e { ttrpc::Error::Windows(231) => { // ERROR_PIPE_BUSY log::debug!("pipe busy during connect. try number {}", i); std::thread::sleep(std::time::Duration::from_millis(5)); } _ => return Err(e.into()), }, } } Err(other!("failed to connect to {}", address.as_ref())) } } /// Publish a new event. /// /// Event object can be anything that Protobuf able serialize (e.g. implement `Message` trait). 
pub fn publish( &self, ctx: Context, topic: &str, namespace: &str, event: Box, ) -> Result<()> { let mut envelope = Envelope::new(); envelope.set_topic(topic.to_owned()); envelope.set_namespace(namespace.to_owned()); envelope.set_timestamp(timestamp()?); envelope.set_event(convert_to_any(event)?); let mut req = events::ForwardRequest::new(); req.set_envelope(envelope); self.client.forward(ctx, &req)?; Ok(()) } } impl Events for RemotePublisher { fn forward( &self, _ctx: &ttrpc::TtrpcContext, req: events::ForwardRequest, ) -> ttrpc::Result { self.client.forward(Context::default(), &req) } } #[cfg(test)] mod tests { use std::sync::{Arc, Barrier}; use client::{ api::{Empty, ForwardRequest}, events::task::TaskOOM, }; use ttrpc::Server; use super::*; #[cfg(windows)] use crate::synchronous::wait_socket_working; struct FakeServer {} impl Events for FakeServer { fn forward(&self, _ctx: &ttrpc::TtrpcContext, req: ForwardRequest) -> ttrpc::Result { let env = req.envelope(); assert_eq!(env.topic(), "/tasks/oom"); Ok(Empty::default()) } } #[test] fn test_connect() { #[cfg(unix)] let tmpdir = tempfile::tempdir().unwrap(); #[cfg(unix)] let path = format!("{}/socket", tmpdir.as_ref().to_str().unwrap()); #[cfg(windows)] let path = "\\\\.\\pipe\\test-pipe".to_string(); let path1 = path.clone(); assert!(RemotePublisher::connect("a".repeat(16384)).is_err()); assert!(RemotePublisher::connect(&path).is_err()); let barrier = Arc::new(Barrier::new(2)); let barrier2 = barrier.clone(); let thread = std::thread::spawn(move || { let mut server = create_server(&path1); server.start().unwrap(); #[cfg(windows)] // make sure pipe is ready on windows wait_socket_working(&path1, 5, 5).unwrap(); barrier2.wait(); barrier2.wait(); server.shutdown(); }); barrier.wait(); let client = RemotePublisher::new(&path).unwrap(); let mut msg = TaskOOM::new(); msg.set_container_id("test".to_string()); client .publish(Context::default(), "/tasks/oom", "ns1", Box::new(msg)) .unwrap(); barrier.wait(); 
thread.join().unwrap(); } fn create_server(server_address: &str) -> Server { #[cfg(unix)] { use std::os::unix::{io::AsRawFd, net::UnixListener}; let listener = UnixListener::bind(server_address).unwrap(); listener.set_nonblocking(true).unwrap(); let task = Box::new(FakeServer {}) as Box; let service = client::create_events(task.into()); let server = Server::new() .add_listener(listener.as_raw_fd()) .unwrap() .register_service(service); std::mem::forget(listener); server } #[cfg(windows)] { let service = client::create_events(Arc::new(FakeServer {})); Server::new() .bind(server_address) .unwrap() .register_service(service) } } } ================================================ FILE: crates/shim/src/synchronous/util.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ use std::{ fs::{rename, File, OpenOptions}, io::{Read, Write}, path::Path, }; use containerd_shim_protos::shim::oci::Options; #[cfg(unix)] use libc::mode_t; use log::warn; #[cfg(unix)] use nix::sys::stat::Mode; use oci_spec::runtime::Spec; use crate::{ util::{JsonOptions, OPTIONS_FILE_NAME, RUNTIME_FILE_NAME}, Error, }; pub fn read_file_to_str>(filename: P) -> crate::Result { let mut file = File::open(&filename).map_err(io_error!( e, "open {}", filename.as_ref().to_string_lossy() ))?; let mut content: String = String::new(); file.read_to_string(&mut content).map_err(io_error!( e, "read {}", filename.as_ref().to_string_lossy() ))?; Ok(content) } pub fn read_options(bundle: impl AsRef) -> crate::Result { let path = bundle.as_ref().join(OPTIONS_FILE_NAME); let opts_str = read_file_to_str(path)?; let json_opt: JsonOptions = serde_json::from_str(&opts_str)?; Ok(json_opt.into()) } pub fn read_runtime(bundle: impl AsRef) -> crate::Result { let path = bundle.as_ref().join(RUNTIME_FILE_NAME); read_file_to_str(path) } pub fn read_address() -> crate::Result { let path = Path::new("address"); read_file_to_str(path) } pub fn read_pid_from_file(pid_path: &Path) -> crate::Result { let pid_str = read_file_to_str(pid_path)?; let pid = pid_str.parse::()?; Ok(pid) } pub fn write_str_to_path(filename: &Path, s: &str) -> crate::Result<()> { let file = filename .file_name() .ok_or_else(|| Error::InvalidArgument(String::from("pid path illegal")))?; let tmp_path = filename .parent() .map(|x| x.join(format!(".{}", file.to_str().unwrap_or("")))) .ok_or_else(|| Error::InvalidArgument(String::from("failed to create tmp path")))?; let tmp_path = tmp_path .to_str() .ok_or_else(|| Error::InvalidArgument(String::from("failed to get path")))?; let mut f = OpenOptions::new() .write(true) .create_new(true) .open(tmp_path) .map_err(io_error!(e, "open {}", filename.to_str().unwrap()))?; f.write_all(s.as_bytes()) .map_err(io_error!(e, "write tmp file"))?; rename(tmp_path, 
filename).map_err(io_error!( e, "rename tmp file to {}", filename.to_str().unwrap() ))?; Ok(()) } pub fn write_options(bundle: &str, opt: &Options) -> crate::Result<()> { let json_opt = JsonOptions::from(opt.to_owned()); let opts_str = serde_json::to_string(&json_opt)?; let path = Path::new(bundle).join(OPTIONS_FILE_NAME); write_str_to_path(path.as_path(), opts_str.as_str()) } pub fn write_runtime(bundle: &str, binary_name: &str) -> crate::Result<()> { let path = Path::new(bundle).join(RUNTIME_FILE_NAME); write_str_to_path(path.as_path(), binary_name) } pub fn write_address(address: &str) -> crate::Result<()> { let path = Path::new("address"); write_str_to_path(path, address) } pub fn read_spec_from_file(bundle: &str) -> crate::Result { let path = Path::new(bundle).join("config.json"); Spec::load(path).map_err(other_error!("read spec file")) } #[cfg(unix)] pub fn mkdir(path: impl AsRef, mode: mode_t) -> crate::Result<()> { let path_buf = path.as_ref().to_path_buf(); if !path_buf.as_path().exists() { let mode = Mode::from_bits(mode).ok_or_else(|| other!("invalid dir mode {}", mode))?; nix::unistd::mkdir(path_buf.as_path(), mode)?; } Ok(()) } /// A helper to help remove temperate file or dir when it became useless pub struct HelperRemoveFile { path: String, } impl HelperRemoveFile { pub fn new(path: String) -> Self { Self { path } } } impl Drop for HelperRemoveFile { fn drop(&mut self) { std::fs::remove_file(&self.path) .unwrap_or_else(|e| warn!("remove dir {} error: {}", &self.path, e)); } } #[cfg(target_os = "windows")] // helper to configure pause thread until signaled. 
Useful in attaching a debugger // https://github.com/microsoft/hcsshim/blob/v0.10.0-rc.7/cmd/containerd-shim-runhcs-v1/serve.go#L313-L315 // use with https://github.com/moby/docker-signal pub(crate) fn setup_debugger_event() { use std::{env, io, process}; use log::{debug, error}; use windows_sys::Win32::System::Threading::{WaitForSingleObject, INFINITE}; let debugger = env::var("SHIM_DEBUGGER").unwrap_or_else(|_| "".to_string()); if debugger.is_empty() { return; } let event_name = format!("Global\\debugger-{}", process::id()); debug!("Halting until signalled: {}", event_name); let e = match create_event(event_name) { Ok(e) => e, Err(e) => { error!("failed to create event for debugger: {}", e); return; } }; match unsafe { WaitForSingleObject(e, INFINITE) } { 0 => {} _ => { error!( "failed to wait for debugger event: {}", io::Error::last_os_error() ); return; } } debug!("signal received, continuing"); } #[cfg(target_os = "windows")] fn create_event(name: String) -> crate::Result { use std::{ffi::OsStr, io, os::windows::prelude::OsStrExt}; use windows_sys::Win32::System::Threading::CreateEventW; let name = OsStr::new(name.as_str()) .encode_wide() .chain(Some(0)) // add NULL termination .collect::>(); let result = unsafe { CreateEventW(std::ptr::null_mut(), 0, 0, name.as_ptr()) }; match result { 0 => Err(Error::Other(io::Error::last_os_error().to_string())), _ => Ok(result), } } ================================================ FILE: crates/shim/src/util.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ #[cfg(unix)] use std::os::unix::io::RawFd; use std::time::{SystemTime, UNIX_EPOCH}; use serde::{Deserialize, Serialize}; use time::OffsetDateTime; #[cfg(feature = "async")] pub use crate::asynchronous::util::*; #[cfg(not(feature = "async"))] pub use crate::synchronous::util::*; use crate::{ api::Options, error::Result, protos::protobuf::{ well_known_types::{any::Any, timestamp::Timestamp}, MessageDyn, }, }; pub const CONFIG_FILE_NAME: &str = "config.json"; pub const OPTIONS_FILE_NAME: &str = "options.json"; pub const RUNTIME_FILE_NAME: &str = "runtime"; // Define JsonOptions here for Json serialize and deserialize // as rust-protobuf hasn't released serde_derive feature, // see https://github.com/stepancheg/rust-protobuf/#serde_derive-support #[derive(Debug, Deserialize, Serialize)] #[serde(deny_unknown_fields)] pub struct JsonOptions { #[serde(default)] pub no_pivot_root: bool, #[serde(default)] pub no_new_keyring: bool, pub shim_cgroup: ::std::string::String, #[serde(default)] pub io_uid: u32, #[serde(default)] pub io_gid: u32, pub binary_name: ::std::string::String, pub root: ::std::string::String, #[serde(default)] pub systemd_cgroup: bool, pub criu_image_path: ::std::string::String, pub criu_work_path: ::std::string::String, #[serde(default)] pub task_api_address: ::std::string::String, #[serde(default)] pub task_api_version: u32, } impl From for JsonOptions { fn from(o: Options) -> Self { Self { no_pivot_root: o.no_pivot_root, no_new_keyring: o.no_new_keyring, shim_cgroup: o.shim_cgroup, io_uid: o.io_uid, io_gid: o.io_gid, binary_name: o.binary_name, root: o.root, systemd_cgroup: o.systemd_cgroup, criu_image_path: o.criu_image_path, criu_work_path: o.criu_work_path, task_api_address: o.task_api_address, task_api_version: o.task_api_version, } } } impl From for Options { fn from(j: JsonOptions) -> Self { Self { no_pivot_root: j.no_pivot_root, no_new_keyring: 
j.no_new_keyring, shim_cgroup: j.shim_cgroup, io_uid: j.io_uid, io_gid: j.io_gid, binary_name: j.binary_name, root: j.root, systemd_cgroup: j.systemd_cgroup, criu_image_path: j.criu_image_path, criu_work_path: j.criu_work_path, task_api_address: j.task_api_address, task_api_version: j.task_api_version, ..Default::default() } } } #[cfg(unix)] pub fn connect(address: impl AsRef) -> Result { use std::os::fd::IntoRawFd; use nix::{sys::socket::*, unistd::close}; let unix_addr = UnixAddr::new(address.as_ref())?; // SOCK_CLOEXEC flag is Linux specific #[cfg(target_os = "linux")] const SOCK_CLOEXEC: SockFlag = SockFlag::SOCK_CLOEXEC; #[cfg(not(target_os = "linux"))] const SOCK_CLOEXEC: SockFlag = SockFlag::empty(); let fd = socket(AddressFamily::Unix, SockType::Stream, SOCK_CLOEXEC, None)?.into_raw_fd(); // MacOS doesn't support atomic creation of a socket descriptor with `SOCK_CLOEXEC` flag, // so there is a chance of leak if fork + exec happens in between of these calls. #[cfg(not(target_os = "linux"))] { use std::os::fd::BorrowedFd; use nix::fcntl::{fcntl, FcntlArg, FdFlag}; // SAFETY: fd is a valid file descriptor that we just created let borrowed_fd = unsafe { BorrowedFd::borrow_raw(fd) }; fcntl(borrowed_fd, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC)).inspect_err(|_| { let _ = close(fd); })?; } connect(fd, &unix_addr).inspect_err(|_| { let _ = close(fd); })?; Ok(fd) } pub fn timestamp() -> Result { let now = SystemTime::now().duration_since(UNIX_EPOCH)?; let ts = Timestamp { seconds: now.as_secs() as _, nanos: now.subsec_nanos() as _, ..Default::default() }; Ok(ts) } pub fn convert_to_timestamp(exited_at: Option) -> Timestamp { let mut ts = Timestamp::new(); if let Some(ea) = exited_at { ts.seconds = ea.unix_timestamp(); ts.nanos = ea.nanosecond() as i32; } ts } pub fn convert_to_any(obj: Box) -> Result { let mut data = Vec::new(); obj.write_to_vec_dyn(&mut data)?; let mut any = Any::new(); any.value = data; any.type_url = obj.descriptor_dyn().full_name().to_string(); 
Ok(any) } pub trait IntoOption where Self: Sized, { fn none_if(self, callback: F) -> Option where F: Fn(&Self) -> bool, { if callback(&self) { None } else { Some(self) } } } impl IntoOption for T {} pub trait AsOption { fn as_option(&self) -> Option<&Self>; } impl AsOption for str { fn as_option(&self) -> Option<&Self> { if self.is_empty() { None } else { Some(self) } } } #[cfg(test)] mod tests { use super::*; #[test] fn test_timestamp() { let ts = timestamp().unwrap(); assert!(ts.seconds > 0); } } ================================================ FILE: crates/shim-protos/Cargo.toml ================================================ [package] name = "containerd-shim-protos" version = "0.11.0" authors = [ "Maksym Pavlenko ", "The containerd Authors", ] description = "TTRPC bindings for containerd shim interfaces" keywords = ["containerd", "shim", "containers", "ttrpc", "client"] categories = ["api-bindings", "asynchronous"] edition.workspace = true license.workspace = true repository.workspace = true homepage.workspace = true [features] default = [] async = ["ttrpc/async", "async-trait"] sandbox = [] docs = [] [[example]] name = "shim-proto-server" path = "examples/ttrpc-server.rs" [[example]] name = "shim-proto-client" path = "examples/ttrpc-client.rs" [[example]] name = "shim-proto-connect" path = "examples/connect.rs" [[example]] name = "shim-proto-server-async" path = "examples/ttrpc-server-async.rs" required-features = ["async"] [[example]] name = "shim-proto-client-async" path = "examples/ttrpc-client-async.rs" required-features = ["async"] [[example]] name = "shim-proto-connect-async" path = "examples/connect-async.rs" required-features = ["async"] [dependencies] async-trait = { workspace = true, optional = true } protobuf = { version = "3.7", default-features = false } ttrpc = { version = "0.9", default-features = false, features = ["sync"] } [build-dependencies] ttrpc-codegen = "0.6.0" [dev-dependencies] ctrlc = { version = "3.5", default-features = false, 
features = ["termination"] } simple_logger = { workspace = true, features = ["stderr"] } tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } crossbeam = { workspace = true, features = ["crossbeam-channel"] } # Used by create_ttrpc_context() [package.metadata.docs.rs] features = ["docs"] ================================================ FILE: crates/shim-protos/README.md ================================================ # Shim protos and client for containerd [![Crates.io](https://img.shields.io/crates/v/containerd-shim-protos)](https://crates.io/crates/containerd-shim-protos) [![docs.rs](https://img.shields.io/docsrs/containerd-shim-protos)](https://docs.rs/containerd-shim-protos/latest/containerd_shim_protos/) [![Crates.io](https://img.shields.io/crates/l/containerd-shim-protos)](https://github.com/containerd/rust-extensions/blob/main/LICENSE) [![CI](https://github.com/containerd/rust-extensions/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/containerd/rust-extensions/actions/workflows/ci.yml) `containerd-shim-protos` contains TTRPC bindings and client/server code to interact with containerd's runtime v2 shims. ## Runtime This crate is mainly expected to be useful to interact with containerd's shim runtime. Runtime v2 introduces a first class shim API for runtime authors to integrate with containerd. The shim API is minimal and scoped to the execution lifecycle of a container. To learn how containerd's shim v2 runtime works in details, please refer to the [documentation](https://github.com/containerd/containerd/blob/main/core/runtime/v2/README.md). ## Design The `containerd-shim-protos` crate provides [Protobuf](https://github.com/protocolbuffers/protobuf.git) message and [TTRPC](https://github.com/containerd/ttrpc.git) service definitions for the [Containerd shim v2](https://github.com/containerd/containerd/blob/main/api/runtime/task/v2/shim.proto) protocol. 
The message and service definitions are auto-generated from protobuf source files under `vendor/`
by using [ttrpc-codegen](https://github.com/containerd/ttrpc-rust/tree/master/ttrpc-codegen).
So please do not edit those auto-generated source files. If upgrading/modification is needed,
please follow the steps:
- Synchronize the latest protobuf source files from the upstream projects into directory 'vendor/'.
- Re-generate the source files by `cargo build --features=generate_bindings`.
- Commit the synchronized protobuf source files and auto-generated source files, keeping them
  in synchronization.

## Usage

Add `containerd-shim-protos` as a dependency in your `Cargo.toml`

```toml
[dependencies]
containerd-shim-protos = "0.11"
```

Basic client code looks as follows:

```rust,no_run
use containerd_shim_protos as client;

let client = client::Client::connect("unix:///containerd-shim/shim.sock").expect("Failed to connect to shim");
let task_client = client::TaskClient::new(client);
let context = client::ttrpc::context::with_timeout(0);

let req = client::api::ConnectRequest {
    id: String::from("1"),
    ..Default::default()
};

let resp = task_client.connect(context, &req).expect("Connect request failed");
```

## Example

- [TTRPC shim client](./examples/ttrpc-client.rs)
- [TTRPC shim server](./examples/ttrpc-server.rs)
- [TTRPC client connect](./examples/connect.rs).
The way to build the example: ```bash # build sync connect, client and server $ cargo build --example shim-proto-connect $ sudo ./shim-proto-connect unix:///containerd-shim/shim_socket_path.sock $ cargo build --example shim-proto-client $ cargo build --example shim-proto-server # build async connect, client and server $ cargo build --example shim-proto-connect-async --features async $ cargo build --example shim-proto-client-async --features async $ cargo build --example shim-proto-server-async --features async ``` ================================================ FILE: crates/shim-protos/build.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ use std::{ env, fs, fs::File, io::{BufRead, BufReader}, path::PathBuf, }; use ttrpc_codegen::{Codegen, Customize, ProtobufCustomize}; fn main() { genmodule( "types", &[ "vendor/gogoproto/gogo.proto", "vendor/google/protobuf/empty.proto", "vendor/github.com/containerd/containerd/api/types/fieldpath.proto", "vendor/github.com/containerd/containerd/api/types/mount.proto", "vendor/github.com/containerd/containerd/api/types/task/task.proto", "vendor/github.com/containerd/containerd/api/types/introspection.proto", #[cfg(feature = "sandbox")] "vendor/github.com/containerd/containerd/api/types/platform.proto", ], false, ); genmodule( "cgroups", &["vendor/github.com/containerd/cgroups/stats/v1/metrics.proto"], false, ); genmodule( "cgroups_v2", &["vendor/github.com/containerd/cgroups/cgroup2/stats/metrics.proto"], false, ); genmodule( "stats", &["vendor/microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/stats.proto"], false, ); genmodule( "events", &[ "vendor/github.com/containerd/containerd/api/types/mount.proto", "vendor/github.com/containerd/containerd/api/events/container.proto", "vendor/github.com/containerd/containerd/api/events/content.proto", "vendor/github.com/containerd/containerd/api/events/image.proto", "vendor/github.com/containerd/containerd/api/events/namespace.proto", "vendor/github.com/containerd/containerd/api/events/sandbox.proto", "vendor/github.com/containerd/containerd/api/events/snapshot.proto", "vendor/github.com/containerd/containerd/api/events/task.proto", ], false, ); genmodule( "shim", &[ "vendor/github.com/containerd/containerd/api/types/runc/options/oci.proto", "vendor/github.com/containerd/containerd/api/runtime/task/v2/shim.proto", "vendor/github.com/containerd/containerd/api/types/event.proto", "vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto", ], false, ); #[cfg(feature = "async")] { genmodule( "shim_async", &[ "vendor/github.com/containerd/containerd/api/runtime/task/v2/shim.proto", 
"vendor/github.com/containerd/containerd/api/types/event.proto", "vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto", ], true, ); } #[cfg(feature = "sandbox")] { genmodule( "sandbox", &[ "vendor/github.com/containerd/containerd/api/types/metrics.proto", "vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.proto", ], false, ); #[cfg(feature = "async")] genmodule( "sandbox_async", &[ "vendor/github.com/containerd/containerd/api/types/metrics.proto", "vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.proto", ], true, ); } } fn genmodule(name: &str, inputs: &[&str], async_all: bool) { let mut out_path = PathBuf::from(env::var("OUT_DIR").unwrap()); out_path.push(name); fs::create_dir_all(&out_path).unwrap(); Codegen::new() .inputs(inputs) // Order matters: containerd/api first so containerd's internal // imports like `types/fieldpath.proto` (new in v2.3.0) resolve // there. `vendor/` second for third-party imports // (google/, gogoproto/, microsoft/, github.com/containerd/cgroups/). 
.include("vendor/github.com/containerd/containerd/api/") .include("vendor/") .rust_protobuf() .rust_protobuf_customize( ProtobufCustomize::default() .gen_mod_rs(true) .generate_accessors(true), ) .customize(Customize { async_all, ..Default::default() }) .out_dir(&out_path) .run() .expect("Failed to generate protos"); // Find all *.rs files generated by TTRPC codegen let files = fs::read_dir(&out_path) .unwrap() .filter_map(|entry| { let entry = entry.unwrap(); if !entry.file_type().unwrap().is_file() { None } else { Some(entry.path()) } }) .collect::>(); // `include!` doesn't handle files with attributes: // - https://github.com/rust-lang/rust/issues/18810 // - https://github.com/rust-lang/rfcs/issues/752 // Remove all lines that start with: // - #![allow(unknown_lints)] // - #![cfg_attr(rustfmt, rustfmt::skip)] // for path in files { let file = File::open(&path).unwrap(); let joined = BufReader::new(file) .lines() .filter_map(|line| { let line = line.unwrap(); if line.starts_with("#!") || line.starts_with("//!") { None } else { Some(line) } }) .collect::>() .join("\r\n"); fs::write(&path, joined).unwrap(); } } ================================================ FILE: crates/shim-protos/examples/connect-async.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/

use std::env;

use client::{api, shim::shim_ttrpc_async::TaskClient};
use containerd_shim_protos as client;
use ttrpc::{asynchronous::Client, context::Context};

/// Async demo: connect to a shim socket and send Connect + Shutdown requests.
#[tokio::main]
async fn main() {
    let args: Vec<String> = env::args().collect();

    // First CLI argument is the shim socket path; second (optional) a task id.
    let socket_path = args
        .get(1)
        .ok_or("First argument must be shim socket path")
        .unwrap();
    let pid = args.get(2).map(|str| str.to_owned()).unwrap_or_default();

    println!("Connecting to {}...", socket_path);
    let client = Client::connect(socket_path)
        .await
        .expect("Failed to connect to shim");
    let task_client = TaskClient::new(client);

    let context = Context::default();

    let req = api::ConnectRequest {
        id: pid,
        ..Default::default()
    };

    println!("Sending `Connect` request...");
    let resp = task_client
        .connect(context.clone(), &req)
        .await
        .expect("Connect request failed");

    println!("Connect response: {:?}", resp);

    let req = api::ShutdownRequest {
        id: "123".to_string(),
        now: true,
        ..Default::default()
    };

    println!("Sending `Shutdown` request...");
    let resp = task_client
        .shutdown(context, &req)
        .await
        .expect("Failed to send shutdown request");
    println!("Shutdown response: {:?}", resp)
}

================================================
FILE: crates/shim-protos/examples/connect.rs
================================================
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

use std::env;

use client::api;
use containerd_shim_protos as client;
use ttrpc::context::Context;

/// Sync demo: connect to a shim socket and send Connect + Shutdown requests.
fn main() {
    let args: Vec<String> = env::args().collect();

    // First CLI argument is the shim socket path; second (optional) a task id.
    let socket_path = args
        .get(1)
        .ok_or("First argument must be shim socket path")
        .unwrap();
    let pid = args.get(2).map(|str| str.to_owned()).unwrap_or_default();

    println!("Connecting to {}...", socket_path);
    let client = client::Client::connect(socket_path).expect("Failed to connect to shim");
    let task_client = client::TaskClient::new(client);

    let context = Context::default();

    let req = api::ConnectRequest {
        id: pid,
        ..Default::default()
    };

    println!("Sending `Connect` request...");
    let resp = task_client
        .connect(context.clone(), &req)
        .expect("Connect request failed");

    println!("Connect response: {:?}", resp);

    let req = api::ShutdownRequest {
        id: "123".to_string(),
        now: true,
        ..Default::default()
    };

    println!("Sending `Shutdown` request...");
    let resp = task_client
        .shutdown(context, &req)
        .expect("Failed to send shutdown request");
    println!("Shutdown response: {:?}", resp)
}

================================================
FILE: crates/shim-protos/examples/ttrpc-client-async.rs
================================================
// Copyright (c) 2019 Ant Financial
// Copyright (c) 2021 Ant Group
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use containerd_shim_protos::{api::CreateTaskRequest, shim::shim_ttrpc_async::TaskClient};
use ttrpc::{
    asynchronous::Client,
    context::{self, Context},
};

// Build a request context carrying the metadata the example server asserts on.
fn default_ctx() -> Context {
    let mut ctx = context::with_timeout(0);
    ctx.add("key-1".to_string(), "value-1-1".to_string());
    ctx.add("key-1".to_string(), "value-1-2".to_string());
    ctx.set("key-2".to_string(), vec!["value-2".to_string()]);

    ctx
}

// Async demo client: sends a `create` request to the example TTRPC server
// at /tmp/shim-proto-ttrpc-001 and checks the echoed pid.
#[tokio::main]
async fn main() {
    let c = Client::connect("unix:///tmp/shim-proto-ttrpc-001")
        .await
        .unwrap();
    let task = TaskClient::new(c);

    let now = std::time::Instant::now();

    let mut req = CreateTaskRequest::new();
    req.set_id("id1".to_owned());

    println!(
        "OS Thread {:?} - task.create() started: {:?}",
        std::thread::current().id(),
        now.elapsed(),
    );
    let resp = task.create(default_ctx(), &req).await.unwrap();
    // Must match the pid hard-coded in the example servers.
    assert_eq!(resp.pid, 0x10c0);
    println!(
        "OS Thread {:?} - task.create() -> {:?} ended: {:?}",
        std::thread::current().id(),
        resp,
        now.elapsed(),
    );
}

================================================
FILE: crates/shim-protos/examples/ttrpc-client.rs
================================================
// Copyright (c) 2019 Ant Financial
// Copyright (c) 2021 Ant Group
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use containerd_shim_protos::{api::CreateTaskRequest, TaskClient};
use ttrpc::{
    context::{self, Context},
    Client,
};

// Sync demo client: sends a `create` request to the example TTRPC server
// at /tmp/shim-proto-ttrpc-001 and checks the echoed pid.
fn main() {
    let c = Client::connect("unix:///tmp/shim-proto-ttrpc-001").unwrap();
    let task = TaskClient::new(c);

    let now = std::time::Instant::now();

    let mut req = CreateTaskRequest::new();
    req.set_id("id1".to_owned());

    println!(
        "OS Thread {:?} - task.create() started: {:?}",
        std::thread::current().id(),
        now.elapsed(),
    );
    let resp = task.create(default_ctx(), &req).unwrap();
    // Must match the pid hard-coded in the example servers.
    assert_eq!(resp.pid, 0x10c0);
    println!(
        "OS Thread {:?} - task.create() -> {:?} ended: {:?}",
        std::thread::current().id(),
        resp,
        now.elapsed(),
    );
}

// Build a request context carrying the metadata the example server asserts on.
fn default_ctx() -> Context {
    let mut ctx = context::with_timeout(0);
    ctx.add("key-1".to_string(), "value-1-1".to_string());
    ctx.add("key-1".to_string(), "value-1-2".to_string());
    ctx.set("key-2".to_string(), vec!["value-2".to_string()]);

    ctx
}

================================================
FILE: crates/shim-protos/examples/ttrpc-server-async.rs
================================================
// Copyright (c) 2019 Ant Financial
// Copyright (c) 2021 Ant Group
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::{sync::Arc, thread}; use async_trait::async_trait; use containerd_shim_protos::{ api::{CreateTaskRequest, CreateTaskResponse}, shim::shim_ttrpc_async::{create_task, Task}, }; use ttrpc::asynchronous::Server; #[derive(Debug, PartialEq)] struct FakeServer { magic: u32, } impl FakeServer { fn new() -> Self { FakeServer { magic: 0xadcbdacf } } } #[async_trait] impl Task for FakeServer { async fn create( &self, ctx: &::ttrpc::asynchronous::TtrpcContext, req: CreateTaskRequest, ) -> ::ttrpc::Result { let mut resp = CreateTaskResponse::default(); let md = &ctx.metadata; let v1 = md.get("key-1").unwrap(); let v2 = md.get("key-2").unwrap(); assert_eq!(v1[0], "value-1-1"); assert_eq!(v1[1], "value-1-2"); assert_eq!(v2[0], "value-2"); assert_eq!(&req.id, "id1"); resp.set_pid(0x10c0); Ok(resp) } } #[tokio::main] async fn main() { simple_logger::SimpleLogger::new().init().unwrap(); let tservice = create_task(Arc::new(FakeServer::new())); let mut server = Server::new() .bind("unix:///tmp/shim-proto-ttrpc-001") .unwrap() .register_service(tservice); server.start().await.unwrap(); // Hold the main thread until receiving signal SIGTERM let (tx, rx) = std::sync::mpsc::channel(); thread::spawn(move || { ctrlc::set_handler(move || { tx.send(()).unwrap(); }) .expect("Error setting Ctrl-C handler"); println!("Server is running, press Ctrl + C to exit"); }); rx.recv().unwrap(); } ================================================ FILE: crates/shim-protos/examples/ttrpc-server.rs ================================================ // Copyright (c) 2019 Ant Financial // Copyright (c) 2021 Ant Group // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::{sync::Arc, thread}; use containerd_shim_protos::{ api::{CreateTaskRequest, CreateTaskResponse}, create_task, Task, }; use ttrpc::Server; #[derive(Debug, PartialEq)] struct FakeServer { magic: u32, } impl FakeServer { fn new() -> Self { FakeServer { magic: 0xadcbdacf } } } impl Task for FakeServer { fn create( &self, ctx: &::ttrpc::TtrpcContext, req: CreateTaskRequest, ) -> ::ttrpc::Result { let mut resp = CreateTaskResponse::default(); let md = &ctx.metadata; let v1 = md.get("key-1").unwrap(); let v2 = md.get("key-2").unwrap(); assert_eq!(v1[0], "value-1-1"); assert_eq!(v1[1], "value-1-2"); assert_eq!(v2[0], "value-2"); assert_eq!(&req.id, "id1"); resp.set_pid(0x10c0); Ok(resp) } } fn main() { simple_logger::SimpleLogger::new().init().unwrap(); let tservice = create_task(Arc::new(FakeServer::new())); let mut server = Server::new() .bind("unix:///tmp/shim-proto-ttrpc-001") .unwrap() .register_service(tservice); server.start().unwrap(); // Hold the main thread until receiving signal SIGTERM let (tx, rx) = std::sync::mpsc::channel(); thread::spawn(move || { ctrlc::set_handler(move || { tx.send(()).unwrap(); }) .expect("Error setting Ctrl-C handler"); println!("Server is running, press Ctrl + C to exit"); }); rx.recv().unwrap(); } ================================================ FILE: crates/shim-protos/rsync.txt ================================================ api/events/*.proto api/types/*.proto api/types/task/*.proto api/services/ttrpc/events/v1/*.proto api/types/runc/options/oci.proto api/runtime/sandbox/v1/sandbox.proto 
api/runtime/task/v2/shim.proto ================================================ FILE: crates/shim-protos/src/cgroups.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ pub mod metrics { include!(concat!(env!("OUT_DIR"), "/cgroups/metrics.rs")); } mod gogo { pub use crate::types::gogo::*; } ================================================ FILE: crates/shim-protos/src/cgroups_v2.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ pub mod metrics { include!(concat!(env!("OUT_DIR"), "/cgroups_v2/metrics.rs")); } ================================================ FILE: crates/shim-protos/src/events.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ pub mod container { include!(concat!(env!("OUT_DIR"), "/events/container.rs")); } pub mod content { include!(concat!(env!("OUT_DIR"), "/events/content.rs")); } pub mod image { include!(concat!(env!("OUT_DIR"), "/events/image.rs")); } pub mod namespace { include!(concat!(env!("OUT_DIR"), "/events/namespace.rs")); } pub mod snapshot { include!(concat!(env!("OUT_DIR"), "/events/snapshot.rs")); } pub mod task { include!(concat!(env!("OUT_DIR"), "/events/task.rs")); } pub mod sandbox { include!(concat!(env!("OUT_DIR"), "/events/sandbox.rs")); } mod mount { pub use crate::types::mount::*; } mod gogo { pub use crate::types::gogo::*; } mod fieldpath { pub use crate::types::fieldpath::*; } ================================================ FILE: crates/shim-protos/src/lib.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/

#![cfg_attr(feature = "docs", doc = include_str!("../README.md"))]
#![allow(warnings)]

pub use protobuf;
pub use ttrpc;

pub mod cgroups;
pub mod cgroups_v2;
pub mod events;
#[cfg(feature = "sandbox")]
mod sandbox;
pub mod shim;
pub mod types;
pub mod windows;

/// Includes event names shims can publish to containerd.
pub mod topics;

/// Synchronous ttrpc client and shim service re-exports.
pub mod shim_sync {
    /// TTRPC client reexport for easier access.
    pub use ttrpc::Client;

    /// Shim events service.
    pub use crate::shim::events_ttrpc::{create_events, Events, EventsClient};
    /// Shim task service.
    pub use crate::shim::shim_ttrpc::{create_task, Task, TaskClient};
}

// The sync API is also re-exported at the crate root, so existing callers
// can keep using `containerd_shim_protos::TaskClient` etc. directly.
pub use shim_sync::*;

/// Asynchronous counterparts of the `shim_sync` re-exports, gated behind
/// the `async` feature.
#[cfg(feature = "async")]
pub mod shim_async {
    /// TTRPC client reexport for easier access.
    pub use ttrpc::asynchronous::Client;

    /// Shim events service.
    pub use crate::shim::events_ttrpc_async::{create_events, Events, EventsClient};
    /// Shim task service.
    pub use crate::shim::shim_ttrpc_async::{create_task, Task, TaskClient};
}

/// Reexport auto-generated public data structures.
pub mod api {
    pub use crate::shim::{empty::*, events::*, mount::*, shim::*, task::*};
}

#[cfg(feature = "sandbox")]
pub use sandbox::sandbox as sandbox_api;

/// Synchronous sandbox service bindings (requires the `sandbox` feature).
#[cfg(feature = "sandbox")]
pub mod sandbox_sync {
    pub use crate::sandbox::sandbox_ttrpc::*;
}

/// Asynchronous sandbox service bindings (requires `sandbox` and `async`).
#[cfg(all(feature = "sandbox", feature = "async"))]
pub mod sandbox_async {
    pub use crate::sandbox::sandbox_async::*;
}
================================================
FILE: crates/shim-protos/src/sandbox.rs
================================================
/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License. */ pub mod sandbox { include!(concat!(env!("OUT_DIR"), "/sandbox/sandbox.rs")); } pub mod metrics { include!(concat!(env!("OUT_DIR"), "/sandbox/metrics.rs")); } pub mod sandbox_ttrpc { include!(concat!(env!("OUT_DIR"), "/sandbox/sandbox_ttrpc.rs")); } #[cfg(feature = "async")] pub mod sandbox_async { include!(concat!(env!("OUT_DIR"), "/sandbox_async/sandbox_ttrpc.rs")); } pub(crate) mod mount { pub use crate::types::mount::*; } pub(crate) mod platform { pub use crate::types::platform::*; } ================================================ FILE: crates/shim-protos/src/shim.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ pub mod oci { include!(concat!(env!("OUT_DIR"), "/shim/oci.rs")); } pub mod event { include!(concat!(env!("OUT_DIR"), "/shim/event.rs")); } pub mod events { include!(concat!(env!("OUT_DIR"), "/shim/events.rs")); } pub mod events_ttrpc { include!(concat!(env!("OUT_DIR"), "/shim/events_ttrpc.rs")); } #[cfg(feature = "async")] pub mod events_ttrpc_async { include!(concat!(env!("OUT_DIR"), "/shim_async/events_ttrpc.rs")); } pub mod shim { include!(concat!(env!("OUT_DIR"), "/shim/shim.rs")); } pub mod shim_ttrpc { include!(concat!(env!("OUT_DIR"), "/shim/shim_ttrpc.rs")); } #[cfg(feature = "async")] pub mod shim_ttrpc_async { include!(concat!(env!("OUT_DIR"), "/shim_async/shim_ttrpc.rs")); } pub(crate) mod empty { pub use crate::types::empty::*; } pub(crate) mod mount { pub use crate::types::mount::*; } pub(crate) mod task { pub use crate::types::task::*; } mod fieldpath { pub use crate::types::fieldpath::*; } mod gogo { pub use crate::types::gogo::*; } /// Shim events service. pub use events_ttrpc::{create_events, Events, EventsClient}; /// Shim task service. pub use shim_ttrpc::{create_task, Task, TaskClient}; ================================================ FILE: crates/shim-protos/src/topics.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ //! Task event topic typically used in shim implementations. 
/// Event topic published when a task is created.
pub const TASK_CREATE_EVENT_TOPIC: &str = "/tasks/create";
/// Event topic published when a task is started.
pub const TASK_START_EVENT_TOPIC: &str = "/tasks/start";
/// Event topic published when a task hits an out-of-memory condition.
pub const TASK_OOM_EVENT_TOPIC: &str = "/tasks/oom";
/// Event topic published when a task exits.
pub const TASK_EXIT_EVENT_TOPIC: &str = "/tasks/exit";
/// Event topic published when a task is deleted.
pub const TASK_DELETE_EVENT_TOPIC: &str = "/tasks/delete";
/// Event topic published when an exec process is added to a task.
pub const TASK_EXEC_ADDED_EVENT_TOPIC: &str = "/tasks/exec-added";
/// Event topic published when an exec process is started.
pub const TASK_EXEC_STARTED_EVENT_TOPIC: &str = "/tasks/exec-started";
/// Event topic published when a task is paused.
pub const TASK_PAUSED_EVENT_TOPIC: &str = "/tasks/paused";
/// Event topic published when a task is resumed.
pub const TASK_RESUMED_EVENT_TOPIC: &str = "/tasks/resumed";
/// Event topic published when a task is checkpointed.
pub const TASK_CHECKPOINTED_EVENT_TOPIC: &str = "/tasks/checkpointed";
/// Fallback topic for task events that match none of the topics above.
pub const TASK_UNKNOWN_TOPIC: &str = "/tasks/?";
================================================
FILE: crates/shim-protos/src/types.rs
================================================
/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/ pub mod empty { include!(concat!(env!("OUT_DIR"), "/types/empty.rs")); } pub mod gogo { include!(concat!(env!("OUT_DIR"), "/types/gogo.rs")); } pub mod mount { include!(concat!(env!("OUT_DIR"), "/types/mount.rs")); } pub mod task { include!(concat!(env!("OUT_DIR"), "/types/task.rs")); } pub mod fieldpath { include!(concat!(env!("OUT_DIR"), "/types/fieldpath.rs")); } pub mod introspection { include!(concat!(env!("OUT_DIR"), "/types/introspection.rs")); } #[cfg(feature = "sandbox")] pub mod platform { include!(concat!(env!("OUT_DIR"), "/types/platform.rs")); } ================================================ FILE: crates/shim-protos/src/windows.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ pub mod stats { include!(concat!(env!("OUT_DIR"), "/stats/stats.rs")); } pub mod metrics { pub use crate::cgroups::metrics::{file_descriptor, Metrics}; } ================================================ FILE: crates/shim-protos/tests/ttrpc.rs ================================================ // Copyright (c) 2021 Alibaba Cloud // // SPDX-License-Identifier: Apache-2.0 // use std::{ collections::HashMap, sync::{mpsc::channel, Arc}, }; use containerd_shim_protos::{ api::{CreateTaskRequest, CreateTaskResponse, DeleteRequest}, shim::shim_ttrpc::create_task, Task, }; use protobuf::{CodedInputStream, CodedOutputStream, Message}; use ttrpc::{Code, MessageHeader, Request, Response, TtrpcContext}; const MESSAGE_TYPE_REQUEST: u8 = 0x1; const MESSAGE_TYPE_RESPONSE: u8 = 0x2; #[derive(Debug, PartialEq)] struct FakeServer { magic: u32, } impl FakeServer { fn new() -> Self { FakeServer { magic: 0xadcbdacf } } } impl Task for FakeServer { fn create( &self, _ctx: &::ttrpc::TtrpcContext, req: CreateTaskRequest, ) -> ::ttrpc::Result { let mut resp = CreateTaskResponse::default(); assert_eq!(&req.id, "test1"); resp.set_pid(0x10c0); assert_eq!(resp.compute_size(), 3); Ok(resp) } } fn create_ttrpc_context() -> ( TtrpcContext, std::sync::mpsc::Receiver<(MessageHeader, Vec)>, ) { let (res_tx, rx) = channel(); let mh = MessageHeader { type_: MESSAGE_TYPE_REQUEST, ..Default::default() }; let (_, cancel_rx) = crossbeam::channel::unbounded(); let ctx = TtrpcContext { fd: -1, mh, res_tx, metadata: HashMap::new(), timeout_nano: 0, cancel_rx, }; (ctx, rx) } #[test] fn test_task_method_num() { let task = create_task(Arc::new(FakeServer::new())); assert_eq!(task.len(), 17); } #[test] fn test_create_task() { let mut req = CreateTaskRequest::default(); req.set_id("test1".to_owned()); let mut buf = Vec::with_capacity(req.compute_size() as usize); { let mut s = CodedOutputStream::vec(&mut buf); req.write_to(&mut s).unwrap(); s.flush().unwrap(); } assert_eq!(buf.len(), 7); let (ctx, rx) = 
create_ttrpc_context(); let mut request = Request::new(); request.set_service("containerd.task.v2.Task".to_owned()); request.set_method("Create".to_owned()); request.set_payload(buf); request.set_timeout_nano(10000); request.set_metadata(ttrpc::context::to_pb(ctx.metadata.clone())); let task = create_task(Arc::new(FakeServer::new())); let create = task.get("/containerd.task.v2.Task/Create").unwrap(); create.handler(ctx, request).unwrap(); let (header, msg) = rx.recv().unwrap(); assert_eq!(header.length, 7); assert_eq!(header.type_, MESSAGE_TYPE_RESPONSE); assert_eq!(header.flags, 0); assert_eq!(msg.len(), 7); let mut s = CodedInputStream::from_bytes(&msg); let mut response = Response::new(); response.merge_from(&mut s).unwrap(); assert_eq!(response.status().code(), Code::OK); let mut s = CodedInputStream::from_bytes(&response.payload); let mut resp = CreateTaskResponse::new(); resp.merge_from(&mut s).unwrap(); assert_eq!(resp.pid, 0x10c0); } #[test] fn test_delete_task() { let mut req = DeleteRequest::default(); req.set_id("test1".to_owned()); let mut buf = Vec::with_capacity(req.compute_size() as usize); { let mut s = CodedOutputStream::vec(&mut buf); req.write_to(&mut s).unwrap(); s.flush().unwrap(); } assert_eq!(buf.len(), 7); let (ctx, rx) = create_ttrpc_context(); let mut request = Request::new(); request.set_service("containerd.task.v2.Task".to_owned()); request.set_method("Delete".to_owned()); request.set_payload(buf); request.set_timeout_nano(10000); request.set_metadata(ttrpc::context::to_pb(ctx.metadata.clone())); let task = create_task(Arc::new(FakeServer::new())); let delete = task.get("/containerd.task.v2.Task/Delete").unwrap(); delete.handler(ctx, request).unwrap(); let (header, msg) = rx.recv().unwrap(); assert_eq!(header.length, 54); assert_eq!(header.type_, MESSAGE_TYPE_RESPONSE); assert_eq!(header.flags, 0); assert_eq!(msg.len(), 54); let mut s = CodedInputStream::from_bytes(&msg); let mut response = Response::new(); response.merge_from(&mut 
s).unwrap(); assert_ne!(response.status().code(), Code::OK); } ================================================ FILE: crates/shim-protos/vendor/README.md ================================================ # Vendor This directory contains vendor dependencies needed to generate protobuf bindings. Proto files are copy-pasted directly from upstream repos: + https://github.com/containerd/containerd + https://github.com/protocolbuffers/protobuf + https://github.com/gogo/protobuf + https://github.com/containerd/cgroups + https://github.com/microsoft/hcsshim ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/cgroups/cgroup2/stats/metrics.proto ================================================ syntax = "proto3"; package io.containerd.cgroups.v2; option go_package = "github.com/containerd/cgroups/cgroup2/stats"; message Metrics { PidsStat pids = 1; CPUStat cpu = 2; MemoryStat memory = 4; RdmaStat rdma = 5; IOStat io = 6; repeated HugeTlbStat hugetlb = 7; MemoryEvents memory_events = 8; repeated NetworkStat network = 9; } message PSIData { double avg10 = 1; double avg60 = 2; double avg300 = 3; uint64 total = 4; } message PSIStats { PSIData some = 1; PSIData full = 2; } message PidsStat { uint64 current = 1; uint64 limit = 2; } message CPUStat { uint64 usage_usec = 1; uint64 user_usec = 2; uint64 system_usec = 3; uint64 nr_periods = 4; uint64 nr_throttled = 5; uint64 throttled_usec = 6; PSIStats psi = 7; uint64 nr_bursts = 8; uint64 burst_usec = 9; } message MemoryStat { uint64 anon = 1; uint64 file = 2; uint64 kernel_stack = 3; uint64 slab = 4; uint64 sock = 5; uint64 shmem = 6; uint64 file_mapped = 7; uint64 file_dirty = 8; uint64 file_writeback = 9; uint64 anon_thp = 10; uint64 inactive_anon = 11; uint64 active_anon = 12; uint64 inactive_file = 13; uint64 active_file = 14; uint64 unevictable = 15; uint64 slab_reclaimable = 16; uint64 slab_unreclaimable = 17; uint64 pgfault = 18; uint64 pgmajfault = 19; uint64 
workingset_refault = 20; uint64 workingset_activate = 21; uint64 workingset_nodereclaim = 22; uint64 pgrefill = 23; uint64 pgscan = 24; uint64 pgsteal = 25; uint64 pgactivate = 26; uint64 pgdeactivate = 27; uint64 pglazyfree = 28; uint64 pglazyfreed = 29; uint64 thp_fault_alloc = 30; uint64 thp_collapse_alloc = 31; uint64 usage = 32; uint64 usage_limit = 33; uint64 swap_usage = 34; uint64 swap_limit = 35; uint64 max_usage = 36; uint64 swap_max_usage = 37; PSIStats psi = 38; } message MemoryEvents { uint64 low = 1; uint64 high = 2; uint64 max = 3; uint64 oom = 4; uint64 oom_kill = 5; uint64 oom_group_kill = 6; } message RdmaStat { repeated RdmaEntry current = 1; repeated RdmaEntry limit = 2; } message RdmaEntry { string device = 1; uint32 hca_handles = 2; uint32 hca_objects = 3; } message IOStat { repeated IOEntry usage = 1; PSIStats psi = 2; } message IOEntry { uint64 major = 1; uint64 minor = 2; uint64 rbytes = 3; uint64 wbytes = 4; uint64 rios = 5; uint64 wios = 6; } message HugeTlbStat { uint64 current = 1; uint64 max = 2; string pagesize = 3; uint64 failcnt = 4; } message NetworkStat { string name = 1; uint64 rx_bytes = 2; uint64 rx_packets = 3; uint64 rx_errors = 4; uint64 rx_dropped = 5; uint64 tx_bytes = 6; uint64 tx_packets = 7; uint64 tx_errors = 8; uint64 tx_dropped = 9; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto ================================================ syntax = "proto3"; package io.containerd.cgroups.v1; import "gogoproto/gogo.proto"; message Metrics { repeated HugetlbStat hugetlb = 1; PidsStat pids = 2; CPUStat cpu = 3 [(gogoproto.customname) = "CPU"]; MemoryStat memory = 4; BlkIOStat blkio = 5; RdmaStat rdma = 6; repeated NetworkStat network = 7; CgroupStats cgroup_stats = 8; MemoryOomControl memory_oom_control = 9; } message HugetlbStat { uint64 usage = 1; uint64 max = 2; uint64 failcnt = 3; string pagesize = 4; } message PidsStat { uint64 current = 1; 
uint64 limit = 2; } message CPUStat { CPUUsage usage = 1; Throttle throttling = 2; } message CPUUsage { // values in nanoseconds uint64 total = 1; uint64 kernel = 2; uint64 user = 3; repeated uint64 per_cpu = 4 [(gogoproto.customname) = "PerCPU"]; } message Throttle { uint64 periods = 1; uint64 throttled_periods = 2; uint64 throttled_time = 3; } message MemoryStat { uint64 cache = 1; uint64 rss = 2 [(gogoproto.customname) = "RSS"]; uint64 rss_huge = 3 [(gogoproto.customname) = "RSSHuge"]; uint64 mapped_file = 4; uint64 dirty = 5; uint64 writeback = 6; uint64 pg_pg_in = 7; uint64 pg_pg_out = 8; uint64 pg_fault = 9; uint64 pg_maj_fault = 10; uint64 inactive_anon = 11; uint64 active_anon = 12; uint64 inactive_file = 13; uint64 active_file = 14; uint64 unevictable = 15; uint64 hierarchical_memory_limit = 16; uint64 hierarchical_swap_limit = 17; uint64 total_cache = 18; uint64 total_rss = 19 [(gogoproto.customname) = "TotalRSS"]; uint64 total_rss_huge = 20 [(gogoproto.customname) = "TotalRSSHuge"]; uint64 total_mapped_file = 21; uint64 total_dirty = 22; uint64 total_writeback = 23; uint64 total_pg_pg_in = 24; uint64 total_pg_pg_out = 25; uint64 total_pg_fault = 26; uint64 total_pg_maj_fault = 27; uint64 total_inactive_anon = 28; uint64 total_active_anon = 29; uint64 total_inactive_file = 30; uint64 total_active_file = 31; uint64 total_unevictable = 32; MemoryEntry usage = 33; MemoryEntry swap = 34; MemoryEntry kernel = 35; MemoryEntry kernel_tcp = 36 [(gogoproto.customname) = "KernelTCP"]; } message MemoryEntry { uint64 limit = 1; uint64 usage = 2; uint64 max = 3; uint64 failcnt = 4; } message MemoryOomControl { uint64 oom_kill_disable = 1; uint64 under_oom = 2; uint64 oom_kill = 3; } message BlkIOStat { repeated BlkIOEntry io_service_bytes_recursive = 1; repeated BlkIOEntry io_serviced_recursive = 2; repeated BlkIOEntry io_queued_recursive = 3; repeated BlkIOEntry io_service_time_recursive = 4; repeated BlkIOEntry io_wait_time_recursive = 5; repeated BlkIOEntry 
io_merged_recursive = 6; repeated BlkIOEntry io_time_recursive = 7; repeated BlkIOEntry sectors_recursive = 8; } message BlkIOEntry { string op = 1; string device = 2; uint64 major = 3; uint64 minor = 4; uint64 value = 5; } message RdmaStat { repeated RdmaEntry current = 1; repeated RdmaEntry limit = 2; } message RdmaEntry { string device = 1; uint32 hca_handles = 2; uint32 hca_objects = 3; } message NetworkStat { string name = 1; uint64 rx_bytes = 2; uint64 rx_packets = 3; uint64 rx_errors = 4; uint64 rx_dropped = 5; uint64 tx_bytes = 6; uint64 tx_packets = 7; uint64 tx_errors = 8; uint64 tx_dropped = 9; } // CgroupStats exports per-cgroup statistics. message CgroupStats { // number of tasks sleeping uint64 nr_sleeping = 1; // number of tasks running uint64 nr_running = 2; // number of tasks in stopped state uint64 nr_stopped = 3; // number of tasks in uninterruptible state uint64 nr_uninterruptible = 4; // number of tasks waiting on IO uint64 nr_io_wait = 5; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.proto ================================================ syntax = "proto3"; package io.containerd.cgroups.v1; option go_package = "github.com/containerd/cgroups/cgroup1/stats"; message Metrics { repeated HugetlbStat hugetlb = 1; PidsStat pids = 2; CPUStat cpu = 3; MemoryStat memory = 4; BlkIOStat blkio = 5; RdmaStat rdma = 6; repeated NetworkStat network = 7; CgroupStats cgroup_stats = 8; MemoryOomControl memory_oom_control = 9; } message HugetlbStat { uint64 usage = 1; uint64 max = 2; uint64 failcnt = 3; string pagesize = 4; } message PidsStat { uint64 current = 1; uint64 limit = 2; } message CPUStat { CPUUsage usage = 1; Throttle throttling = 2; } message CPUUsage { // values in nanoseconds uint64 total = 1; uint64 kernel = 2; uint64 user = 3; repeated uint64 per_cpu = 4; } message Throttle { uint64 periods = 1; uint64 throttled_periods = 2; uint64 throttled_time = 3; } 
message MemoryStat { uint64 cache = 1; uint64 rss = 2; uint64 rss_huge = 3; uint64 mapped_file = 4; uint64 dirty = 5; uint64 writeback = 6; uint64 pg_pg_in = 7; uint64 pg_pg_out = 8; uint64 pg_fault = 9; uint64 pg_maj_fault = 10; uint64 inactive_anon = 11; uint64 active_anon = 12; uint64 inactive_file = 13; uint64 active_file = 14; uint64 unevictable = 15; uint64 hierarchical_memory_limit = 16; uint64 hierarchical_swap_limit = 17; uint64 total_cache = 18; uint64 total_rss = 19; uint64 total_rss_huge = 20; uint64 total_mapped_file = 21; uint64 total_dirty = 22; uint64 total_writeback = 23; uint64 total_pg_pg_in = 24; uint64 total_pg_pg_out = 25; uint64 total_pg_fault = 26; uint64 total_pg_maj_fault = 27; uint64 total_inactive_anon = 28; uint64 total_active_anon = 29; uint64 total_inactive_file = 30; uint64 total_active_file = 31; uint64 total_unevictable = 32; MemoryEntry usage = 33; MemoryEntry swap = 34; MemoryEntry kernel = 35; MemoryEntry kernel_tcp = 36; } message MemoryEntry { uint64 limit = 1; uint64 usage = 2; uint64 max = 3; uint64 failcnt = 4; } message MemoryOomControl { uint64 oom_kill_disable = 1; uint64 under_oom = 2; uint64 oom_kill = 3; } message BlkIOStat { repeated BlkIOEntry io_service_bytes_recursive = 1; repeated BlkIOEntry io_serviced_recursive = 2; repeated BlkIOEntry io_queued_recursive = 3; repeated BlkIOEntry io_service_time_recursive = 4; repeated BlkIOEntry io_wait_time_recursive = 5; repeated BlkIOEntry io_merged_recursive = 6; repeated BlkIOEntry io_time_recursive = 7; repeated BlkIOEntry sectors_recursive = 8; } message BlkIOEntry { string op = 1; string device = 2; uint64 major = 3; uint64 minor = 4; uint64 value = 5; } message RdmaStat { repeated RdmaEntry current = 1; repeated RdmaEntry limit = 2; } message RdmaEntry { string device = 1; uint32 hca_handles = 2; uint32 hca_objects = 3; } message NetworkStat { string name = 1; uint64 rx_bytes = 2; uint64 rx_packets = 3; uint64 rx_errors = 4; uint64 rx_dropped = 5; uint64 tx_bytes = 6; 
uint64 tx_packets = 7;
uint64 tx_errors = 8;
uint64 tx_dropped = 9;
}

// CgroupStats exports per-cgroup statistics.
message CgroupStats {
	// number of tasks sleeping
	uint64 nr_sleeping = 1;
	// number of tasks running
	uint64 nr_running = 2;
	// number of tasks in stopped state
	uint64 nr_stopped = 3;
	// number of tasks in uninterruptible state
	uint64 nr_uninterruptible = 4;
	// number of tasks waiting on IO
	uint64 nr_io_wait = 5;
}
================================================
FILE: crates/shim-protos/vendor/github.com/containerd/containerd/api/events/container.proto
================================================
/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

syntax = "proto3";

package containerd.events;

import "google/protobuf/any.proto";
import "types/fieldpath.proto";

option go_package = "github.com/containerd/containerd/api/events;events";
option (containerd.types.fieldpath_all) = true;

message ContainerCreate {
	string id = 1;
	string image = 2;

	message Runtime {
		string name = 1;
		google.protobuf.Any options = 2;
	}

	Runtime runtime = 3;
}

message ContainerUpdate {
	string id = 1;
	string image = 2;
	// Fix: key/value types were stripped during extraction; a bare `map`
	// field is invalid proto3.
	map<string, string> labels = 3;
	string snapshot_key = 4;
}

message ContainerDelete {
	string id = 1;
}
================================================
FILE: crates/shim-protos/vendor/github.com/containerd/containerd/api/events/content.proto
================================================
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.events; import "types/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.types.fieldpath_all) = true; message ContentCreate { string digest = 1; int64 size = 2; } message ContentDelete { string digest = 1; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/api/events/image.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ syntax = "proto3"; package containerd.services.images.v1; import "types/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.types.fieldpath_all) = true; message ImageCreate { string name = 1; map labels = 2; } message ImageUpdate { string name = 1; map labels = 2; } message ImageDelete { string name = 1; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/api/events/namespace.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.events; import "types/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.types.fieldpath_all) = true; message NamespaceCreate { string name = 1; map labels = 2; } message NamespaceUpdate { string name = 1; map labels = 2; } message NamespaceDelete { string name = 1; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/api/events/sandbox.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.events; import "google/protobuf/timestamp.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; message SandboxCreate { string sandbox_id = 1; } message SandboxStart { string sandbox_id = 1; } message SandboxExit { string sandbox_id = 1; uint32 exit_status = 2; google.protobuf.Timestamp exited_at = 3; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/api/events/snapshot.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ syntax = "proto3"; package containerd.events; import "types/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.types.fieldpath_all) = true; message SnapshotPrepare { string key = 1; string parent = 2; string snapshotter = 5; } message SnapshotCommit { string key = 1; string name = 2; string snapshotter = 5; } message SnapshotRemove { string key = 1; string snapshotter = 5; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/api/events/task.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.events; import "google/protobuf/timestamp.proto"; import "types/fieldpath.proto"; import "types/mount.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.types.fieldpath_all) = true; message TaskCreate { string container_id = 1; string bundle = 2; repeated containerd.types.Mount rootfs = 3; TaskIO io = 4; string checkpoint = 5; uint32 pid = 6; } message TaskStart { string container_id = 1; uint32 pid = 2; } message TaskDelete { string container_id = 1; uint32 pid = 2; uint32 exit_status = 3; google.protobuf.Timestamp exited_at = 4; // id is the specific exec. By default if omitted will be `""` thus matches // the init exec of the task matching `container_id`. 
string id = 5; } message TaskIO { string stdin = 1; string stdout = 2; string stderr = 3; bool terminal = 4; } message TaskExit { string container_id = 1; string id = 2; uint32 pid = 3; uint32 exit_status = 4; google.protobuf.Timestamp exited_at = 5; } message TaskOOM { string container_id = 1; } message TaskExecAdded { string container_id = 1; string exec_id = 2; } message TaskExecStarted { string container_id = 1; string exec_id = 2; uint32 pid = 3; } message TaskPaused { string container_id = 1; } message TaskResumed { string container_id = 1; } message TaskCheckpointed { string container_id = 1; string checkpoint = 2; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.runtime.sandbox.v1; import "google/protobuf/any.proto"; import "google/protobuf/timestamp.proto"; import "types/metrics.proto"; import "types/mount.proto"; import "types/platform.proto"; option go_package = "github.com/containerd/containerd/api/runtime/sandbox/v1;sandbox"; // Sandbox is an optional interface that shim may implement to support sandboxes environments. // A typical example of sandbox is microVM or pause container - an entity that groups containers and/or // holds resources relevant for this group. 
service Sandbox { // CreateSandbox will be called right after sandbox shim instance launched. // It is a good place to initialize sandbox environment. rpc CreateSandbox(CreateSandboxRequest) returns (CreateSandboxResponse); // StartSandbox will start a previously created sandbox. rpc StartSandbox(StartSandboxRequest) returns (StartSandboxResponse); // Platform queries the platform the sandbox is going to run containers on. // containerd will use this to generate a proper OCI spec. rpc Platform(PlatformRequest) returns (PlatformResponse); // StopSandbox will stop existing sandbox instance rpc StopSandbox(StopSandboxRequest) returns (StopSandboxResponse); // WaitSandbox blocks until sandbox exits. rpc WaitSandbox(WaitSandboxRequest) returns (WaitSandboxResponse); // SandboxStatus will return current status of the running sandbox instance rpc SandboxStatus(SandboxStatusRequest) returns (SandboxStatusResponse); // PingSandbox is a lightweight API call to check whether sandbox alive. rpc PingSandbox(PingRequest) returns (PingResponse); // ShutdownSandbox must shutdown shim instance. rpc ShutdownSandbox(ShutdownSandboxRequest) returns (ShutdownSandboxResponse); // SandboxMetrics retrieves metrics about a sandbox instance. 
rpc SandboxMetrics(SandboxMetricsRequest) returns (SandboxMetricsResponse); } message CreateSandboxRequest { string sandbox_id = 1; string bundle_path = 2; repeated containerd.types.Mount rootfs = 3; google.protobuf.Any options = 4; string netns_path = 5; map annotations = 6; } message CreateSandboxResponse {} message StartSandboxRequest { string sandbox_id = 1; } message StartSandboxResponse { uint32 pid = 1; google.protobuf.Timestamp created_at = 2; google.protobuf.Any spec = 3; } message PlatformRequest { string sandbox_id = 1; } message PlatformResponse { containerd.types.Platform platform = 1; } message StopSandboxRequest { string sandbox_id = 1; uint32 timeout_secs = 2; } message StopSandboxResponse {} message UpdateSandboxRequest { string sandbox_id = 1; google.protobuf.Any resources = 2; map annotations = 3; } message WaitSandboxRequest { string sandbox_id = 1; } message WaitSandboxResponse { uint32 exit_status = 1; google.protobuf.Timestamp exited_at = 2; } message UpdateSandboxResponse {} message SandboxStatusRequest { string sandbox_id = 1; bool verbose = 2; } message SandboxStatusResponse { string sandbox_id = 1; uint32 pid = 2; string state = 3; map info = 4; google.protobuf.Timestamp created_at = 5; google.protobuf.Timestamp exited_at = 6; google.protobuf.Any extra = 7; } message PingRequest { string sandbox_id = 1; } message PingResponse {} message ShutdownSandboxRequest { string sandbox_id = 1; } message ShutdownSandboxResponse {} message SandboxMetricsRequest { string sandbox_id = 1; } message SandboxMetricsResponse { containerd.types.Metric metrics = 1; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/api/runtime/task/v2/shim.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.task.v2; import "google/protobuf/any.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/timestamp.proto"; import "types/mount.proto"; import "types/task/task.proto"; option go_package = "github.com/containerd/containerd/api/runtime/task/v2;task"; // Shim service is launched for each container and is responsible for owning the IO // for the container and its additional processes. The shim is also the parent of // each container and allows reattaching to the IO and receiving the exit status // for the container processes. service Task { rpc State(StateRequest) returns (StateResponse); rpc Create(CreateTaskRequest) returns (CreateTaskResponse); rpc Start(StartRequest) returns (StartResponse); rpc Delete(DeleteRequest) returns (DeleteResponse); rpc Pids(PidsRequest) returns (PidsResponse); rpc Pause(PauseRequest) returns (google.protobuf.Empty); rpc Resume(ResumeRequest) returns (google.protobuf.Empty); rpc Checkpoint(CheckpointTaskRequest) returns (google.protobuf.Empty); rpc Kill(KillRequest) returns (google.protobuf.Empty); rpc Exec(ExecProcessRequest) returns (google.protobuf.Empty); rpc ResizePty(ResizePtyRequest) returns (google.protobuf.Empty); rpc CloseIO(CloseIORequest) returns (google.protobuf.Empty); rpc Update(UpdateTaskRequest) returns (google.protobuf.Empty); rpc Wait(WaitRequest) returns (WaitResponse); rpc Stats(StatsRequest) returns (StatsResponse); rpc Connect(ConnectRequest) returns (ConnectResponse); rpc Shutdown(ShutdownRequest) returns (google.protobuf.Empty); } message CreateTaskRequest { string id = 1; 
string bundle = 2; repeated containerd.types.Mount rootfs = 3; bool terminal = 4; string stdin = 5; string stdout = 6; string stderr = 7; string checkpoint = 8; string parent_checkpoint = 9; google.protobuf.Any options = 10; } message CreateTaskResponse { uint32 pid = 1; } message DeleteRequest { string id = 1; string exec_id = 2; } message DeleteResponse { uint32 pid = 1; uint32 exit_status = 2; google.protobuf.Timestamp exited_at = 3; } message ExecProcessRequest { string id = 1; string exec_id = 2; bool terminal = 3; string stdin = 4; string stdout = 5; string stderr = 6; google.protobuf.Any spec = 7; } message ExecProcessResponse {} message ResizePtyRequest { string id = 1; string exec_id = 2; uint32 width = 3; uint32 height = 4; } message StateRequest { string id = 1; string exec_id = 2; } message StateResponse { string id = 1; string bundle = 2; uint32 pid = 3; containerd.v1.types.Status status = 4; string stdin = 5; string stdout = 6; string stderr = 7; bool terminal = 8; uint32 exit_status = 9; google.protobuf.Timestamp exited_at = 10; string exec_id = 11; } message KillRequest { string id = 1; string exec_id = 2; uint32 signal = 3; bool all = 4; } message CloseIORequest { string id = 1; string exec_id = 2; bool stdin = 3; } message PidsRequest { string id = 1; } message PidsResponse { repeated containerd.v1.types.ProcessInfo processes = 1; } message CheckpointTaskRequest { string id = 1; string path = 2; google.protobuf.Any options = 3; } message UpdateTaskRequest { string id = 1; google.protobuf.Any resources = 2; map annotations = 3; } message StartRequest { string id = 1; string exec_id = 2; } message StartResponse { uint32 pid = 1; } message WaitRequest { string id = 1; string exec_id = 2; } message WaitResponse { uint32 exit_status = 1; google.protobuf.Timestamp exited_at = 2; } message StatsRequest { string id = 1; } message StatsResponse { google.protobuf.Any stats = 1; } message ConnectRequest { string id = 1; } message ConnectResponse { uint32 
shim_pid = 1; uint32 task_pid = 2; string version = 3; } message ShutdownRequest { string id = 1; bool now = 2; } message PauseRequest { string id = 1; } message ResumeRequest { string id = 1; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.services.events.ttrpc.v1; import "google/protobuf/empty.proto"; import "types/event.proto"; option go_package = "github.com/containerd/containerd/api/services/ttrpc/events/v1;events"; service Events { // Forward sends an event that has already been packaged into an envelope // with a timestamp and namespace. // // This is useful if earlier timestamping is required or when forwarding on // behalf of another component, namespace or publisher. rpc Forward(ForwardRequest) returns (google.protobuf.Empty); } message ForwardRequest { containerd.types.Envelope envelope = 1; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/api/types/descriptor.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.types; option go_package = "github.com/containerd/containerd/api/types;types"; // Descriptor describes a blob in a content store. // // This descriptor can be used to reference content from an // oci descriptor found in a manifest. // See https://godoc.org/github.com/opencontainers/image-spec/specs-go/v1#Descriptor message Descriptor { string media_type = 1; string digest = 2; int64 size = 3; map annotations = 5; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/api/types/event.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/

syntax = "proto3";

package containerd.types;

import "google/protobuf/any.proto";
import "google/protobuf/timestamp.proto";
import "types/fieldpath.proto";

option go_package = "github.com/containerd/containerd/api/types;types";

message Envelope {
	option (containerd.types.fieldpath) = true;

	google.protobuf.Timestamp timestamp = 1;
	string namespace = 2;
	string topic = 3;
	google.protobuf.Any event = 4;
}

================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/api/types/fieldpath.proto ================================================

// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors.  All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

syntax = "proto3";

package containerd.types;

import "google/protobuf/descriptor.proto";

option go_package = "github.com/containerd/containerd/api/types;types";

extend google.protobuf.FileOptions {
	optional bool fieldpath_all = 63300;
}

extend google.protobuf.MessageOptions {
	optional bool fieldpath = 64400;
}

================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/api/types/introspection.proto ================================================

/*
	Copyright The containerd Authors.

	Licensed under the Apache License, Version 2.0 (the "License");
	you may not use this file except in compliance with the License.
	You may obtain a copy of the License at

		http://www.apache.org/licenses/LICENSE-2.0

	Unless required by applicable law or agreed to in writing, software
	distributed under the License is distributed on an "AS IS" BASIS,
	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
	See the License for the specific language governing permissions and
	limitations under the License.
*/

syntax = "proto3";

package containerd.types;

import "google/protobuf/any.proto";

option go_package = "github.com/containerd/containerd/api/types;types";

message RuntimeRequest {
	string runtime_path = 1;
	// Options correspond to CreateTaskRequest.options.
	// This is needed to pass the runc binary path, etc.
	google.protobuf.Any options = 2;
}

message RuntimeVersion {
	string version = 1;
	string revision = 2;
}

message RuntimeInfo {
	string name = 1;
	RuntimeVersion version = 2;
	// Options correspond to RuntimeInfoRequest.Options (contains runc binary path, etc.)
	google.protobuf.Any options = 3;
	// OCI-compatible runtimes should use https://github.com/opencontainers/runtime-spec/blob/main/features.md
	google.protobuf.Any features = 4;
	// Annotations of the shim. Irrelevant to features.Annotations.
	// NOTE(review): map type parameters were stripped during extraction;
	// restored to map<string, string> per upstream containerd.
	map<string, string> annotations = 5;
}

================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/api/types/metrics.proto ================================================

/*
	Copyright The containerd Authors.

	Licensed under the Apache License, Version 2.0 (the "License");
	you may not use this file except in compliance with the License.
	You may obtain a copy of the License at

		http://www.apache.org/licenses/LICENSE-2.0

	Unless required by applicable law or agreed to in writing, software
	distributed under the License is distributed on an "AS IS" BASIS,
	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
	See the License for the specific language governing permissions and
	limitations under the License.
*/

syntax = "proto3";

package containerd.types;

import "google/protobuf/any.proto";
import "google/protobuf/timestamp.proto";

option go_package = "github.com/containerd/containerd/api/types;types";

message Metric {
	google.protobuf.Timestamp timestamp = 1;
	string id = 2;
	google.protobuf.Any data = 3;
}

================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/api/types/mount.proto ================================================

/*
	Copyright The containerd Authors.

	Licensed under the Apache License, Version 2.0 (the "License");
	you may not use this file except in compliance with the License.
	You may obtain a copy of the License at

		http://www.apache.org/licenses/LICENSE-2.0

	Unless required by applicable law or agreed to in writing, software
	distributed under the License is distributed on an "AS IS" BASIS,
	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
	See the License for the specific language governing permissions and
	limitations under the License.
*/

syntax = "proto3";

package containerd.types;

import "google/protobuf/timestamp.proto";

option go_package = "github.com/containerd/containerd/api/types;types";

// Mount describes mounts for a container.
//
// This type is the lingua franca of ContainerD. All services provide mounts
// to be used with the container at creation time.
//
// The Mount type follows the structure of the mount syscall, including a type,
// source, target and options.
message Mount {
	// Type defines the nature of the mount.
	string type = 1;
	// Source specifies the name of the mount. Depending on mount type, this
	// may be a volume name or a host path, or even ignored.
	string source = 2;
	// Target path in container
	string target = 3;
	// Options specifies zero or more fstab style mount options.
	repeated string options = 4;
}

message ActiveMount {
	Mount mount = 1;
	google.protobuf.Timestamp mounted_at = 2;
	string mount_point = 3;
	// NOTE(review): map type parameters were stripped during extraction;
	// restored as map<string, string> — TODO confirm against upstream mount.proto.
	map<string, string> data = 4;
}

message ActivationInfo {
	string name = 1;
	repeated ActiveMount active = 2;
	repeated Mount system = 3;
	map<string, string> labels = 4;
}

================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/api/types/platform.proto ================================================

/*
	Copyright The containerd Authors.

	Licensed under the Apache License, Version 2.0 (the "License");
	you may not use this file except in compliance with the License.
	You may obtain a copy of the License at

		http://www.apache.org/licenses/LICENSE-2.0

	Unless required by applicable law or agreed to in writing, software
	distributed under the License is distributed on an "AS IS" BASIS,
	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
	See the License for the specific language governing permissions and
	limitations under the License.
*/

syntax = "proto3";

package containerd.types;

option go_package = "github.com/containerd/containerd/api/types;types";

// Platform follows the structure of the OCI platform specification, from
// descriptors.
message Platform {
	string os = 1;
	string architecture = 2;
	string variant = 3;
	string os_version = 4;
	repeated string os_features = 5;
}

================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/api/types/runc/options/oci.proto ================================================

syntax = "proto3";

package containerd.runc.v1;

option go_package = "github.com/containerd/containerd/api/types/runc/options;options";

message Options {
	// disable pivot root when creating a container
	bool no_pivot_root = 1;
	// create a new keyring for the container
	bool no_new_keyring = 2;
	// place the shim in a cgroup
	string shim_cgroup = 3;
	// set the I/O's pipes uid
	uint32 io_uid = 4;
	// set the I/O's pipes gid
	uint32 io_gid = 5;
	// binary name of the runc binary
	string binary_name = 6;
	// runc root directory
	string root = 7;
	// criu binary path.
	//
	// Removed in containerd v2.0: string criu_path = 8;
	reserved 8;
	// enable systemd cgroups
	bool systemd_cgroup = 9;
	// criu image path
	string criu_image_path = 10;
	// criu work path
	string criu_work_path = 11;
	// task api address, can be a unix domain socket, or vsock address.
	// it is in the form of ttrpc+unix://path/to/uds or grpc+vsock://<vsock cid>:<port>.
	string task_api_address = 12;
	// task api version, currently supported value is 2 and 3.
uint32 task_api_version = 13; } message CheckpointOptions { // exit the container after a checkpoint bool exit = 1; // checkpoint open tcp connections bool open_tcp = 2; // checkpoint external unix sockets bool external_unix_sockets = 3; // checkpoint terminals (ptys) bool terminal = 4; // allow checkpointing of file locks bool file_locks = 5; // restore provided namespaces as empty namespaces repeated string empty_namespaces = 6; // set the cgroups mode, soft, full, strict string cgroups_mode = 7; // checkpoint image path string image_path = 8; // checkpoint work path string work_path = 9; } message ProcessDetails { // exec process id if the process is managed by a shim string exec_id = 1; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/api/types/sandbox.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.types; import "google/protobuf/any.proto"; import "google/protobuf/timestamp.proto"; option go_package = "github.com/containerd/containerd/api/types;types"; // Sandbox represents a sandbox metadata object that keeps all info required by controller to // work with a particular instance. message Sandbox { // SandboxID is a unique instance identifier within namespace string sandbox_id = 1; message Runtime { // Name is the name of the runtime. 
string name = 1; // Options specify additional runtime initialization options for the shim (this data will be available in StartShim). // Typically this data expected to be runtime shim implementation specific. google.protobuf.Any options = 2; } // Runtime specifies which runtime to use for executing this container. Runtime runtime = 2; // Spec is sandbox configuration (kin of OCI runtime spec), spec's data will be written to a config.json file in the // bundle directory (similary to OCI spec). google.protobuf.Any spec = 3; // Labels provides an area to include arbitrary data on containers. map labels = 4; // CreatedAt is the time the container was first created. google.protobuf.Timestamp created_at = 5; // UpdatedAt is the last time the container was mutated. google.protobuf.Timestamp updated_at = 6; // Extensions allow clients to provide optional blobs that can be handled by runtime. map extensions = 7; // Sandboxer is the name of the sandbox controller who manages the sandbox. string sandboxer = 10; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/api/types/task/task.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ syntax = "proto3"; package containerd.v1.types; import "google/protobuf/any.proto"; import "google/protobuf/timestamp.proto"; option go_package = "github.com/containerd/containerd/api/types/task"; enum Status { UNKNOWN = 0; CREATED = 1; RUNNING = 2; STOPPED = 3; PAUSED = 4; PAUSING = 5; } message Process { string container_id = 1; string id = 2; uint32 pid = 3; Status status = 4; string stdin = 5; string stdout = 6; string stderr = 7; bool terminal = 8; uint32 exit_status = 9; google.protobuf.Timestamp exited_at = 10; } message ProcessInfo { // PID is the process ID. uint32 pid = 1; // Info contains additional process information. // // Info varies by platform. google.protobuf.Any info = 2; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/events/container.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ syntax = "proto3"; package containerd.events; import "google/protobuf/any.proto"; import "types/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.types.fieldpath_all) = true; message ContainerCreate { string id = 1; string image = 2; message Runtime { string name = 1; google.protobuf.Any options = 2; } Runtime runtime = 3; } message ContainerUpdate { string id = 1; string image = 2; map labels = 3; string snapshot_key = 4; } message ContainerDelete { string id = 1; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/events/content.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.events; import "types/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.types.fieldpath_all) = true; message ContentCreate { string digest = 1; int64 size = 2; } message ContentDelete { string digest = 1; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/events/image.proto ================================================ /* Copyright The containerd Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.services.images.v1; import "types/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.types.fieldpath_all) = true; message ImageCreate { string name = 1; map labels = 2; } message ImageUpdate { string name = 1; map labels = 2; } message ImageDelete { string name = 1; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/events/namespace.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ syntax = "proto3"; package containerd.events; import "types/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.types.fieldpath_all) = true; message NamespaceCreate { string name = 1; map labels = 2; } message NamespaceUpdate { string name = 1; map labels = 2; } message NamespaceDelete { string name = 1; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/events/sandbox.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.events; import "google/protobuf/timestamp.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; message SandboxCreate { string sandbox_id = 1; } message SandboxStart { string sandbox_id = 1; } message SandboxExit { string sandbox_id = 1; uint32 exit_status = 2; google.protobuf.Timestamp exited_at = 3; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/events/snapshot.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.events; import "types/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.types.fieldpath_all) = true; message SnapshotPrepare { string key = 1; string parent = 2; string snapshotter = 5; } message SnapshotCommit { string key = 1; string name = 2; string snapshotter = 5; } message SnapshotRemove { string key = 1; string snapshotter = 5; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/events/task.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ syntax = "proto3"; package containerd.events; import "google/protobuf/timestamp.proto"; import "types/fieldpath.proto"; import "types/mount.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.types.fieldpath_all) = true; message TaskCreate { string container_id = 1; string bundle = 2; repeated containerd.types.Mount rootfs = 3; TaskIO io = 4; string checkpoint = 5; uint32 pid = 6; } message TaskStart { string container_id = 1; uint32 pid = 2; } message TaskDelete { string container_id = 1; uint32 pid = 2; uint32 exit_status = 3; google.protobuf.Timestamp exited_at = 4; // id is the specific exec. By default if omitted will be `""` thus matches // the init exec of the task matching `container_id`. string id = 5; } message TaskIO { string stdin = 1; string stdout = 2; string stderr = 3; bool terminal = 4; } message TaskExit { string container_id = 1; string id = 2; uint32 pid = 3; uint32 exit_status = 4; google.protobuf.Timestamp exited_at = 5; } message TaskOOM { string container_id = 1; } message TaskExecAdded { string container_id = 1; string exec_id = 2; } message TaskExecStarted { string container_id = 1; string exec_id = 2; uint32 pid = 3; } message TaskPaused { string container_id = 1; } message TaskResumed { string container_id = 1; } message TaskCheckpointed { string container_id = 1; string checkpoint = 2; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.runtime.sandbox.v1; import "google/protobuf/any.proto"; import "google/protobuf/timestamp.proto"; import "types/metrics.proto"; import "types/mount.proto"; import "types/platform.proto"; option go_package = "github.com/containerd/containerd/api/runtime/sandbox/v1;sandbox"; // Sandbox is an optional interface that shim may implement to support sandboxes environments. // A typical example of sandbox is microVM or pause container - an entity that groups containers and/or // holds resources relevant for this group. service Sandbox { // CreateSandbox will be called right after sandbox shim instance launched. // It is a good place to initialize sandbox environment. rpc CreateSandbox(CreateSandboxRequest) returns (CreateSandboxResponse); // StartSandbox will start a previously created sandbox. rpc StartSandbox(StartSandboxRequest) returns (StartSandboxResponse); // Platform queries the platform the sandbox is going to run containers on. // containerd will use this to generate a proper OCI spec. rpc Platform(PlatformRequest) returns (PlatformResponse); // StopSandbox will stop existing sandbox instance rpc StopSandbox(StopSandboxRequest) returns (StopSandboxResponse); // WaitSandbox blocks until sandbox exits. rpc WaitSandbox(WaitSandboxRequest) returns (WaitSandboxResponse); // SandboxStatus will return current status of the running sandbox instance rpc SandboxStatus(SandboxStatusRequest) returns (SandboxStatusResponse); // PingSandbox is a lightweight API call to check whether sandbox alive. 
rpc PingSandbox(PingRequest) returns (PingResponse); // ShutdownSandbox must shutdown shim instance. rpc ShutdownSandbox(ShutdownSandboxRequest) returns (ShutdownSandboxResponse); // SandboxMetrics retrieves metrics about a sandbox instance. rpc SandboxMetrics(SandboxMetricsRequest) returns (SandboxMetricsResponse); } message CreateSandboxRequest { string sandbox_id = 1; string bundle_path = 2; repeated containerd.types.Mount rootfs = 3; google.protobuf.Any options = 4; string netns_path = 5; map annotations = 6; } message CreateSandboxResponse {} message StartSandboxRequest { string sandbox_id = 1; } message StartSandboxResponse { uint32 pid = 1; google.protobuf.Timestamp created_at = 2; google.protobuf.Any spec = 3; } message PlatformRequest { string sandbox_id = 1; } message PlatformResponse { containerd.types.Platform platform = 1; } message StopSandboxRequest { string sandbox_id = 1; uint32 timeout_secs = 2; } message StopSandboxResponse {} message UpdateSandboxRequest { string sandbox_id = 1; google.protobuf.Any resources = 2; map annotations = 3; } message WaitSandboxRequest { string sandbox_id = 1; } message WaitSandboxResponse { uint32 exit_status = 1; google.protobuf.Timestamp exited_at = 2; } message UpdateSandboxResponse {} message SandboxStatusRequest { string sandbox_id = 1; bool verbose = 2; } message SandboxStatusResponse { string sandbox_id = 1; uint32 pid = 2; string state = 3; map info = 4; google.protobuf.Timestamp created_at = 5; google.protobuf.Timestamp exited_at = 6; google.protobuf.Any extra = 7; } message PingRequest { string sandbox_id = 1; } message PingResponse {} message ShutdownSandboxRequest { string sandbox_id = 1; } message ShutdownSandboxResponse {} message SandboxMetricsRequest { string sandbox_id = 1; } message SandboxMetricsResponse { containerd.types.Metric metrics = 1; } ================================================ FILE: 
crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/runtime/task/v2/shim.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.task.v2; import "google/protobuf/any.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/timestamp.proto"; import "types/mount.proto"; import "types/task/task.proto"; option go_package = "github.com/containerd/containerd/api/runtime/task/v2;task"; // Shim service is launched for each container and is responsible for owning the IO // for the container and its additional processes. The shim is also the parent of // each container and allows reattaching to the IO and receiving the exit status // for the container processes. 
service Task { rpc State(StateRequest) returns (StateResponse); rpc Create(CreateTaskRequest) returns (CreateTaskResponse); rpc Start(StartRequest) returns (StartResponse); rpc Delete(DeleteRequest) returns (DeleteResponse); rpc Pids(PidsRequest) returns (PidsResponse); rpc Pause(PauseRequest) returns (google.protobuf.Empty); rpc Resume(ResumeRequest) returns (google.protobuf.Empty); rpc Checkpoint(CheckpointTaskRequest) returns (google.protobuf.Empty); rpc Kill(KillRequest) returns (google.protobuf.Empty); rpc Exec(ExecProcessRequest) returns (google.protobuf.Empty); rpc ResizePty(ResizePtyRequest) returns (google.protobuf.Empty); rpc CloseIO(CloseIORequest) returns (google.protobuf.Empty); rpc Update(UpdateTaskRequest) returns (google.protobuf.Empty); rpc Wait(WaitRequest) returns (WaitResponse); rpc Stats(StatsRequest) returns (StatsResponse); rpc Connect(ConnectRequest) returns (ConnectResponse); rpc Shutdown(ShutdownRequest) returns (google.protobuf.Empty); } message CreateTaskRequest { string id = 1; string bundle = 2; repeated containerd.types.Mount rootfs = 3; bool terminal = 4; string stdin = 5; string stdout = 6; string stderr = 7; string checkpoint = 8; string parent_checkpoint = 9; google.protobuf.Any options = 10; } message CreateTaskResponse { uint32 pid = 1; } message DeleteRequest { string id = 1; string exec_id = 2; } message DeleteResponse { uint32 pid = 1; uint32 exit_status = 2; google.protobuf.Timestamp exited_at = 3; } message ExecProcessRequest { string id = 1; string exec_id = 2; bool terminal = 3; string stdin = 4; string stdout = 5; string stderr = 6; google.protobuf.Any spec = 7; } message ExecProcessResponse {} message ResizePtyRequest { string id = 1; string exec_id = 2; uint32 width = 3; uint32 height = 4; } message StateRequest { string id = 1; string exec_id = 2; } message StateResponse { string id = 1; string bundle = 2; uint32 pid = 3; containerd.v1.types.Status status = 4; string stdin = 5; string stdout = 6; string stderr = 7; 
bool terminal = 8; uint32 exit_status = 9; google.protobuf.Timestamp exited_at = 10; string exec_id = 11; } message KillRequest { string id = 1; string exec_id = 2; uint32 signal = 3; bool all = 4; } message CloseIORequest { string id = 1; string exec_id = 2; bool stdin = 3; } message PidsRequest { string id = 1; } message PidsResponse { repeated containerd.v1.types.ProcessInfo processes = 1; } message CheckpointTaskRequest { string id = 1; string path = 2; google.protobuf.Any options = 3; } message UpdateTaskRequest { string id = 1; google.protobuf.Any resources = 2; map annotations = 3; } message StartRequest { string id = 1; string exec_id = 2; } message StartResponse { uint32 pid = 1; } message WaitRequest { string id = 1; string exec_id = 2; } message WaitResponse { uint32 exit_status = 1; google.protobuf.Timestamp exited_at = 2; } message StatsRequest { string id = 1; } message StatsResponse { google.protobuf.Any stats = 1; } message ConnectRequest { string id = 1; } message ConnectResponse { uint32 shim_pid = 1; uint32 task_pid = 2; string version = 3; } message ShutdownRequest { string id = 1; bool now = 2; } message PauseRequest { string id = 1; } message ResumeRequest { string id = 1; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.services.events.ttrpc.v1; import "google/protobuf/empty.proto"; import "types/event.proto"; option go_package = "github.com/containerd/containerd/api/services/ttrpc/events/v1;events"; service Events { // Forward sends an event that has already been packaged into an envelope // with a timestamp and namespace. // // This is useful if earlier timestamping is required or when forwarding on // behalf of another component, namespace or publisher. rpc Forward(ForwardRequest) returns (google.protobuf.Empty); } message ForwardRequest { containerd.types.Envelope envelope = 1; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/descriptor.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.types; option go_package = "github.com/containerd/containerd/api/types;types"; // Descriptor describes a blob in a content store. // // This descriptor can be used to reference content from an // oci descriptor found in a manifest. 
// See https://godoc.org/github.com/opencontainers/image-spec/specs-go/v1#Descriptor message Descriptor { string media_type = 1; string digest = 2; int64 size = 3; map annotations = 5; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/event.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.types; import "google/protobuf/any.proto"; import "google/protobuf/timestamp.proto"; import "types/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/types;types"; message Envelope { option (containerd.types.fieldpath) = true; google.protobuf.Timestamp timestamp = 1; string namespace = 2; string topic = 3; google.protobuf.Any event = 4; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/fieldpath.proto ================================================ // Protocol Buffers for Go with Gadgets // // Copyright (c) 2013, The GoGo Authors. All rights reserved. 
// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. syntax = "proto3"; package containerd.types; import "google/protobuf/descriptor.proto"; option go_package = "github.com/containerd/containerd/api/types;types"; extend google.protobuf.FileOptions { optional bool fieldpath_all = 63300; } extend google.protobuf.MessageOptions { optional bool fieldpath = 64400; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/introspection.proto ================================================ /* Copyright The containerd Authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.types; import "google/protobuf/any.proto"; option go_package = "github.com/containerd/containerd/api/types;types"; message RuntimeRequest { string runtime_path = 1; // Options correspond to CreateTaskRequest.options. // This is needed to pass the runc binary path, etc. google.protobuf.Any options = 2; } message RuntimeVersion { string version = 1; string revision = 2; } message RuntimeInfo { string name = 1; RuntimeVersion version = 2; // Options correspond to RuntimeInfoRequest.Options (contains runc binary path, etc.) google.protobuf.Any options = 3; // OCI-compatible runtimes should use https://github.com/opencontainers/runtime-spec/blob/main/features.md google.protobuf.Any features = 4; // Annotations of the shim. Irrelevant to features.Annotations. map annotations = 5; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/metrics.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.types; import "google/protobuf/any.proto"; import "google/protobuf/timestamp.proto"; option go_package = "github.com/containerd/containerd/api/types;types"; message Metric { google.protobuf.Timestamp timestamp = 1; string id = 2; google.protobuf.Any data = 3; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/mount.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.types; import "google/protobuf/timestamp.proto"; option go_package = "github.com/containerd/containerd/api/types;types"; // Mount describes mounts for a container. // // This type is the lingua franca of ContainerD. All services provide mounts // to be used with the container at creation time. // // The Mount type follows the structure of the mount syscall, including a type, // source, target and options. message Mount { // Type defines the nature of the mount. 
string type = 1; // Source specifies the name of the mount. Depending on mount type, this // may be a volume name or a host path, or even ignored. string source = 2; // Target path in container string target = 3; // Options specifies zero or more fstab style mount options. repeated string options = 4; } message ActiveMount { Mount mount = 1; google.protobuf.Timestamp mounted_at = 2; string mount_point = 3; map data = 4; } message ActivationInfo { string name = 1; repeated ActiveMount active = 2; repeated Mount system = 3; map labels = 4; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/platform.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.types; option go_package = "github.com/containerd/containerd/api/types;types"; // Platform follows the structure of the OCI platform specification, from // descriptors. 
message Platform { string os = 1; string architecture = 2; string variant = 3; string os_version = 4; repeated string os_features = 5; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/runc/options/oci.proto ================================================ syntax = "proto3"; package containerd.runc.v1; option go_package = "github.com/containerd/containerd/api/types/runc/options;options"; message Options { // disable pivot root when creating a container bool no_pivot_root = 1; // create a new keyring for the container bool no_new_keyring = 2; // place the shim in a cgroup string shim_cgroup = 3; // set the I/O's pipes uid uint32 io_uid = 4; // set the I/O's pipes gid uint32 io_gid = 5; // binary name of the runc binary string binary_name = 6; // runc root directory string root = 7; // criu binary path. // // Removed in containerd v2.0: string criu_path = 8; reserved 8; // enable systemd cgroups bool systemd_cgroup = 9; // criu image path string criu_image_path = 10; // criu work path string criu_work_path = 11; // task api address, can be a unix domain socket, or vsock address. // it is in the form of ttrpc+unix://path/to/uds or grpc+vsock://:. string task_api_address = 12; // task api version, currently supported value is 2 and 3. 
uint32 task_api_version = 13; } message CheckpointOptions { // exit the container after a checkpoint bool exit = 1; // checkpoint open tcp connections bool open_tcp = 2; // checkpoint external unix sockets bool external_unix_sockets = 3; // checkpoint terminals (ptys) bool terminal = 4; // allow checkpointing of file locks bool file_locks = 5; // restore provided namespaces as empty namespaces repeated string empty_namespaces = 6; // set the cgroups mode, soft, full, strict string cgroups_mode = 7; // checkpoint image path string image_path = 8; // checkpoint work path string work_path = 9; } message ProcessDetails { // exec process id if the process is managed by a shim string exec_id = 1; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/sandbox.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.types; import "google/protobuf/any.proto"; import "google/protobuf/timestamp.proto"; option go_package = "github.com/containerd/containerd/api/types;types"; // Sandbox represents a sandbox metadata object that keeps all info required by controller to // work with a particular instance. message Sandbox { // SandboxID is a unique instance identifier within namespace string sandbox_id = 1; message Runtime { // Name is the name of the runtime. 
string name = 1; // Options specify additional runtime initialization options for the shim (this data will be available in StartShim). // Typically this data expected to be runtime shim implementation specific. google.protobuf.Any options = 2; } // Runtime specifies which runtime to use for executing this container. Runtime runtime = 2; // Spec is sandbox configuration (kin of OCI runtime spec), spec's data will be written to a config.json file in the // bundle directory (similary to OCI spec). google.protobuf.Any spec = 3; // Labels provides an area to include arbitrary data on containers. map labels = 4; // CreatedAt is the time the container was first created. google.protobuf.Timestamp created_at = 5; // UpdatedAt is the last time the container was mutated. google.protobuf.Timestamp updated_at = 6; // Extensions allow clients to provide optional blobs that can be handled by runtime. map extensions = 7; // Sandboxer is the name of the sandbox controller who manages the sandbox. string sandboxer = 10; } ================================================ FILE: crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/task/task.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ syntax = "proto3"; package containerd.v1.types; import "google/protobuf/any.proto"; import "google/protobuf/timestamp.proto"; option go_package = "github.com/containerd/containerd/api/types/task"; enum Status { UNKNOWN = 0; CREATED = 1; RUNNING = 2; STOPPED = 3; PAUSED = 4; PAUSING = 5; } message Process { string container_id = 1; string id = 2; uint32 pid = 3; Status status = 4; string stdin = 5; string stdout = 6; string stderr = 7; bool terminal = 8; uint32 exit_status = 9; google.protobuf.Timestamp exited_at = 10; } message ProcessInfo { // PID is the process ID. uint32 pid = 1; // Info contains additional process information. // // Info varies by platform. google.protobuf.Any info = 2; } ================================================ FILE: crates/shim-protos/vendor/gogoproto/gogo.proto ================================================ // Protocol Buffers for Go with Gadgets // // Copyright (c) 2013, The GoGo Authors. All rights reserved. // http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. syntax = "proto2"; package gogoproto; import "google/protobuf/descriptor.proto"; option java_package = "com.google.protobuf"; option java_outer_classname = "GoGoProtos"; option go_package = "github.com/gogo/protobuf/gogoproto"; extend google.protobuf.EnumOptions { optional bool goproto_enum_prefix = 62001; optional bool goproto_enum_stringer = 62021; optional bool enum_stringer = 62022; optional string enum_customname = 62023; optional bool enumdecl = 62024; } extend google.protobuf.EnumValueOptions { optional string enumvalue_customname = 66001; } extend google.protobuf.FileOptions { optional bool goproto_getters_all = 63001; optional bool goproto_enum_prefix_all = 63002; optional bool goproto_stringer_all = 63003; optional bool verbose_equal_all = 63004; optional bool face_all = 63005; optional bool gostring_all = 63006; optional bool populate_all = 63007; optional bool stringer_all = 63008; optional bool onlyone_all = 63009; optional bool equal_all = 63013; optional bool description_all = 63014; optional bool testgen_all = 63015; optional bool benchgen_all = 63016; optional bool marshaler_all = 63017; optional bool unmarshaler_all = 63018; optional bool stable_marshaler_all = 63019; optional bool sizer_all = 63020; optional bool goproto_enum_stringer_all = 63021; optional bool enum_stringer_all = 63022; optional bool unsafe_marshaler_all = 63023; optional bool unsafe_unmarshaler_all = 63024; optional bool goproto_extensions_map_all = 
63025; optional bool goproto_unrecognized_all = 63026; optional bool gogoproto_import = 63027; optional bool protosizer_all = 63028; optional bool compare_all = 63029; optional bool typedecl_all = 63030; optional bool enumdecl_all = 63031; optional bool goproto_registration = 63032; optional bool messagename_all = 63033; optional bool goproto_sizecache_all = 63034; optional bool goproto_unkeyed_all = 63035; } extend google.protobuf.MessageOptions { optional bool goproto_getters = 64001; optional bool goproto_stringer = 64003; optional bool verbose_equal = 64004; optional bool face = 64005; optional bool gostring = 64006; optional bool populate = 64007; optional bool stringer = 67008; optional bool onlyone = 64009; optional bool equal = 64013; optional bool description = 64014; optional bool testgen = 64015; optional bool benchgen = 64016; optional bool marshaler = 64017; optional bool unmarshaler = 64018; optional bool stable_marshaler = 64019; optional bool sizer = 64020; optional bool unsafe_marshaler = 64023; optional bool unsafe_unmarshaler = 64024; optional bool goproto_extensions_map = 64025; optional bool goproto_unrecognized = 64026; optional bool protosizer = 64028; optional bool compare = 64029; optional bool typedecl = 64030; optional bool messagename = 64033; optional bool goproto_sizecache = 64034; optional bool goproto_unkeyed = 64035; } extend google.protobuf.FieldOptions { optional bool nullable = 65001; optional bool embed = 65002; optional string customtype = 65003; optional string customname = 65004; optional string jsontag = 65005; optional string moretags = 65006; optional string casttype = 65007; optional string castkey = 65008; optional string castvalue = 65009; optional bool stdtime = 65010; optional bool stdduration = 65011; optional bool wktpointer = 65012; } ================================================ FILE: crates/shim-protos/vendor/google/protobuf/any.proto ================================================ // Protocol Buffers - 
Google's data interchange format // Copyright 2008 Google Inc. All rights reserved. // https://developers.google.com/protocol-buffers/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
syntax = "proto3"; package google.protobuf; option csharp_namespace = "Google.Protobuf.WellKnownTypes"; option go_package = "google.golang.org/protobuf/types/known/anypb"; option java_package = "com.google.protobuf"; option java_outer_classname = "AnyProto"; option java_multiple_files = true; option objc_class_prefix = "GPB"; // `Any` contains an arbitrary serialized protocol buffer message along with a // URL that describes the type of the serialized message. // // Protobuf library provides support to pack/unpack Any values in the form // of utility functions or additional generated methods of the Any type. // // Example 1: Pack and unpack a message in C++. // // Foo foo = ...; // Any any; // any.PackFrom(foo); // ... // if (any.UnpackTo(&foo)) { // ... // } // // Example 2: Pack and unpack a message in Java. // // Foo foo = ...; // Any any = Any.pack(foo); // ... // if (any.is(Foo.class)) { // foo = any.unpack(Foo.class); // } // // Example 3: Pack and unpack a message in Python. // // foo = Foo(...) // any = Any() // any.Pack(foo) // ... // if any.Is(Foo.DESCRIPTOR): // any.Unpack(foo) // ... // // Example 4: Pack and unpack a message in Go // // foo := &pb.Foo{...} // any, err := anypb.New(foo) // if err != nil { // ... // } // ... // foo := &pb.Foo{} // if err := any.UnmarshalTo(foo); err != nil { // ... // } // // The pack methods provided by protobuf library will by default use // 'type.googleapis.com/full.type.name' as the type URL and the unpack // methods only use the fully qualified type name after the last '/' // in the type URL, for example "foo.bar.com/x/y.z" will yield type // name "y.z". // // // JSON // ==== // The JSON representation of an `Any` value uses the regular // representation of the deserialized, embedded message, with an // additional field `@type` which contains the type URL. 
Example: // // package google.profile; // message Person { // string first_name = 1; // string last_name = 2; // } // // { // "@type": "type.googleapis.com/google.profile.Person", // "firstName": <string>, // "lastName": <string> // } // // If the embedded message type is well-known and has a custom JSON // representation, that representation will be embedded adding a field // `value` which holds the custom JSON in addition to the `@type` // field. Example (for message [google.protobuf.Duration][]): // // { // "@type": "type.googleapis.com/google.protobuf.Duration", // "value": "1.212s" // } // message Any { // A URL/resource name that uniquely identifies the type of the serialized // protocol buffer message. This string must contain at least // one "/" character. The last segment of the URL's path must represent // the fully qualified name of the type (as in // `path/google.protobuf.Duration`). The name should be in a canonical form // (e.g., leading "." is not accepted). // // In practice, teams usually precompile into the binary all types that they // expect it to use in the context of Any. However, for URLs which use the // scheme `http`, `https`, or no scheme, one can optionally set up a type // server that maps type URLs to message definitions as follows: // // * If no scheme is provided, `https` is assumed. // * An HTTP GET on the URL must yield a [google.protobuf.Type][] // value in binary format, or produce an error. // * Applications are allowed to cache lookup results based on the // URL, or have them precompiled into a binary to avoid any // lookup. Therefore, binary compatibility needs to be preserved // on changes to types. (Use versioned type names to manage // breaking changes.) // // Note: this functionality is not currently available in the official // protobuf release, and it is not used for type URLs beginning with // type.googleapis.com. // // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. 
// string type_url = 1; // Must be a valid serialized protocol buffer of the above specified type. bytes value = 2; } ================================================ FILE: crates/shim-protos/vendor/google/protobuf/descriptor.proto ================================================ // Protocol Buffers - Google's data interchange format // Copyright 2008 Google Inc. All rights reserved. // https://developers.google.com/protocol-buffers/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// Author: kenton@google.com (Kenton Varda) // Based on original Protocol Buffers design by // Sanjay Ghemawat, Jeff Dean, and others. // // The messages in this file describe the definitions found in .proto files. // A valid .proto file can be translated directly to a FileDescriptorProto // without any other information (e.g. without reading its imports). syntax = "proto2"; package google.protobuf; option go_package = "google.golang.org/protobuf/types/descriptorpb"; option java_package = "com.google.protobuf"; option java_outer_classname = "DescriptorProtos"; option csharp_namespace = "Google.Protobuf.Reflection"; option objc_class_prefix = "GPB"; option cc_enable_arenas = true; // descriptor.proto must be optimized for speed because reflection-based // algorithms don't work during bootstrapping. option optimize_for = SPEED; // The protocol compiler can output a FileDescriptorSet containing the .proto // files it parses. message FileDescriptorSet { repeated FileDescriptorProto file = 1; } // Describes a complete .proto file. message FileDescriptorProto { optional string name = 1; // file name, relative to root of source tree optional string package = 2; // e.g. "foo", "foo.bar", etc. // Names of files imported by this file. repeated string dependency = 3; // Indexes of the public imported files in the dependency list above. repeated int32 public_dependency = 10; // Indexes of the weak imported files in the dependency list. // For Google-internal migration only. Do not use. repeated int32 weak_dependency = 11; // All top-level definitions in this file. repeated DescriptorProto message_type = 4; repeated EnumDescriptorProto enum_type = 5; repeated ServiceDescriptorProto service = 6; repeated FieldDescriptorProto extension = 7; optional FileOptions options = 8; // This field contains optional information about the original source code. 
// You may safely remove this entire field without harming runtime // functionality of the descriptors -- the information is needed only by // development tools. optional SourceCodeInfo source_code_info = 9; // The syntax of the proto file. // The supported values are "proto2" and "proto3". optional string syntax = 12; } // Describes a message type. message DescriptorProto { optional string name = 1; repeated FieldDescriptorProto field = 2; repeated FieldDescriptorProto extension = 6; repeated DescriptorProto nested_type = 3; repeated EnumDescriptorProto enum_type = 4; message ExtensionRange { optional int32 start = 1; // Inclusive. optional int32 end = 2; // Exclusive. optional ExtensionRangeOptions options = 3; } repeated ExtensionRange extension_range = 5; repeated OneofDescriptorProto oneof_decl = 8; optional MessageOptions options = 7; // Range of reserved tag numbers. Reserved tag numbers may not be used by // fields or extension ranges in the same message. Reserved ranges may // not overlap. message ReservedRange { optional int32 start = 1; // Inclusive. optional int32 end = 2; // Exclusive. } repeated ReservedRange reserved_range = 9; // Reserved field names, which may not be used by fields in the same message. // A given name may only be reserved once. repeated string reserved_name = 10; } message ExtensionRangeOptions { // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; // Clients can define custom options in extensions of this message. See above. extensions 1000 to max; } // Describes a field within a message. message FieldDescriptorProto { enum Type { // 0 is reserved for errors. // Order is weird for historical reasons. TYPE_DOUBLE = 1; TYPE_FLOAT = 2; // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if // negative values are likely. TYPE_INT64 = 3; TYPE_UINT64 = 4; // Not ZigZag encoded. Negative numbers take 10 bytes. 
Use TYPE_SINT32 if // negative values are likely. TYPE_INT32 = 5; TYPE_FIXED64 = 6; TYPE_FIXED32 = 7; TYPE_BOOL = 8; TYPE_STRING = 9; // Tag-delimited aggregate. // Group type is deprecated and not supported in proto3. However, Proto3 // implementations should still be able to parse the group wire format and // treat group fields as unknown fields. TYPE_GROUP = 10; TYPE_MESSAGE = 11; // Length-delimited aggregate. // New in version 2. TYPE_BYTES = 12; TYPE_UINT32 = 13; TYPE_ENUM = 14; TYPE_SFIXED32 = 15; TYPE_SFIXED64 = 16; TYPE_SINT32 = 17; // Uses ZigZag encoding. TYPE_SINT64 = 18; // Uses ZigZag encoding. } enum Label { // 0 is reserved for errors LABEL_OPTIONAL = 1; LABEL_REQUIRED = 2; LABEL_REPEATED = 3; } optional string name = 1; optional int32 number = 3; optional Label label = 4; // If type_name is set, this need not be set. If both this and type_name // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. optional Type type = 5; // For message and enum types, this is the name of the type. If the name // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping // rules are used to find the type (i.e. first the nested types within this // message are searched, then within the parent, on up to the root // namespace). optional string type_name = 6; // For extensions, this is the name of the type being extended. It is // resolved in the same manner as type_name. optional string extendee = 2; // For numeric types, contains the original text representation of the value. // For booleans, "true" or "false". // For strings, contains the default text contents (not escaped in any way). // For bytes, contains the C escaped value. All bytes >= 128 are escaped. // TODO(kenton): Base-64 encode? optional string default_value = 7; // If set, gives the index of a oneof in the containing type's oneof_decl // list. This field is a member of that oneof. optional int32 oneof_index = 9; // JSON name of this field. 
The value is set by protocol compiler. If the // user has set a "json_name" option on this field, that option's value // will be used. Otherwise, it's deduced from the field's name by converting // it to camelCase. optional string json_name = 10; optional FieldOptions options = 8; // If true, this is a proto3 "optional". When a proto3 field is optional, it // tracks presence regardless of field type. // // When proto3_optional is true, this field must be belong to a oneof to // signal to old proto3 clients that presence is tracked for this field. This // oneof is known as a "synthetic" oneof, and this field must be its sole // member (each proto3 optional field gets its own synthetic oneof). Synthetic // oneofs exist in the descriptor only, and do not generate any API. Synthetic // oneofs must be ordered after all "real" oneofs. // // For message fields, proto3_optional doesn't create any semantic change, // since non-repeated message fields always track presence. However it still // indicates the semantic detail of whether the user wrote "optional" or not. // This can be useful for round-tripping the .proto file. For consistency we // give message fields a synthetic oneof also, even though it is not required // to track presence. This is especially important because the parser can't // tell if a field is a message or an enum, so it must always create a // synthetic oneof. // // Proto2 optional fields do not set this flag, because they already indicate // optional with `LABEL_OPTIONAL`. optional bool proto3_optional = 17; } // Describes a oneof. message OneofDescriptorProto { optional string name = 1; optional OneofOptions options = 2; } // Describes an enum type. message EnumDescriptorProto { optional string name = 1; repeated EnumValueDescriptorProto value = 2; optional EnumOptions options = 3; // Range of reserved numeric values. Reserved values may not be used by // entries in the same enum. Reserved ranges may not overlap. 
// // Note that this is distinct from DescriptorProto.ReservedRange in that it // is inclusive such that it can appropriately represent the entire int32 // domain. message EnumReservedRange { optional int32 start = 1; // Inclusive. optional int32 end = 2; // Inclusive. } // Range of reserved numeric values. Reserved numeric values may not be used // by enum values in the same enum declaration. Reserved ranges may not // overlap. repeated EnumReservedRange reserved_range = 4; // Reserved enum value names, which may not be reused. A given name may only // be reserved once. repeated string reserved_name = 5; } // Describes a value within an enum. message EnumValueDescriptorProto { optional string name = 1; optional int32 number = 2; optional EnumValueOptions options = 3; } // Describes a service. message ServiceDescriptorProto { optional string name = 1; repeated MethodDescriptorProto method = 2; optional ServiceOptions options = 3; } // Describes a method of a service. message MethodDescriptorProto { optional string name = 1; // Input and output type names. These are resolved in the same way as // FieldDescriptorProto.type_name, but must refer to a message type. optional string input_type = 2; optional string output_type = 3; optional MethodOptions options = 4; // Identifies if client streams multiple client messages optional bool client_streaming = 5 [default = false]; // Identifies if server streams multiple server messages optional bool server_streaming = 6 [default = false]; } // =================================================================== // Options // Each of the definitions above may have "options" attached. These are // just annotations which may cause code to be generated slightly differently // or may contain hints for code that manipulates protocol messages. // // Clients may define custom options as extensions of the *Options messages. // These extensions may not yet be known at parsing time, so the parser cannot // store the values in them. 
Instead it stores them in a field in the *Options // message called uninterpreted_option. This field must have the same name // across all *Options messages. We then use this field to populate the // extensions when we build a descriptor, at which point all protos have been // parsed and so all extensions are known. // // Extension numbers for custom options may be chosen as follows: // * For options which will only be used within a single application or // organization, or for experimental options, use field numbers 50000 // through 99999. It is up to you to ensure that you do not use the // same number for multiple options. // * For options which will be published and used publicly by multiple // independent entities, e-mail protobuf-global-extension-registry@google.com // to reserve extension numbers. Simply provide your project name (e.g. // Objective-C plugin) and your project website (if available) -- there's no // need to explain how you intend to use them. Usually you only need one // extension number. You can declare multiple options with only one extension // number by putting them in a sub-message. See the Custom Options section of // the docs for examples: // https://developers.google.com/protocol-buffers/docs/proto#options // If this turns out to be popular, a web service will be set up // to automatically assign option numbers. message FileOptions { // Sets the Java package where classes generated from this .proto will be // placed. By default, the proto package is used, but this is often // inappropriate because proto packages do not normally start with backwards // domain names. optional string java_package = 1; // Controls the name of the wrapper Java class generated for the .proto file. // That class will always contain the .proto file's getDescriptor() method as // well as any top-level extensions defined in the .proto file. 
// If java_multiple_files is disabled, then all the other classes from the // .proto file will be nested inside the single wrapper outer class. optional string java_outer_classname = 8; // If enabled, then the Java code generator will generate a separate .java // file for each top-level message, enum, and service defined in the .proto // file. Thus, these types will *not* be nested inside the wrapper class // named by java_outer_classname. However, the wrapper class will still be // generated to contain the file's getDescriptor() method as well as any // top-level extensions defined in the file. optional bool java_multiple_files = 10 [default = false]; // This option does nothing. optional bool java_generate_equals_and_hash = 20 [deprecated=true]; // If set true, then the Java2 code generator will generate code that // throws an exception whenever an attempt is made to assign a non-UTF-8 // byte sequence to a string field. // Message reflection will do the same. // However, an extension field still accepts non-UTF-8 byte sequences. // This option has no effect on when used with the lite runtime. optional bool java_string_check_utf8 = 27 [default = false]; // Generated classes can be optimized for speed or code size. enum OptimizeMode { SPEED = 1; // Generate complete code for parsing, serialization, // etc. CODE_SIZE = 2; // Use ReflectionOps to implement these methods. LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. } optional OptimizeMode optimize_for = 9 [default = SPEED]; // Sets the Go package where structs generated from this .proto will be // placed. If omitted, the Go package will be derived from the following: // - The basename of the package import path, if provided. // - Otherwise, the package statement in the .proto file, if present. // - Otherwise, the basename of the .proto file, without extension. optional string go_package = 11; // Should generic services be generated in each language? 
"Generic" services // are not specific to any particular RPC system. They are generated by the // main code generators in each language (without additional plugins). // Generic services were the only kind of service generation supported by // early versions of google.protobuf. // // Generic services are now considered deprecated in favor of using plugins // that generate code specific to your particular RPC system. Therefore, // these default to false. Old code which depends on generic services should // explicitly set them to true. optional bool cc_generic_services = 16 [default = false]; optional bool java_generic_services = 17 [default = false]; optional bool py_generic_services = 18 [default = false]; optional bool php_generic_services = 42 [default = false]; // Is this file deprecated? // Depending on the target platform, this can emit Deprecated annotations // for everything in the file, or it will be completely ignored; in the very // least, this is a formalization for deprecating files. optional bool deprecated = 23 [default = false]; // Enables the use of arenas for the proto messages in this file. This applies // only to generated classes for C++. optional bool cc_enable_arenas = 31 [default = true]; // Sets the objective c class prefix which is prepended to all objective c // generated classes from this .proto. There is no default. optional string objc_class_prefix = 36; // Namespace for generated classes; defaults to the package. optional string csharp_namespace = 37; // By default Swift generators will take the proto package and CamelCase it // replacing '.' with underscore and use that to prefix the types/symbols // defined. When this options is provided, they will use this value instead // to prefix the types/symbols defined. optional string swift_prefix = 39; // Sets the php class prefix which is prepended to all php generated classes // from this .proto. Default is empty. 
optional string php_class_prefix = 40; // Use this option to change the namespace of php generated classes. Default // is empty. When this option is empty, the package name will be used for // determining the namespace. optional string php_namespace = 41; // Use this option to change the namespace of php generated metadata classes. // Default is empty. When this option is empty, the proto file name will be // used for determining the namespace. optional string php_metadata_namespace = 44; // Use this option to change the package of ruby generated classes. Default // is empty. When this option is not set, the package name will be used for // determining the ruby package. optional string ruby_package = 45; // The parser stores options it doesn't recognize here. // See the documentation for the "Options" section above. repeated UninterpretedOption uninterpreted_option = 999; // Clients can define custom options in extensions of this message. // See the documentation for the "Options" section above. extensions 1000 to max; reserved 38; } message MessageOptions { // Set true to use the old proto1 MessageSet wire format for extensions. // This is provided for backwards-compatibility with the MessageSet wire // format. You should not use this for any other reason: It's less // efficient, has fewer features, and is more complicated. // // The message must be defined exactly as follows: // message Foo { // option message_set_wire_format = true; // extensions 4 to max; // } // Note that the message cannot have any defined fields; MessageSets only // have extensions. // // All extensions of your type must be singular messages; e.g. they cannot // be int32s, enums, or repeated messages. // // Because this is an option, the above two restrictions are not enforced by // the protocol compiler. optional bool message_set_wire_format = 1 [default = false]; // Disables the generation of the standard "descriptor()" accessor, which can // conflict with a field of the same name. 
This is meant to make migration // from proto1 easier; new code should avoid fields named "descriptor". optional bool no_standard_descriptor_accessor = 2 [default = false]; // Is this message deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the message, or it will be completely ignored; in the very least, // this is a formalization for deprecating messages. optional bool deprecated = 3 [default = false]; reserved 4, 5, 6; // Whether the message is an automatically generated map entry type for the // maps field. // // For maps fields: // map<KeyType, ValueType> map_field = 1; // The parsed descriptor looks like: // message MapFieldEntry { // option map_entry = true; // optional KeyType key = 1; // optional ValueType value = 2; // } // repeated MapFieldEntry map_field = 1; // // Implementations may choose not to generate the map_entry=true message, but // use a native map in the target language to hold the keys and values. // The reflection APIs in such implementations still need to work as // if the field is a repeated message field. // // NOTE: Do not set the option in .proto files. Always use the maps syntax // instead. The option should only be implicitly set by the proto compiler // parser. optional bool map_entry = 7; reserved 8; // javalite_serializable reserved 9; // javanano_as_lite // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; // Clients can define custom options in extensions of this message. See above. extensions 1000 to max; } message FieldOptions { // The ctype option instructs the C++ code generator to use a different // representation of the field than it normally would. See the specific // options below. This option is not yet implemented in the open source // release -- sorry, we'll try to include it in a future version! optional CType ctype = 1 [default = STRING]; enum CType { // Default mode. 
STRING = 0; CORD = 1; STRING_PIECE = 2; } // The packed option can be enabled for repeated primitive fields to enable // a more efficient representation on the wire. Rather than repeatedly // writing the tag and type for each element, the entire array is encoded as // a single length-delimited blob. In proto3, only explicit setting it to // false will avoid using packed encoding. optional bool packed = 2; // The jstype option determines the JavaScript type used for values of the // field. The option is permitted only for 64 bit integral and fixed types // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING // is represented as JavaScript string, which avoids loss of precision that // can happen when a large value is converted to a floating point JavaScript. // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to // use the JavaScript "number" type. The behavior of the default option // JS_NORMAL is implementation dependent. // // This option is an enum to permit additional types to be added, e.g. // goog.math.Integer. optional JSType jstype = 6 [default = JS_NORMAL]; enum JSType { // Use the default type. JS_NORMAL = 0; // Use JavaScript strings. JS_STRING = 1; // Use JavaScript numbers. JS_NUMBER = 2; } // Should this field be parsed lazily? Lazy applies only to message-type // fields. It means that when the outer message is initially parsed, the // inner message's contents will not be parsed but instead stored in encoded // form. The inner message will actually be parsed when it is first accessed. // // This is only a hint. Implementations are free to choose whether to use // eager or lazy parsing regardless of the value of this option. However, // setting this option true suggests that the protocol author believes that // using lazy parsing on this field is worth the additional bookkeeping // overhead typically needed to implement it. 
// // This option does not affect the public interface of any generated code; // all method signatures remain the same. Furthermore, thread-safety of the // interface is not affected by this option; const methods remain safe to // call from multiple threads concurrently, while non-const methods continue // to require exclusive access. // // // Note that implementations may choose not to check required fields within // a lazy sub-message. That is, calling IsInitialized() on the outer message // may return true even if the inner message has missing required fields. // This is necessary because otherwise the inner message would have to be // parsed in order to perform the check, defeating the purpose of lazy // parsing. An implementation which chooses not to check required fields // must be consistent about it. That is, for any particular sub-message, the // implementation must either *always* check its required fields, or *never* // check its required fields, regardless of whether or not the message has // been parsed. optional bool lazy = 5 [default = false]; // Is this field deprecated? // Depending on the target platform, this can emit Deprecated annotations // for accessors, or it will be completely ignored; in the very least, this // is a formalization for deprecating fields. optional bool deprecated = 3 [default = false]; // For Google-internal migration only. Do not use. optional bool weak = 10 [default = false]; // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; // Clients can define custom options in extensions of this message. See above. extensions 1000 to max; reserved 4; // removed jtype } message OneofOptions { // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; // Clients can define custom options in extensions of this message. See above. 
extensions 1000 to max; } message EnumOptions { // Set this option to true to allow mapping different tag names to the same // value. optional bool allow_alias = 2; // Is this enum deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the enum, or it will be completely ignored; in the very least, this // is a formalization for deprecating enums. optional bool deprecated = 3 [default = false]; reserved 5; // javanano_as_lite // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; // Clients can define custom options in extensions of this message. See above. extensions 1000 to max; } message EnumValueOptions { // Is this enum value deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the enum value, or it will be completely ignored; in the very least, // this is a formalization for deprecating enum values. optional bool deprecated = 1 [default = false]; // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; // Clients can define custom options in extensions of this message. See above. extensions 1000 to max; } message ServiceOptions { // Note: Field numbers 1 through 32 are reserved for Google's internal RPC // framework. We apologize for hoarding these numbers to ourselves, but // we were already using them long before we decided to release Protocol // Buffers. // Is this service deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the service, or it will be completely ignored; in the very least, // this is a formalization for deprecating services. optional bool deprecated = 33 [default = false]; // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; // Clients can define custom options in extensions of this message. See above. 
extensions 1000 to max; } message MethodOptions { // Note: Field numbers 1 through 32 are reserved for Google's internal RPC // framework. We apologize for hoarding these numbers to ourselves, but // we were already using them long before we decided to release Protocol // Buffers. // Is this method deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the method, or it will be completely ignored; in the very least, // this is a formalization for deprecating methods. optional bool deprecated = 33 [default = false]; // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, // or neither? HTTP based RPC implementation may choose GET verb for safe // methods, and PUT verb for idempotent methods instead of the default POST. enum IdempotencyLevel { IDEMPOTENCY_UNKNOWN = 0; NO_SIDE_EFFECTS = 1; // implies idempotent IDEMPOTENT = 2; // idempotent, but may have side effects } optional IdempotencyLevel idempotency_level = 34 [default = IDEMPOTENCY_UNKNOWN]; // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; // Clients can define custom options in extensions of this message. See above. extensions 1000 to max; } // A message representing a option the parser does not recognize. This only // appears in options protos created by the compiler::Parser class. // DescriptorPool resolves these when building Descriptor objects. Therefore, // options protos in descriptor objects (e.g. returned by Descriptor::options(), // or produced by Descriptor::CopyTo()) will never have UninterpretedOptions // in them. message UninterpretedOption { // The name of the uninterpreted option. Each string represents a segment in // a dot-separated name. is_extension is true iff a segment represents an // extension (denoted with parentheses in options specs in .proto files). // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents // "foo.(bar.baz).qux". 
message NamePart { required string name_part = 1; required bool is_extension = 2; } repeated NamePart name = 2; // The value of the uninterpreted option, in whatever type the tokenizer // identified it as during parsing. Exactly one of these should be set. optional string identifier_value = 3; optional uint64 positive_int_value = 4; optional int64 negative_int_value = 5; optional double double_value = 6; optional bytes string_value = 7; optional string aggregate_value = 8; } // =================================================================== // Optional source code info // Encapsulates information about the original source file from which a // FileDescriptorProto was generated. message SourceCodeInfo { // A Location identifies a piece of source code in a .proto file which // corresponds to a particular definition. This information is intended // to be useful to IDEs, code indexers, documentation generators, and similar // tools. // // For example, say we have a file like: // message Foo { // optional string foo = 1; // } // Let's look at just the field definition: // optional string foo = 1; // ^ ^^ ^^ ^ ^^^ // a bc de f ghi // We have the following locations: // span path represents // [a,i) [ 4, 0, 2, 0 ] The whole field definition. // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). // // Notes: // - A location may refer to a repeated field itself (i.e. not to any // particular index within it). This is used whenever a set of elements are // logically enclosed in a single code segment. For example, an entire // extend block (possibly containing multiple extension definitions) will // have an outer location whose path refers to the "extensions" repeated // field without an index. // - Multiple locations may have the same path. This happens when a single // logical declaration is spread out across multiple places. 
The most // obvious example is the "extend" block again -- there may be multiple // extend blocks in the same scope, each of which will have the same path. // - A location's span is not always a subset of its parent's span. For // example, the "extendee" of an extension declaration appears at the // beginning of the "extend" block and is shared by all extensions within // the block. // - Just because a location's span is a subset of some other location's span // does not mean that it is a descendant. For example, a "group" defines // both a type and a field in a single declaration. Thus, the locations // corresponding to the type and field and their components will overlap. // - Code which tries to interpret locations should probably be designed to // ignore those that it doesn't understand, as more types of locations could // be recorded in the future. repeated Location location = 1; message Location { // Identifies which part of the FileDescriptorProto was defined at this // location. // // Each element is a field number or an index. They form a path from // the root FileDescriptorProto to the place where the definition. For // example, this path: // [ 4, 3, 2, 7, 1 ] // refers to: // file.message_type(3) // 4, 3 // .field(7) // 2, 7 // .name() // 1 // This is because FileDescriptorProto.message_type has field number 4: // repeated DescriptorProto message_type = 4; // and DescriptorProto.field has field number 2: // repeated FieldDescriptorProto field = 2; // and FieldDescriptorProto.name has field number 1: // optional string name = 1; // // Thus, the above path gives the location of a field name. If we removed // the last element: // [ 4, 3, 2, 7 ] // this path refers to the whole field declaration (from the beginning // of the label to the terminating semicolon). repeated int32 path = 1 [packed = true]; // Always has exactly three or four elements: start line, start column, // end line (optional, otherwise assumed same as start line), end column. 
// These are packed into a single field for efficiency. Note that line // and column numbers are zero-based -- typically you will want to add // 1 to each before displaying to a user. repeated int32 span = 2 [packed = true]; // If this SourceCodeInfo represents a complete declaration, these are any // comments appearing before and after the declaration which appear to be // attached to the declaration. // // A series of line comments appearing on consecutive lines, with no other // tokens appearing on those lines, will be treated as a single comment. // // leading_detached_comments will keep paragraphs of comments that appear // before (but not connected to) the current element. Each paragraph, // separated by empty lines, will be one comment element in the repeated // field. // // Only the comment content is provided; comment markers (e.g. //) are // stripped out. For block comments, leading whitespace and an asterisk // will be stripped from the beginning of each line other than the first. // Newlines are included in the output. // // Examples: // // optional int32 foo = 1; // Comment attached to foo. // // Comment attached to bar. // optional int32 bar = 2; // // optional string baz = 3; // // Comment attached to baz. // // Another line attached to baz. // // // Comment attached to qux. // // // // Another line attached to qux. // optional double qux = 4; // // // Detached comment for corge. This is not leading or trailing comments // // to qux or corge because there are blank lines separating it from // // both. // // // Detached comment for corge paragraph 2. // // optional string corge = 5; // /* Block comment attached // * to corge. Leading asterisks // * will be removed. */ // /* Block comment attached to // * grault. */ // optional int32 grault = 6; // // // ignored detached comments. 
optional string leading_comments = 3; optional string trailing_comments = 4; repeated string leading_detached_comments = 6; } } // Describes the relationship between generated code and its original source // file. A GeneratedCodeInfo message is associated with only one generated // source file, but may contain references to different source .proto files. message GeneratedCodeInfo { // An Annotation connects some span of text in generated code to an element // of its generating .proto file. repeated Annotation annotation = 1; message Annotation { // Identifies the element in the original source .proto file. This field // is formatted the same as SourceCodeInfo.Location.path. repeated int32 path = 1 [packed = true]; // Identifies the filesystem path to the original source .proto. optional string source_file = 2; // Identifies the starting offset in bytes in the generated code // that relates to the identified object. optional int32 begin = 3; // Identifies the ending offset in bytes in the generated code that // relates to the identified offset. The end offset should be one past // the last relevant byte (so the length of the text = end - begin). optional int32 end = 4; } } ================================================ FILE: crates/shim-protos/vendor/google/protobuf/empty.proto ================================================ // Protocol Buffers - Google's data interchange format // Copyright 2008 Google Inc. All rights reserved. // https://developers.google.com/protocol-buffers/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. 
// * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. syntax = "proto3"; package google.protobuf; option csharp_namespace = "Google.Protobuf.WellKnownTypes"; option go_package = "google.golang.org/protobuf/types/known/emptypb"; option java_package = "com.google.protobuf"; option java_outer_classname = "EmptyProto"; option java_multiple_files = true; option objc_class_prefix = "GPB"; option cc_enable_arenas = true; // A generic empty message that you can re-use to avoid defining duplicated // empty messages in your APIs. A typical example is to use it as the request // or the response type of an API method. For instance: // // service Foo { // rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); // } // // The JSON representation for `Empty` is empty JSON object `{}`. 
message Empty {} ================================================ FILE: crates/shim-protos/vendor/google/protobuf/timestamp.proto ================================================ // Protocol Buffers - Google's data interchange format // Copyright 2008 Google Inc. All rights reserved. // https://developers.google.com/protocol-buffers/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
syntax = "proto3"; package google.protobuf; option csharp_namespace = "Google.Protobuf.WellKnownTypes"; option cc_enable_arenas = true; option go_package = "google.golang.org/protobuf/types/known/timestamppb"; option java_package = "com.google.protobuf"; option java_outer_classname = "TimestampProto"; option java_multiple_files = true; option objc_class_prefix = "GPB"; // A Timestamp represents a point in time independent of any time zone or local // calendar, encoded as a count of seconds and fractions of seconds at // nanosecond resolution. The count is relative to an epoch at UTC midnight on // January 1, 1970, in the proleptic Gregorian calendar which extends the // Gregorian calendar backwards to year one. // // All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap // second table is needed for interpretation, using a [24-hour linear // smear](https://developers.google.com/time/smear). // // The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By // restricting to that range, we ensure that we can convert to and from [RFC // 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. // // # Examples // // Example 1: Compute Timestamp from POSIX `time()`. // // Timestamp timestamp; // timestamp.set_seconds(time(NULL)); // timestamp.set_nanos(0); // // Example 2: Compute Timestamp from POSIX `gettimeofday()`. // // struct timeval tv; // gettimeofday(&tv, NULL); // // Timestamp timestamp; // timestamp.set_seconds(tv.tv_sec); // timestamp.set_nanos(tv.tv_usec * 1000); // // Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. // // FILETIME ft; // GetSystemTimeAsFileTime(&ft); // UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; // // // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z // // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. 
// Timestamp timestamp; // timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); // timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); // // Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. // // long millis = System.currentTimeMillis(); // // Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) // .setNanos((int) ((millis % 1000) * 1000000)).build(); // // // Example 5: Compute Timestamp from Java `Instant.now()`. // // Instant now = Instant.now(); // // Timestamp timestamp = // Timestamp.newBuilder().setSeconds(now.getEpochSecond()) // .setNanos(now.getNano()).build(); // // // Example 6: Compute Timestamp from current time in Python. // // timestamp = Timestamp() // timestamp.GetCurrentTime() // // # JSON Mapping // // In JSON format, the Timestamp type is encoded as a string in the // [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the // format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" // where {year} is always expressed using four digits while {month}, {day}, // {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional // seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), // are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone // is required. A proto3 JSON serializer should always use UTC (as indicated by // "Z") when printing the Timestamp type and a proto3 JSON parser should be // able to accept both UTC and other timezones (as indicated by an offset). // // For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past // 01:30 UTC on January 15, 2017. // // In JavaScript, one can convert a Date object to this format using the // standard // [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) // method. 
In Python, a standard `datetime.datetime` object can be converted // to this format using // [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with // the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use // the Joda Time's [`ISODateTimeFormat.dateTime()`]( // http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D // ) to obtain a formatter capable of generating timestamps in this format. // // message Timestamp { // Represents seconds of UTC time since Unix epoch // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. int64 seconds = 1; // Non-negative fractions of a second at nanosecond resolution. Negative // second values with fractions must still have non-negative nanos values // that count forward in time. Must be from 0 to 999,999,999 // inclusive. int32 nanos = 2; } ================================================ FILE: crates/shim-protos/vendor/microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/stats.proto ================================================ syntax = "proto3"; package containerd.runhcs.stats.v1; import "google/protobuf/timestamp.proto"; import "github.com/containerd/cgroups/v3/cgroup1/stats/metrics.proto"; option go_package = "github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats;stats"; message Statistics { oneof container { WindowsContainerStatistics windows = 1; io.containerd.cgroups.v1.Metrics linux = 2; } VirtualMachineStatistics vm = 3; } message WindowsContainerStatistics { google.protobuf.Timestamp timestamp = 1; google.protobuf.Timestamp container_start_time = 2; uint64 uptime_ns = 3; WindowsContainerProcessorStatistics processor = 4; WindowsContainerMemoryStatistics memory = 5; WindowsContainerStorageStatistics storage = 6; } message WindowsContainerProcessorStatistics { uint64 total_runtime_ns = 1; uint64 runtime_user_ns = 2; uint64 runtime_kernel_ns = 3; } message 
WindowsContainerMemoryStatistics { uint64 memory_usage_commit_bytes = 1; uint64 memory_usage_commit_peak_bytes = 2; uint64 memory_usage_private_working_set_bytes = 3; } message WindowsContainerStorageStatistics { uint64 read_count_normalized = 1; uint64 read_size_bytes = 2; uint64 write_count_normalized = 3; uint64 write_size_bytes = 4; } message VirtualMachineStatistics { VirtualMachineProcessorStatistics processor = 1; VirtualMachineMemoryStatistics memory = 2; } message VirtualMachineProcessorStatistics { uint64 total_runtime_ns = 1; } message VirtualMachineMemoryStatistics { uint64 working_set_bytes = 1; uint32 virtual_node_count = 2; VirtualMachineMemory vm_memory = 3; } message VirtualMachineMemory { int32 available_memory = 1; int32 available_memory_buffer = 2; uint64 reserved_memory = 3; uint64 assigned_memory = 4; bool slp_active = 5; bool balancing_enabled = 6; bool dm_operation_in_progress = 7; } ================================================ FILE: crates/snapshots/Cargo.toml ================================================ [package] name = "containerd-snapshots" version = "0.3.0" authors = [ "Maksym Pavlenko ", "The containerd Authors", ] description = "Remote snapshotter extension for containerd" keywords = ["containerd", "server", "grpc", "containers"] categories = ["api-bindings", "asynchronous"] edition.workspace = true license.workspace = true repository.workspace = true homepage.workspace = true [features] docs = [] [dependencies] async-stream = "0.3.6" futures = { workspace = true, features = ["std", "alloc"] } prost = { workspace = true, features = ["derive", "std"] } prost-types = { workspace = true, features = ["std"] } serde = { workspace = true, features = ["derive", "std"] } thiserror.workspace = true tonic = { workspace = true, features = ["codegen"] } tonic-prost.workspace = true tokio-stream = { version = "0.1", default-features = false } [dev-dependencies] futures.workspace = true log.workspace = true simple_logger.workspace = true 
tokio = { workspace = true, features = ["macros", "rt", "sync", "net", "io-util"] } tonic = { workspace = true, features = ["server", "router"] } [build-dependencies] tonic-prost-build.workspace = true [package.metadata.docs.rs] features = ["docs"] [package.metadata.cargo-machete] ignored = ["prost", "tonic-prost"] ================================================ FILE: crates/snapshots/README.md ================================================ # Remote snapshotter extension for containerd [![Crates.io](https://img.shields.io/crates/v/containerd-snapshots)](https://crates.io/crates/containerd-snapshots) [![docs.rs](https://img.shields.io/docsrs/containerd-snapshots)](https://docs.rs/containerd-snapshots/latest/containerd_snapshots/) [![Crates.io](https://img.shields.io/crates/l/containerd-shim-logging)](https://github.com/containerd/rust-extensions/blob/main/LICENSE) [![CI](https://github.com/containerd/rust-extensions/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/containerd/rust-extensions/actions/workflows/ci.yml) Snapshots crate implements containerd's proxy plugin for snapshotting. It aims hide the underlying complexity of GRPC interfaces, streaming, and request/response conversions and provide one `Snapshots` trait to implement. [containerd Documentation](https://github.com/containerd/containerd/blob/main/docs/PLUGINS.md#proxy-plugins) ## Proxy plugins A proxy plugin is configured using containerd's config file and will be loaded alongside the internal plugins when containerd is started. These plugins are connected to containerd using a local socket serving one of containerd's GRPC API services. Each plugin is configured with a type and name just as internal plugins are. 
## How to use from containerd Add the following to containerd's configuration file: ```toml [proxy_plugins] [proxy_plugins.custom] type = "snapshot" address = "/tmp/snap2.sock" ``` Start daemons and try pulling an image with `custom` snapshotter: ```bash # Start containerd daemon $ containerd --config /path/config.toml # Run remote snapshotter instance $ cargo run --example snapshotter /tmp/snap2.sock # Now specify the snapshotter when pulling an image $ ctr i pull --snapshotter custom docker.io/library/hello-world:latest ``` ## Getting started Snapshotters are required to implement `Snapshotter` trait (which is very similar to containerd's [Snapshotter](https://github.com/containerd/containerd/blob/main/core/snapshots/snapshotter.go) interface). ```rust,ignore use std::collections::HashMap; use containerd_snapshots as snapshots; use containerd_snapshots::{api, Info, Usage}; use log::info; #[derive(Default)] struct Example; #[snapshots::tonic::async_trait] impl snapshots::Snapshotter for Example { type Error = snapshots::tonic::Status; async fn stat(&self, key: String) -> Result { info!("Stat: {}", key); Ok(Info::default()) } // ... async fn commit( &self, name: String, key: String, labels: HashMap, ) -> Result<(), Self::Error> { info!("Commit: name={}, key={}, labels={:?}", name, key, labels); Ok(()) } } ``` The library provides `snapshots::server` for convenience to wrap the implementation into a GRPC server, so it can be used with `tonic` like this: ```rust,ignore use snapshots::tonic::transport::Server; Server::builder() .add_service(snapshots::server(example)) .serve_with_incoming(incoming) .await .expect("Serve failed"); ``` ================================================ FILE: crates/snapshots/build.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ use std::{env, fs, io}; const PROTO_FILES: &[&str] = &["types/mount.proto", "services/snapshots/v1/snapshots.proto"]; const FIXUP_MODULES: &[&str] = &["containerd.services.snapshots.v1"]; fn main() { tonic_prost_build::configure() .build_server(true) .compile_protos( PROTO_FILES, &["vendor/github.com/containerd/containerd/api/", "vendor/"], ) .expect("Failed to generate GRPC bindings"); for module in FIXUP_MODULES { fixup_imports(module).expect("Failed to fixup module"); } } // Original containerd's protobuf files contain Go style imports: // import "github.com/containerd/containerd/api/types/mount.proto"; // // Tonic produces invalid code for these imports: // error[E0433]: failed to resolve: there are too many leading `super` keywords // --> /containerd-rust-extensions/target/debug/build/containerd-client-protos-0a328c0c63f60cd0/out/containerd.services.diff.v1.rs:47:52 // | // 47 | pub diff: ::core::option::Option, // | ^^^^^ there are too many leading `super` keywords // // This func fixes imports to crate level ones, like `crate::types::Mount` fn fixup_imports(path: &str) -> Result<(), io::Error> { let out_dir = env::var("OUT_DIR").unwrap(); let path = format!("{}/{}.rs", out_dir, path); let contents = fs::read_to_string(&path)? .replace("super::super::super::types", "crate::api::types") .replace( "/// filters\\[0\\] or filters\\[1\\] or ... or filters\\[n-1\\] or filters\\[n\\]", r#" /// ```notrust /// filters[0] or filters[1] or ... 
or filters[n-1] or filters[n] /// ```"#, ); fs::write(path, contents)?; Ok(()) } ================================================ FILE: crates/snapshots/examples/snapshotter.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ use std::{ collections::HashMap, env, pin::Pin, sync::Arc, task::{Context, Poll}, }; use containerd_snapshots as snapshots; use containerd_snapshots::{api, Info, Usage}; use futures::TryFutureExt; use log::info; use snapshots::tonic::transport::Server; use tokio::net::UnixListener; use tokio_stream::Stream; #[derive(Default)] struct Example; #[snapshots::tonic::async_trait] impl snapshots::Snapshotter for Example { type Error = snapshots::tonic::Status; async fn stat(&self, key: String) -> Result { info!("Stat: {}", key); Ok(Info::default()) } async fn update( &self, info: Info, fieldpaths: Option>, ) -> Result { info!("Update: info={:?}, fieldpaths={:?}", info, fieldpaths); Ok(Info::default()) } async fn usage(&self, key: String) -> Result { info!("Usage: {}", key); Ok(Usage::default()) } async fn mounts(&self, key: String) -> Result, Self::Error> { info!("Mounts: {}", key); Ok(Vec::new()) } async fn prepare( &self, key: String, parent: String, labels: HashMap, ) -> Result, Self::Error> { info!( "Prepare: key={}, parent={}, labels={:?}", key, parent, labels ); Ok(Vec::new()) } async fn view( &self, key: String, parent: String, labels: HashMap, ) -> Result, Self::Error> { info!("View: key={}, 
parent={}, labels={:?}", key, parent, labels); Ok(Vec::new()) } async fn commit( &self, name: String, key: String, labels: HashMap, ) -> Result<(), Self::Error> { info!("Commit: name={}, key={}, labels={:?}", name, key, labels); Ok(()) } async fn remove(&self, key: String) -> Result<(), Self::Error> { info!("Remove: {}", key); Ok(()) } type InfoStream = EmptyStream; async fn list( &self, snapshotter: String, filters: Vec, ) -> Result { info!("List: snapshotter={}, filters={:?}", snapshotter, filters); // Returns no snapshots. Ok(EmptyStream) } } struct EmptyStream; impl Stream for EmptyStream { type Item = Result; fn poll_next(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { Poll::Ready(None) } } #[cfg(unix)] #[tokio::main(flavor = "current_thread")] async fn main() { simple_logger::SimpleLogger::new() .init() .expect("Failed to initialize logger"); let args = env::args().collect::>(); let socket_path = args .get(1) .ok_or("First argument must be socket path") .unwrap(); let example = Example; let incoming = { let uds = UnixListener::bind(socket_path).expect("Failed to bind listener"); async_stream::stream! 
{ loop { let item = uds.accept().map_ok(|(st, _)| unix::UnixStream(st)).await; yield item; } } }; Server::builder() .add_service(snapshots::server(Arc::new(example))) .serve_with_incoming(incoming) .await .expect("Serve failed"); } // Copy-pasted from https://github.com/hyperium/tonic/blob/master/examples/src/uds/server.rs#L69 #[cfg(unix)] mod unix { use std::{ pin::Pin, sync::Arc, task::{Context, Poll}, }; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; use tonic::transport::server::Connected; #[derive(Debug)] pub struct UnixStream(pub tokio::net::UnixStream); impl Connected for UnixStream { type ConnectInfo = UdsConnectInfo; fn connect_info(&self) -> Self::ConnectInfo { UdsConnectInfo { peer_addr: self.0.peer_addr().ok().map(Arc::new), peer_cred: self.0.peer_cred().ok(), } } } #[allow(dead_code)] #[derive(Clone, Debug)] pub struct UdsConnectInfo { pub peer_addr: Option>, pub peer_cred: Option, } impl AsyncRead for UnixStream { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>, ) -> Poll> { Pin::new(&mut self.0).poll_read(cx, buf) } } impl AsyncWrite for UnixStream { fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { Pin::new(&mut self.0).poll_write(cx, buf) } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { Pin::new(&mut self.0).poll_flush(cx) } fn poll_shutdown( mut self: Pin<&mut Self>, cx: &mut Context<'_>, ) -> Poll> { Pin::new(&mut self.0).poll_shutdown(cx) } } } #[cfg(not(unix))] fn main() { panic!("The snapshotter example only works on unix systems!"); } ================================================ FILE: crates/snapshots/rsync.txt ================================================ api/services/snapshots/v1/snapshots.proto api/types/mount.proto ================================================ FILE: crates/snapshots/src/convert.rs ================================================ /* Copyright The containerd Authors. 
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

//! Various conversions between GRPC and native types.

use std::convert::{TryFrom, TryInto};

use thiserror::Error;
use tonic::Status;

use crate::{api::snapshots::v1 as grpc, Info, Kind};

// Encode native `Kind` as the wire discriminant declared in snapshots.proto
// (UNKNOWN=0, VIEW=1, ACTIVE=2, COMMITTED=3).
impl From<Kind> for i32 {
    fn from(kind: Kind) -> i32 {
        match kind {
            Kind::Unknown => 0,
            Kind::View => 1,
            Kind::Active => 2,
            Kind::Committed => 3,
        }
    }
}

// Decode a wire discriminant back into `Kind`, rejecting values outside the
// range declared in the proto enum.
impl TryFrom<i32> for Kind {
    type Error = Error;

    fn try_from(value: i32) -> Result<Self, Self::Error> {
        Ok(match value {
            0 => Kind::Unknown,
            1 => Kind::View,
            2 => Kind::Active,
            3 => Kind::Committed,
            _ => return Err(Error::InvalidEnumValue(value)),
        })
    }
}

impl TryFrom<grpc::Info> for Info {
    type Error = Error;

    fn try_from(info: grpc::Info) -> Result<Self, Self::Error> {
        Ok(Info {
            kind: info.kind.try_into()?,
            name: info.name,
            parent: info.parent,
            labels: info.labels,
            // Missing timestamps fall back to the prost default (epoch)
            // before conversion to SystemTime.
            created_at: info.created_at.unwrap_or_default().try_into()?,
            updated_at: info.updated_at.unwrap_or_default().try_into()?,
        })
    }
}

impl From<Info> for grpc::Info {
    fn from(info: Info) -> Self {
        grpc::Info {
            name: info.name,
            parent: info.parent,
            kind: info.kind.into(),
            created_at: Some(info.created_at.into()),
            updated_at: Some(info.updated_at.into()),
            labels: info.labels,
        }
    }
}

/// Errors that can occur while converting between GRPC and native types.
#[derive(Debug, Error)]
pub enum Error {
    #[error("Failed to convert GRPC timestamp: {0}")]
    Timestamp(#[from] prost_types::TimestampError),
    #[error("Invalid enum value: {0}")]
    InvalidEnumValue(i32),
}

// Surface conversion failures to GRPC clients as an `internal` status.
impl From<Error> for tonic::Status {
    fn from(err: Error) -> Self {
        Status::internal(format!("{}", err))
    }
}

================================================
FILE:
crates/snapshots/src/lib.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #![cfg_attr(feature = "docs", doc = include_str!("../README.md"))] // No way to derive Eq with tonic :( // See https://github.com/hyperium/tonic/issues/1056 #![allow(clippy::derive_partial_eq_without_eq)] use std::{collections::HashMap, fmt::Debug, ops::AddAssign, time::SystemTime}; use serde::{Deserialize, Serialize}; use tokio_stream::Stream; pub use tonic; mod convert; mod wrap; pub use wrap::server; /// Generated GRPC apis. pub mod api { #![allow(clippy::tabs_in_doc_comments)] #![allow(rustdoc::invalid_rust_codeblocks)] /// Generated snapshots bindings. pub mod snapshots { pub mod v1 { tonic::include_proto!("containerd.services.snapshots.v1"); } } /// Generated `containerd.types` types. pub mod types { tonic::include_proto!("containerd.types"); } } /// Snapshot kinds. #[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Default)] pub enum Kind { #[default] Unknown, View, Active, Committed, } /// Information about a particular snapshot. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Info { /// Active or committed snapshot. pub kind: Kind, /// Name of key of snapshot. pub name: String, /// Name of parent snapshot. pub parent: String, /// Labels for a snapshot. pub labels: HashMap, /// Created time. pub created_at: SystemTime, /// Last updated time. 
pub updated_at: SystemTime, } impl Default for Info { fn default() -> Self { Info { kind: Default::default(), name: Default::default(), parent: Default::default(), labels: Default::default(), created_at: SystemTime::now(), updated_at: SystemTime::now(), } } } /// Defines statistics for disk resources consumed by the snapshot. /// // These resources only include the resources consumed by the snapshot itself and does not include // resources usage by the parent. #[derive(Debug, Clone, Copy, Default)] pub struct Usage { /// Number of inodes in use. pub inodes: i64, /// Provides usage of snapshot in bytes. pub size: i64, } /// Add the provided usage to the current usage. impl AddAssign for Usage { fn add_assign(&mut self, rhs: Self) { self.inodes += rhs.inodes; self.size += rhs.size; } } /// Snapshotter defines the methods required to implement a snapshot snapshotter for /// allocating, snapshotting and mounting filesystem changesets. The model works /// by building up sets of changes with parent-child relationships. /// /// A snapshot represents a filesystem state. Every snapshot has a parent, where /// the empty parent is represented by the empty string. A diff can be taken /// between a parent and its snapshot to generate a classic layer. #[tonic::async_trait] pub trait Snapshotter: Send + Sync + 'static { /// Error type returned from the underlying snapshotter implementation. /// /// This type must be convertable to GRPC status. type Error: Debug + Into + Send; /// Returns the info for an active or committed snapshot by name or key. /// /// Should be used for parent resolution, existence checks and to discern /// the kind of snapshot. async fn stat(&self, key: String) -> Result; /// Update updates the info for a snapshot. /// /// Only mutable properties of a snapshot may be updated. 
async fn update( &self, info: Info, fieldpaths: Option>, ) -> Result; /// Usage returns the resource usage of an active or committed snapshot /// excluding the usage of parent snapshots. /// /// The running time of this call for active snapshots is dependent on /// implementation, but may be proportional to the size of the resource. /// Callers should take this into consideration. async fn usage(&self, key: String) -> Result; /// Mounts returns the mounts for the active snapshot transaction identified /// by key. /// /// Can be called on an read-write or readonly transaction. This is /// available only for active snapshots. /// /// This can be used to recover mounts after calling View or Prepare. async fn mounts(&self, key: String) -> Result, Self::Error>; /// Creates an active snapshot identified by key descending from the provided parent. /// The returned mounts can be used to mount the snapshot to capture changes. /// /// If a parent is provided, after performing the mounts, the destination will start /// with the content of the parent. The parent must be a committed snapshot. /// Changes to the mounted destination will be captured in relation to the parent. /// The default parent, "", is an empty directory. /// /// The changes may be saved to a committed snapshot by calling [Snapshotter::commit]. When /// one is done with the transaction, [Snapshotter::remove] should be called on the key. /// /// Multiple calls to [Snapshotter::prepare] or [Snapshotter::view] with the same key should fail. async fn prepare( &self, key: String, parent: String, labels: HashMap, ) -> Result, Self::Error>; /// View behaves identically to [Snapshotter::prepare] except the result may not be /// committed back to the snapshot snapshotter. View call returns a readonly view on /// the parent, with the active snapshot being tracked by the given key. /// /// This method operates identically to [Snapshotter::prepare], except that mounts returned /// may have the readonly flag set. 
Any modifications to the underlying /// filesystem will be ignored. Implementations may perform this in a more /// efficient manner that differs from what would be attempted with [Snapshotter::prepare]. /// /// Commit may not be called on the provided key and will return an error. /// To collect the resources associated with key, [Snapshotter::remove] must be called with /// key as the argument. async fn view( &self, key: String, parent: String, labels: HashMap, ) -> Result, Self::Error>; /// Capture the changes between key and its parent into a snapshot identified by name. /// /// The name can then be used with the snapshotter's other methods to create subsequent snapshots. /// /// A committed snapshot will be created under name with the parent of the /// active snapshot. /// /// After commit, the snapshot identified by key is removed. async fn commit( &self, name: String, key: String, labels: HashMap, ) -> Result<(), Self::Error>; /// Remove the committed or active snapshot by the provided key. /// /// All resources associated with the key will be removed. /// /// If the snapshot is a parent of another snapshot, its children must be /// removed before proceeding. async fn remove(&self, key: String) -> Result<(), Self::Error>; /// Cleaner defines a type capable of performing asynchronous resource cleanup. /// /// Cleaner interface should be used by snapshotters which implement fast /// removal and deferred resource cleanup. This prevents snapshots from needing /// to perform lengthy resource cleanup before acknowledging a snapshot key /// has been removed and available for re-use. This is also useful when /// performing multi-key removal with the intent of cleaning up all the /// resources after each snapshot key has been removed. async fn clear(&self) -> Result<(), Self::Error> { Ok(()) } /// The type of the stream that returns all snapshots. /// /// An instance of this type is returned by [`Snapshotter::list`] on success. 
type InfoStream: Stream> + Send + 'static; /// Returns a stream containing all snapshots. /// /// Once `type_alias_impl_trait` is stabilized or if the implementer is willing to use unstable /// features, this function can be implemented using `try_stream` and `yield`. For example, a /// function that lists a single snapshot with the default values would be implemented as /// follows: /// ///```ignore /// type InfoStream = impl Stream> + Send + 'static; /// fn list(&self) -> Result { /// Ok(async_stream::try_stream! { /// yield Info::default(); /// }) /// } /// ``` async fn list( &self, snapshotter: String, filters: Vec, ) -> Result; } ================================================ FILE: crates/snapshots/src/wrap.rs ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ //! Trait wrapper to server GRPC requests. use std::{convert::TryInto, mem, sync::Arc}; use futures::{stream::BoxStream, StreamExt}; use crate::{ api::snapshots::v1::{ snapshots_server::{Snapshots, SnapshotsServer}, *, }, Snapshotter, }; pub struct Wrapper { snapshotter: Arc, } /// Helper to create snapshots server from any object that implements [Snapshotter] trait. 
pub fn server<S: Snapshotter>(snapshotter: Arc<S>) -> SnapshotsServer<Wrapper<S>> {
    SnapshotsServer::new(Wrapper { snapshotter })
}

#[tonic::async_trait]
impl<S: Snapshotter> Snapshots for Wrapper<S> {
    async fn prepare(
        &self,
        request: tonic::Request<PrepareSnapshotRequest>,
    ) -> Result<tonic::Response<PrepareSnapshotResponse>, tonic::Status> {
        let request = request.into_inner();
        let mounts = self
            .snapshotter
            .prepare(request.key, request.parent, request.labels)
            .await
            .map_err(Into::into)?;
        let message = PrepareSnapshotResponse { mounts };
        Ok(tonic::Response::new(message))
    }

    async fn view(
        &self,
        request: tonic::Request<ViewSnapshotRequest>,
    ) -> Result<tonic::Response<ViewSnapshotResponse>, tonic::Status> {
        let request = request.into_inner();
        let mounts = self
            .snapshotter
            .view(request.key, request.parent, request.labels)
            .await
            .map_err(Into::into)?;
        let message = ViewSnapshotResponse { mounts };
        Ok(tonic::Response::new(message))
    }

    async fn mounts(
        &self,
        request: tonic::Request<MountsRequest>,
    ) -> Result<tonic::Response<MountsResponse>, tonic::Status> {
        let request = request.into_inner();
        let mounts = self
            .snapshotter
            .mounts(request.key)
            .await
            .map_err(Into::into)?;
        let message = MountsResponse { mounts };
        Ok(tonic::Response::new(message))
    }

    async fn commit(
        &self,
        request: tonic::Request<CommitSnapshotRequest>,
    ) -> Result<tonic::Response<()>, tonic::Status> {
        let request = request.into_inner();
        self.snapshotter
            .commit(request.name, request.key, request.labels)
            .await
            .map_err(Into::into)?;
        Ok(tonic::Response::new(()))
    }

    async fn remove(
        &self,
        request: tonic::Request<RemoveSnapshotRequest>,
    ) -> Result<tonic::Response<()>, tonic::Status> {
        let request = request.into_inner();
        self.snapshotter
            .remove(request.key)
            .await
            .map_err(Into::into)?;
        Ok(tonic::Response::new(()))
    }

    async fn stat(
        &self,
        request: tonic::Request<StatSnapshotRequest>,
    ) -> Result<tonic::Response<StatSnapshotResponse>, tonic::Status> {
        let request = request.into_inner();
        let info = self
            .snapshotter
            .stat(request.key)
            .await
            .map_err(Into::into)?;
        let message = StatSnapshotResponse {
            info: Some(info.into()),
        };
        Ok(tonic::Response::new(message))
    }

    async fn update(
        &self,
        request: tonic::Request<UpdateSnapshotRequest>,
    ) -> Result<tonic::Response<UpdateSnapshotResponse>, tonic::Status> {
        let request = request.into_inner();

        // `info` is a required field of the request.
        let info = match request.info {
            Some(info) => info,
            None => return Err(tonic::Status::failed_precondition("info is required")),
        };

        // Convert the GRPC info into the native one, surfacing timestamp
        // conversion failures as `invalid_argument`.
        let info = match info.try_into() {
            Ok(info) => info,
            Err(err) => {
                let msg = format!("Failed to convert timestamp: {}", err);
                return Err(tonic::Status::invalid_argument(msg));
            }
        };

        let fields = request.update_mask.map(|mask| mask.paths);
        let info = self
            .snapshotter
            .update(info, fields)
            .await
            .map_err(Into::into)?;

        let message = UpdateSnapshotResponse {
            info: Some(info.into()),
        };
        Ok(tonic::Response::new(message))
    }

    // NOTE: `BoxStream` takes the lifetime as its *first* parameter.
    type ListStream = BoxStream<'static, Result<ListSnapshotsResponse, tonic::Status>>;

    async fn list(
        &self,
        request: tonic::Request<ListSnapshotsRequest>,
    ) -> Result<tonic::Response<Self::ListStream>, tonic::Status> {
        let request = request.into_inner();
        let sn = self.snapshotter.clone();

        let output = async_stream::try_stream! {
            let walk_stream = sn
                .list(request.snapshotter, request.filters)
                .await
                .map_err(Into::into)?;
            let mut walk_stream = std::pin::pin!(walk_stream);

            // Batch snapshots so a large listing is split across several
            // responses instead of one oversized message.
            let mut infos = Vec::<Info>::new();
            while let Some(info) = walk_stream.next().await {
                infos.push(info.map_err(Into::into)?.into());
                if infos.len() >= 100 {
                    yield ListSnapshotsResponse { info: mem::take(&mut infos) };
                }
            }
            if !infos.is_empty() {
                yield ListSnapshotsResponse { info: infos };
            }
        };

        Ok(tonic::Response::new(Box::pin(output)))
    }

    async fn usage(
        &self,
        request: tonic::Request<UsageRequest>,
    ) -> Result<tonic::Response<UsageResponse>, tonic::Status> {
        let request = request.into_inner();
        let usage = self
            .snapshotter
            .usage(request.key)
            .await
            .map_err(Into::into)?;
        let message = UsageResponse {
            size: usage.size,
            inodes: usage.inodes,
        };
        Ok(tonic::Response::new(message))
    }

    async fn cleanup(
        &self,
        _request: tonic::Request<CleanupRequest>,
    ) -> Result<tonic::Response<()>, tonic::Status> {
        self.snapshotter.clear().await.map_err(Into::into)?;
        Ok(tonic::Response::new(()))
    }
}

================================================
FILE: crates/snapshots/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto
================================================
/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.services.snapshots.v1; import "google/protobuf/empty.proto"; import "google/protobuf/field_mask.proto"; import "google/protobuf/timestamp.proto"; import "types/mount.proto"; option go_package = "github.com/containerd/containerd/api/services/snapshots/v1;snapshots"; // Snapshot service manages snapshots service Snapshots { rpc Prepare(PrepareSnapshotRequest) returns (PrepareSnapshotResponse); rpc View(ViewSnapshotRequest) returns (ViewSnapshotResponse); rpc Mounts(MountsRequest) returns (MountsResponse); rpc Commit(CommitSnapshotRequest) returns (google.protobuf.Empty); rpc Remove(RemoveSnapshotRequest) returns (google.protobuf.Empty); rpc Stat(StatSnapshotRequest) returns (StatSnapshotResponse); rpc Update(UpdateSnapshotRequest) returns (UpdateSnapshotResponse); rpc List(ListSnapshotsRequest) returns (stream ListSnapshotsResponse); rpc Usage(UsageRequest) returns (UsageResponse); rpc Cleanup(CleanupRequest) returns (google.protobuf.Empty); } message PrepareSnapshotRequest { string snapshotter = 1; string key = 2; string parent = 3; // Labels are arbitrary data on snapshots. // // The combined size of a key/value pair cannot exceed 4096 bytes. map labels = 4; } message PrepareSnapshotResponse { repeated containerd.types.Mount mounts = 1; } message ViewSnapshotRequest { string snapshotter = 1; string key = 2; string parent = 3; // Labels are arbitrary data on snapshots. // // The combined size of a key/value pair cannot exceed 4096 bytes. 
map labels = 4; } message ViewSnapshotResponse { repeated containerd.types.Mount mounts = 1; } message MountsRequest { string snapshotter = 1; string key = 2; } message MountsResponse { repeated containerd.types.Mount mounts = 1; } message RemoveSnapshotRequest { string snapshotter = 1; string key = 2; } message CommitSnapshotRequest { string snapshotter = 1; string name = 2; string key = 3; // Labels are arbitrary data on snapshots. // // The combined size of a key/value pair cannot exceed 4096 bytes. map labels = 4; string parent = 5; } message StatSnapshotRequest { string snapshotter = 1; string key = 2; } enum Kind { UNKNOWN = 0; VIEW = 1; ACTIVE = 2; COMMITTED = 3; } message Info { string name = 1; string parent = 2; Kind kind = 3; // CreatedAt provides the time at which the snapshot was created. google.protobuf.Timestamp created_at = 4; // UpdatedAt provides the time the info was last updated. google.protobuf.Timestamp updated_at = 5; // Labels are arbitrary data on snapshots. // // The combined size of a key/value pair cannot exceed 4096 bytes. map labels = 6; } message StatSnapshotResponse { Info info = 1; } message UpdateSnapshotRequest { string snapshotter = 1; Info info = 2; // UpdateMask specifies which fields to perform the update on. If empty, // the operation applies to all fields. // // In info, Name, Parent, Kind, Created are immutable, // other field may be updated using this mask. // If no mask is provided, all mutable field are updated. google.protobuf.FieldMask update_mask = 3; } message UpdateSnapshotResponse { Info info = 1; } message ListSnapshotsRequest { string snapshotter = 1; // Filters contains one or more filters using the syntax defined in the // containerd filter package. // // The returned result will be those that match any of the provided // filters. Expanded, images that match the following will be // returned: // // filters[0] or filters[1] or ... 
or filters[n-1] or filters[n] // // If filters is zero-length or nil, all items will be returned. repeated string filters = 2; } message ListSnapshotsResponse { repeated Info info = 1; } message UsageRequest { string snapshotter = 1; string key = 2; } message UsageResponse { int64 size = 1; int64 inodes = 2; } message CleanupRequest { string snapshotter = 1; } ================================================ FILE: crates/snapshots/vendor/github.com/containerd/containerd/api/types/mount.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.types; import "google/protobuf/timestamp.proto"; option go_package = "github.com/containerd/containerd/api/types;types"; // Mount describes mounts for a container. // // This type is the lingua franca of ContainerD. All services provide mounts // to be used with the container at creation time. // // The Mount type follows the structure of the mount syscall, including a type, // source, target and options. message Mount { // Type defines the nature of the mount. string type = 1; // Source specifies the name of the mount. Depending on mount type, this // may be a volume name or a host path, or even ignored. string source = 2; // Target path in container string target = 3; // Options specifies zero or more fstab style mount options. 
repeated string options = 4; } message ActiveMount { Mount mount = 1; google.protobuf.Timestamp mounted_at = 2; string mount_point = 3; map data = 4; } message ActivationInfo { string name = 1; repeated ActiveMount active = 2; repeated Mount system = 3; map labels = 4; } ================================================ FILE: crates/snapshots/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ syntax = "proto3"; package containerd.services.snapshots.v1; import "google/protobuf/empty.proto"; import "google/protobuf/field_mask.proto"; import "google/protobuf/timestamp.proto"; import "types/mount.proto"; option go_package = "github.com/containerd/containerd/api/services/snapshots/v1;snapshots"; // Snapshot service manages snapshots service Snapshots { rpc Prepare(PrepareSnapshotRequest) returns (PrepareSnapshotResponse); rpc View(ViewSnapshotRequest) returns (ViewSnapshotResponse); rpc Mounts(MountsRequest) returns (MountsResponse); rpc Commit(CommitSnapshotRequest) returns (google.protobuf.Empty); rpc Remove(RemoveSnapshotRequest) returns (google.protobuf.Empty); rpc Stat(StatSnapshotRequest) returns (StatSnapshotResponse); rpc Update(UpdateSnapshotRequest) returns (UpdateSnapshotResponse); rpc List(ListSnapshotsRequest) returns (stream ListSnapshotsResponse); rpc Usage(UsageRequest) returns (UsageResponse); rpc Cleanup(CleanupRequest) returns (google.protobuf.Empty); } message PrepareSnapshotRequest { string snapshotter = 1; string key = 2; string parent = 3; // Labels are arbitrary data on snapshots. // // The combined size of a key/value pair cannot exceed 4096 bytes. map labels = 4; } message PrepareSnapshotResponse { repeated containerd.types.Mount mounts = 1; } message ViewSnapshotRequest { string snapshotter = 1; string key = 2; string parent = 3; // Labels are arbitrary data on snapshots. // // The combined size of a key/value pair cannot exceed 4096 bytes. map labels = 4; } message ViewSnapshotResponse { repeated containerd.types.Mount mounts = 1; } message MountsRequest { string snapshotter = 1; string key = 2; } message MountsResponse { repeated containerd.types.Mount mounts = 1; } message RemoveSnapshotRequest { string snapshotter = 1; string key = 2; } message CommitSnapshotRequest { string snapshotter = 1; string name = 2; string key = 3; // Labels are arbitrary data on snapshots. 
// // The combined size of a key/value pair cannot exceed 4096 bytes. map labels = 4; string parent = 5; } message StatSnapshotRequest { string snapshotter = 1; string key = 2; } enum Kind { UNKNOWN = 0; VIEW = 1; ACTIVE = 2; COMMITTED = 3; } message Info { string name = 1; string parent = 2; Kind kind = 3; // CreatedAt provides the time at which the snapshot was created. google.protobuf.Timestamp created_at = 4; // UpdatedAt provides the time the info was last updated. google.protobuf.Timestamp updated_at = 5; // Labels are arbitrary data on snapshots. // // The combined size of a key/value pair cannot exceed 4096 bytes. map labels = 6; } message StatSnapshotResponse { Info info = 1; } message UpdateSnapshotRequest { string snapshotter = 1; Info info = 2; // UpdateMask specifies which fields to perform the update on. If empty, // the operation applies to all fields. // // In info, Name, Parent, Kind, Created are immutable, // other field may be updated using this mask. // If no mask is provided, all mutable field are updated. google.protobuf.FieldMask update_mask = 3; } message UpdateSnapshotResponse { Info info = 1; } message ListSnapshotsRequest { string snapshotter = 1; // Filters contains one or more filters using the syntax defined in the // containerd filter package. // // The returned result will be those that match any of the provided // filters. Expanded, images that match the following will be // returned: // // filters[0] or filters[1] or ... or filters[n-1] or filters[n] // // If filters is zero-length or nil, all items will be returned. 
repeated string filters = 2; } message ListSnapshotsResponse { repeated Info info = 1; } message UsageRequest { string snapshotter = 1; string key = 2; } message UsageResponse { int64 size = 1; int64 inodes = 2; } message CleanupRequest { string snapshotter = 1; } ================================================ FILE: crates/snapshots/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/mount.proto ================================================ /* Copyright The containerd Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ syntax = "proto3"; package containerd.types; import "google/protobuf/timestamp.proto"; option go_package = "github.com/containerd/containerd/api/types;types"; // Mount describes mounts for a container. // // This type is the lingua franca of ContainerD. All services provide mounts // to be used with the container at creation time. // // The Mount type follows the structure of the mount syscall, including a type, // source, target and options. message Mount { // Type defines the nature of the mount. string type = 1; // Source specifies the name of the mount. Depending on mount type, this // may be a volume name or a host path, or even ignored. string source = 2; // Target path in container string target = 3; // Options specifies zero or more fstab style mount options. 
repeated string options = 4; } message ActiveMount { Mount mount = 1; google.protobuf.Timestamp mounted_at = 2; string mount_point = 3; map data = 4; } message ActivationInfo { string name = 1; repeated ActiveMount active = 2; repeated Mount system = 3; map labels = 4; } ================================================ FILE: crates/snapshots/vendor/gogoproto/gogo.proto ================================================ // Protocol Buffers for Go with Gadgets // // Copyright (c) 2013, The GoGo Authors. All rights reserved. // http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
syntax = "proto2"; package gogoproto; import "google/protobuf/descriptor.proto"; option java_package = "com.google.protobuf"; option java_outer_classname = "GoGoProtos"; option go_package = "github.com/gogo/protobuf/gogoproto"; extend google.protobuf.EnumOptions { optional bool goproto_enum_prefix = 62001; optional bool goproto_enum_stringer = 62021; optional bool enum_stringer = 62022; optional string enum_customname = 62023; optional bool enumdecl = 62024; } extend google.protobuf.EnumValueOptions { optional string enumvalue_customname = 66001; } extend google.protobuf.FileOptions { optional bool goproto_getters_all = 63001; optional bool goproto_enum_prefix_all = 63002; optional bool goproto_stringer_all = 63003; optional bool verbose_equal_all = 63004; optional bool face_all = 63005; optional bool gostring_all = 63006; optional bool populate_all = 63007; optional bool stringer_all = 63008; optional bool onlyone_all = 63009; optional bool equal_all = 63013; optional bool description_all = 63014; optional bool testgen_all = 63015; optional bool benchgen_all = 63016; optional bool marshaler_all = 63017; optional bool unmarshaler_all = 63018; optional bool stable_marshaler_all = 63019; optional bool sizer_all = 63020; optional bool goproto_enum_stringer_all = 63021; optional bool enum_stringer_all = 63022; optional bool unsafe_marshaler_all = 63023; optional bool unsafe_unmarshaler_all = 63024; optional bool goproto_extensions_map_all = 63025; optional bool goproto_unrecognized_all = 63026; optional bool gogoproto_import = 63027; optional bool protosizer_all = 63028; optional bool compare_all = 63029; optional bool typedecl_all = 63030; optional bool enumdecl_all = 63031; optional bool goproto_registration = 63032; optional bool messagename_all = 63033; optional bool goproto_sizecache_all = 63034; optional bool goproto_unkeyed_all = 63035; } extend google.protobuf.MessageOptions { optional bool goproto_getters = 64001; optional bool goproto_stringer = 64003; 
optional bool verbose_equal = 64004; optional bool face = 64005; optional bool gostring = 64006; optional bool populate = 64007; optional bool stringer = 67008; optional bool onlyone = 64009; optional bool equal = 64013; optional bool description = 64014; optional bool testgen = 64015; optional bool benchgen = 64016; optional bool marshaler = 64017; optional bool unmarshaler = 64018; optional bool stable_marshaler = 64019; optional bool sizer = 64020; optional bool unsafe_marshaler = 64023; optional bool unsafe_unmarshaler = 64024; optional bool goproto_extensions_map = 64025; optional bool goproto_unrecognized = 64026; optional bool protosizer = 64028; optional bool compare = 64029; optional bool typedecl = 64030; optional bool messagename = 64033; optional bool goproto_sizecache = 64034; optional bool goproto_unkeyed = 64035; } extend google.protobuf.FieldOptions { optional bool nullable = 65001; optional bool embed = 65002; optional string customtype = 65003; optional string customname = 65004; optional string jsontag = 65005; optional string moretags = 65006; optional string casttype = 65007; optional string castkey = 65008; optional string castvalue = 65009; optional bool stdtime = 65010; optional bool stdduration = 65011; optional bool wktpointer = 65012; } ================================================ FILE: crates/snapshots/vendor/google/protobuf/descriptor.proto ================================================ // Protocol Buffers - Google's data interchange format // Copyright 2008 Google Inc. All rights reserved. // https://developers.google.com/protocol-buffers/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. 
// * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Author: kenton@google.com (Kenton Varda) // Based on original Protocol Buffers design by // Sanjay Ghemawat, Jeff Dean, and others. // // The messages in this file describe the definitions found in .proto files. // A valid .proto file can be translated directly to a FileDescriptorProto // without any other information (e.g. without reading its imports). 
syntax = "proto2"; package google.protobuf; option go_package = "google.golang.org/protobuf/types/descriptorpb"; option java_package = "com.google.protobuf"; option java_outer_classname = "DescriptorProtos"; option csharp_namespace = "Google.Protobuf.Reflection"; option objc_class_prefix = "GPB"; option cc_enable_arenas = true; // descriptor.proto must be optimized for speed because reflection-based // algorithms don't work during bootstrapping. option optimize_for = SPEED; // The protocol compiler can output a FileDescriptorSet containing the .proto // files it parses. message FileDescriptorSet { repeated FileDescriptorProto file = 1; } // Describes a complete .proto file. message FileDescriptorProto { optional string name = 1; // file name, relative to root of source tree optional string package = 2; // e.g. "foo", "foo.bar", etc. // Names of files imported by this file. repeated string dependency = 3; // Indexes of the public imported files in the dependency list above. repeated int32 public_dependency = 10; // Indexes of the weak imported files in the dependency list. // For Google-internal migration only. Do not use. repeated int32 weak_dependency = 11; // All top-level definitions in this file. repeated DescriptorProto message_type = 4; repeated EnumDescriptorProto enum_type = 5; repeated ServiceDescriptorProto service = 6; repeated FieldDescriptorProto extension = 7; optional FileOptions options = 8; // This field contains optional information about the original source code. // You may safely remove this entire field without harming runtime // functionality of the descriptors -- the information is needed only by // development tools. optional SourceCodeInfo source_code_info = 9; // The syntax of the proto file. // The supported values are "proto2" and "proto3". optional string syntax = 12; } // Describes a message type. 
message DescriptorProto { optional string name = 1; repeated FieldDescriptorProto field = 2; repeated FieldDescriptorProto extension = 6; repeated DescriptorProto nested_type = 3; repeated EnumDescriptorProto enum_type = 4; message ExtensionRange { optional int32 start = 1; // Inclusive. optional int32 end = 2; // Exclusive. optional ExtensionRangeOptions options = 3; } repeated ExtensionRange extension_range = 5; repeated OneofDescriptorProto oneof_decl = 8; optional MessageOptions options = 7; // Range of reserved tag numbers. Reserved tag numbers may not be used by // fields or extension ranges in the same message. Reserved ranges may // not overlap. message ReservedRange { optional int32 start = 1; // Inclusive. optional int32 end = 2; // Exclusive. } repeated ReservedRange reserved_range = 9; // Reserved field names, which may not be used by fields in the same message. // A given name may only be reserved once. repeated string reserved_name = 10; } message ExtensionRangeOptions { // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; // Clients can define custom options in extensions of this message. See above. extensions 1000 to max; } // Describes a field within a message. message FieldDescriptorProto { enum Type { // 0 is reserved for errors. // Order is weird for historical reasons. TYPE_DOUBLE = 1; TYPE_FLOAT = 2; // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if // negative values are likely. TYPE_INT64 = 3; TYPE_UINT64 = 4; // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if // negative values are likely. TYPE_INT32 = 5; TYPE_FIXED64 = 6; TYPE_FIXED32 = 7; TYPE_BOOL = 8; TYPE_STRING = 9; // Tag-delimited aggregate. // Group type is deprecated and not supported in proto3. However, Proto3 // implementations should still be able to parse the group wire format and // treat group fields as unknown fields. 
TYPE_GROUP = 10; TYPE_MESSAGE = 11; // Length-delimited aggregate. // New in version 2. TYPE_BYTES = 12; TYPE_UINT32 = 13; TYPE_ENUM = 14; TYPE_SFIXED32 = 15; TYPE_SFIXED64 = 16; TYPE_SINT32 = 17; // Uses ZigZag encoding. TYPE_SINT64 = 18; // Uses ZigZag encoding. } enum Label { // 0 is reserved for errors LABEL_OPTIONAL = 1; LABEL_REQUIRED = 2; LABEL_REPEATED = 3; } optional string name = 1; optional int32 number = 3; optional Label label = 4; // If type_name is set, this need not be set. If both this and type_name // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. optional Type type = 5; // For message and enum types, this is the name of the type. If the name // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping // rules are used to find the type (i.e. first the nested types within this // message are searched, then within the parent, on up to the root // namespace). optional string type_name = 6; // For extensions, this is the name of the type being extended. It is // resolved in the same manner as type_name. optional string extendee = 2; // For numeric types, contains the original text representation of the value. // For booleans, "true" or "false". // For strings, contains the default text contents (not escaped in any way). // For bytes, contains the C escaped value. All bytes >= 128 are escaped. // TODO(kenton): Base-64 encode? optional string default_value = 7; // If set, gives the index of a oneof in the containing type's oneof_decl // list. This field is a member of that oneof. optional int32 oneof_index = 9; // JSON name of this field. The value is set by protocol compiler. If the // user has set a "json_name" option on this field, that option's value // will be used. Otherwise, it's deduced from the field's name by converting // it to camelCase. optional string json_name = 10; optional FieldOptions options = 8; // If true, this is a proto3 "optional". 
When a proto3 field is optional, it // tracks presence regardless of field type. // // When proto3_optional is true, this field must be belong to a oneof to // signal to old proto3 clients that presence is tracked for this field. This // oneof is known as a "synthetic" oneof, and this field must be its sole // member (each proto3 optional field gets its own synthetic oneof). Synthetic // oneofs exist in the descriptor only, and do not generate any API. Synthetic // oneofs must be ordered after all "real" oneofs. // // For message fields, proto3_optional doesn't create any semantic change, // since non-repeated message fields always track presence. However it still // indicates the semantic detail of whether the user wrote "optional" or not. // This can be useful for round-tripping the .proto file. For consistency we // give message fields a synthetic oneof also, even though it is not required // to track presence. This is especially important because the parser can't // tell if a field is a message or an enum, so it must always create a // synthetic oneof. // // Proto2 optional fields do not set this flag, because they already indicate // optional with `LABEL_OPTIONAL`. optional bool proto3_optional = 17; } // Describes a oneof. message OneofDescriptorProto { optional string name = 1; optional OneofOptions options = 2; } // Describes an enum type. message EnumDescriptorProto { optional string name = 1; repeated EnumValueDescriptorProto value = 2; optional EnumOptions options = 3; // Range of reserved numeric values. Reserved values may not be used by // entries in the same enum. Reserved ranges may not overlap. // // Note that this is distinct from DescriptorProto.ReservedRange in that it // is inclusive such that it can appropriately represent the entire int32 // domain. message EnumReservedRange { optional int32 start = 1; // Inclusive. optional int32 end = 2; // Inclusive. } // Range of reserved numeric values. 
Reserved numeric values may not be used // by enum values in the same enum declaration. Reserved ranges may not // overlap. repeated EnumReservedRange reserved_range = 4; // Reserved enum value names, which may not be reused. A given name may only // be reserved once. repeated string reserved_name = 5; } // Describes a value within an enum. message EnumValueDescriptorProto { optional string name = 1; optional int32 number = 2; optional EnumValueOptions options = 3; } // Describes a service. message ServiceDescriptorProto { optional string name = 1; repeated MethodDescriptorProto method = 2; optional ServiceOptions options = 3; } // Describes a method of a service. message MethodDescriptorProto { optional string name = 1; // Input and output type names. These are resolved in the same way as // FieldDescriptorProto.type_name, but must refer to a message type. optional string input_type = 2; optional string output_type = 3; optional MethodOptions options = 4; // Identifies if client streams multiple client messages optional bool client_streaming = 5 [default = false]; // Identifies if server streams multiple server messages optional bool server_streaming = 6 [default = false]; } // =================================================================== // Options // Each of the definitions above may have "options" attached. These are // just annotations which may cause code to be generated slightly differently // or may contain hints for code that manipulates protocol messages. // // Clients may define custom options as extensions of the *Options messages. // These extensions may not yet be known at parsing time, so the parser cannot // store the values in them. Instead it stores them in a field in the *Options // message called uninterpreted_option. This field must have the same name // across all *Options messages. 
We then use this field to populate the // extensions when we build a descriptor, at which point all protos have been // parsed and so all extensions are known. // // Extension numbers for custom options may be chosen as follows: // * For options which will only be used within a single application or // organization, or for experimental options, use field numbers 50000 // through 99999. It is up to you to ensure that you do not use the // same number for multiple options. // * For options which will be published and used publicly by multiple // independent entities, e-mail protobuf-global-extension-registry@google.com // to reserve extension numbers. Simply provide your project name (e.g. // Objective-C plugin) and your project website (if available) -- there's no // need to explain how you intend to use them. Usually you only need one // extension number. You can declare multiple options with only one extension // number by putting them in a sub-message. See the Custom Options section of // the docs for examples: // https://developers.google.com/protocol-buffers/docs/proto#options // If this turns out to be popular, a web service will be set up // to automatically assign option numbers. message FileOptions { // Sets the Java package where classes generated from this .proto will be // placed. By default, the proto package is used, but this is often // inappropriate because proto packages do not normally start with backwards // domain names. optional string java_package = 1; // Controls the name of the wrapper Java class generated for the .proto file. // That class will always contain the .proto file's getDescriptor() method as // well as any top-level extensions defined in the .proto file. // If java_multiple_files is disabled, then all the other classes from the // .proto file will be nested inside the single wrapper outer class. 
optional string java_outer_classname = 8; // If enabled, then the Java code generator will generate a separate .java // file for each top-level message, enum, and service defined in the .proto // file. Thus, these types will *not* be nested inside the wrapper class // named by java_outer_classname. However, the wrapper class will still be // generated to contain the file's getDescriptor() method as well as any // top-level extensions defined in the file. optional bool java_multiple_files = 10 [default = false]; // This option does nothing. optional bool java_generate_equals_and_hash = 20 [deprecated=true]; // If set true, then the Java2 code generator will generate code that // throws an exception whenever an attempt is made to assign a non-UTF-8 // byte sequence to a string field. // Message reflection will do the same. // However, an extension field still accepts non-UTF-8 byte sequences. // This option has no effect on when used with the lite runtime. optional bool java_string_check_utf8 = 27 [default = false]; // Generated classes can be optimized for speed or code size. enum OptimizeMode { SPEED = 1; // Generate complete code for parsing, serialization, // etc. CODE_SIZE = 2; // Use ReflectionOps to implement these methods. LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. } optional OptimizeMode optimize_for = 9 [default = SPEED]; // Sets the Go package where structs generated from this .proto will be // placed. If omitted, the Go package will be derived from the following: // - The basename of the package import path, if provided. // - Otherwise, the package statement in the .proto file, if present. // - Otherwise, the basename of the .proto file, without extension. optional string go_package = 11; // Should generic services be generated in each language? "Generic" services // are not specific to any particular RPC system. They are generated by the // main code generators in each language (without additional plugins). 
// Generic services were the only kind of service generation supported by // early versions of google.protobuf. // // Generic services are now considered deprecated in favor of using plugins // that generate code specific to your particular RPC system. Therefore, // these default to false. Old code which depends on generic services should // explicitly set them to true. optional bool cc_generic_services = 16 [default = false]; optional bool java_generic_services = 17 [default = false]; optional bool py_generic_services = 18 [default = false]; optional bool php_generic_services = 42 [default = false]; // Is this file deprecated? // Depending on the target platform, this can emit Deprecated annotations // for everything in the file, or it will be completely ignored; in the very // least, this is a formalization for deprecating files. optional bool deprecated = 23 [default = false]; // Enables the use of arenas for the proto messages in this file. This applies // only to generated classes for C++. optional bool cc_enable_arenas = 31 [default = true]; // Sets the objective c class prefix which is prepended to all objective c // generated classes from this .proto. There is no default. optional string objc_class_prefix = 36; // Namespace for generated classes; defaults to the package. optional string csharp_namespace = 37; // By default Swift generators will take the proto package and CamelCase it // replacing '.' with underscore and use that to prefix the types/symbols // defined. When this options is provided, they will use this value instead // to prefix the types/symbols defined. optional string swift_prefix = 39; // Sets the php class prefix which is prepended to all php generated classes // from this .proto. Default is empty. optional string php_class_prefix = 40; // Use this option to change the namespace of php generated classes. Default // is empty. When this option is empty, the package name will be used for // determining the namespace. 
optional string php_namespace = 41; // Use this option to change the namespace of php generated metadata classes. // Default is empty. When this option is empty, the proto file name will be // used for determining the namespace. optional string php_metadata_namespace = 44; // Use this option to change the package of ruby generated classes. Default // is empty. When this option is not set, the package name will be used for // determining the ruby package. optional string ruby_package = 45; // The parser stores options it doesn't recognize here. // See the documentation for the "Options" section above. repeated UninterpretedOption uninterpreted_option = 999; // Clients can define custom options in extensions of this message. // See the documentation for the "Options" section above. extensions 1000 to max; reserved 38; } message MessageOptions { // Set true to use the old proto1 MessageSet wire format for extensions. // This is provided for backwards-compatibility with the MessageSet wire // format. You should not use this for any other reason: It's less // efficient, has fewer features, and is more complicated. // // The message must be defined exactly as follows: // message Foo { // option message_set_wire_format = true; // extensions 4 to max; // } // Note that the message cannot have any defined fields; MessageSets only // have extensions. // // All extensions of your type must be singular messages; e.g. they cannot // be int32s, enums, or repeated messages. // // Because this is an option, the above two restrictions are not enforced by // the protocol compiler. optional bool message_set_wire_format = 1 [default = false]; // Disables the generation of the standard "descriptor()" accessor, which can // conflict with a field of the same name. This is meant to make migration // from proto1 easier; new code should avoid fields named "descriptor". optional bool no_standard_descriptor_accessor = 2 [default = false]; // Is this message deprecated? 
// Depending on the target platform, this can emit Deprecated annotations // for the message, or it will be completely ignored; in the very least, // this is a formalization for deprecating messages. optional bool deprecated = 3 [default = false]; reserved 4, 5, 6; // Whether the message is an automatically generated map entry type for the // maps field. // // For maps fields: // map<KeyType, ValueType> map_field = 1; // The parsed descriptor looks like: // message MapFieldEntry { // option map_entry = true; // optional KeyType key = 1; // optional ValueType value = 2; // } // repeated MapFieldEntry map_field = 1; // // Implementations may choose not to generate the map_entry=true message, but // use a native map in the target language to hold the keys and values. // The reflection APIs in such implementations still need to work as // if the field is a repeated message field. // // NOTE: Do not set the option in .proto files. Always use the maps syntax // instead. The option should only be implicitly set by the proto compiler // parser. optional bool map_entry = 7; reserved 8; // javalite_serializable reserved 9; // javanano_as_lite // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; // Clients can define custom options in extensions of this message. See above. extensions 1000 to max; } message FieldOptions { // The ctype option instructs the C++ code generator to use a different // representation of the field than it normally would. See the specific // options below. This option is not yet implemented in the open source // release -- sorry, we'll try to include it in a future version! optional CType ctype = 1 [default = STRING]; enum CType { // Default mode. STRING = 0; CORD = 1; STRING_PIECE = 2; } // The packed option can be enabled for repeated primitive fields to enable // a more efficient representation on the wire. 
Rather than repeatedly // writing the tag and type for each element, the entire array is encoded as // a single length-delimited blob. In proto3, only explicit setting it to // false will avoid using packed encoding. optional bool packed = 2; // The jstype option determines the JavaScript type used for values of the // field. The option is permitted only for 64 bit integral and fixed types // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING // is represented as JavaScript string, which avoids loss of precision that // can happen when a large value is converted to a floating point JavaScript. // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to // use the JavaScript "number" type. The behavior of the default option // JS_NORMAL is implementation dependent. // // This option is an enum to permit additional types to be added, e.g. // goog.math.Integer. optional JSType jstype = 6 [default = JS_NORMAL]; enum JSType { // Use the default type. JS_NORMAL = 0; // Use JavaScript strings. JS_STRING = 1; // Use JavaScript numbers. JS_NUMBER = 2; } // Should this field be parsed lazily? Lazy applies only to message-type // fields. It means that when the outer message is initially parsed, the // inner message's contents will not be parsed but instead stored in encoded // form. The inner message will actually be parsed when it is first accessed. // // This is only a hint. Implementations are free to choose whether to use // eager or lazy parsing regardless of the value of this option. However, // setting this option true suggests that the protocol author believes that // using lazy parsing on this field is worth the additional bookkeeping // overhead typically needed to implement it. // // This option does not affect the public interface of any generated code; // all method signatures remain the same. 
Furthermore, thread-safety of the // interface is not affected by this option; const methods remain safe to // call from multiple threads concurrently, while non-const methods continue // to require exclusive access. // // // Note that implementations may choose not to check required fields within // a lazy sub-message. That is, calling IsInitialized() on the outer message // may return true even if the inner message has missing required fields. // This is necessary because otherwise the inner message would have to be // parsed in order to perform the check, defeating the purpose of lazy // parsing. An implementation which chooses not to check required fields // must be consistent about it. That is, for any particular sub-message, the // implementation must either *always* check its required fields, or *never* // check its required fields, regardless of whether or not the message has // been parsed. optional bool lazy = 5 [default = false]; // Is this field deprecated? // Depending on the target platform, this can emit Deprecated annotations // for accessors, or it will be completely ignored; in the very least, this // is a formalization for deprecating fields. optional bool deprecated = 3 [default = false]; // For Google-internal migration only. Do not use. optional bool weak = 10 [default = false]; // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; // Clients can define custom options in extensions of this message. See above. extensions 1000 to max; reserved 4; // removed jtype } message OneofOptions { // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; // Clients can define custom options in extensions of this message. See above. extensions 1000 to max; } message EnumOptions { // Set this option to true to allow mapping different tag names to the same // value. optional bool allow_alias = 2; // Is this enum deprecated? 
// Depending on the target platform, this can emit Deprecated annotations // for the enum, or it will be completely ignored; in the very least, this // is a formalization for deprecating enums. optional bool deprecated = 3 [default = false]; reserved 5; // javanano_as_lite // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; // Clients can define custom options in extensions of this message. See above. extensions 1000 to max; } message EnumValueOptions { // Is this enum value deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the enum value, or it will be completely ignored; in the very least, // this is a formalization for deprecating enum values. optional bool deprecated = 1 [default = false]; // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; // Clients can define custom options in extensions of this message. See above. extensions 1000 to max; } message ServiceOptions { // Note: Field numbers 1 through 32 are reserved for Google's internal RPC // framework. We apologize for hoarding these numbers to ourselves, but // we were already using them long before we decided to release Protocol // Buffers. // Is this service deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the service, or it will be completely ignored; in the very least, // this is a formalization for deprecating services. optional bool deprecated = 33 [default = false]; // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; // Clients can define custom options in extensions of this message. See above. extensions 1000 to max; } message MethodOptions { // Note: Field numbers 1 through 32 are reserved for Google's internal RPC // framework. 
We apologize for hoarding these numbers to ourselves, but // we were already using them long before we decided to release Protocol // Buffers. // Is this method deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the method, or it will be completely ignored; in the very least, // this is a formalization for deprecating methods. optional bool deprecated = 33 [default = false]; // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, // or neither? HTTP based RPC implementation may choose GET verb for safe // methods, and PUT verb for idempotent methods instead of the default POST. enum IdempotencyLevel { IDEMPOTENCY_UNKNOWN = 0; NO_SIDE_EFFECTS = 1; // implies idempotent IDEMPOTENT = 2; // idempotent, but may have side effects } optional IdempotencyLevel idempotency_level = 34 [default = IDEMPOTENCY_UNKNOWN]; // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; // Clients can define custom options in extensions of this message. See above. extensions 1000 to max; } // A message representing a option the parser does not recognize. This only // appears in options protos created by the compiler::Parser class. // DescriptorPool resolves these when building Descriptor objects. Therefore, // options protos in descriptor objects (e.g. returned by Descriptor::options(), // or produced by Descriptor::CopyTo()) will never have UninterpretedOptions // in them. message UninterpretedOption { // The name of the uninterpreted option. Each string represents a segment in // a dot-separated name. is_extension is true iff a segment represents an // extension (denoted with parentheses in options specs in .proto files). // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents // "foo.(bar.baz).qux". 
message NamePart { required string name_part = 1; required bool is_extension = 2; } repeated NamePart name = 2; // The value of the uninterpreted option, in whatever type the tokenizer // identified it as during parsing. Exactly one of these should be set. optional string identifier_value = 3; optional uint64 positive_int_value = 4; optional int64 negative_int_value = 5; optional double double_value = 6; optional bytes string_value = 7; optional string aggregate_value = 8; } // =================================================================== // Optional source code info // Encapsulates information about the original source file from which a // FileDescriptorProto was generated. message SourceCodeInfo { // A Location identifies a piece of source code in a .proto file which // corresponds to a particular definition. This information is intended // to be useful to IDEs, code indexers, documentation generators, and similar // tools. // // For example, say we have a file like: // message Foo { // optional string foo = 1; // } // Let's look at just the field definition: // optional string foo = 1; // ^ ^^ ^^ ^ ^^^ // a bc de f ghi // We have the following locations: // span path represents // [a,i) [ 4, 0, 2, 0 ] The whole field definition. // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). // // Notes: // - A location may refer to a repeated field itself (i.e. not to any // particular index within it). This is used whenever a set of elements are // logically enclosed in a single code segment. For example, an entire // extend block (possibly containing multiple extension definitions) will // have an outer location whose path refers to the "extensions" repeated // field without an index. // - Multiple locations may have the same path. This happens when a single // logical declaration is spread out across multiple places. 
The most // obvious example is the "extend" block again -- there may be multiple // extend blocks in the same scope, each of which will have the same path. // - A location's span is not always a subset of its parent's span. For // example, the "extendee" of an extension declaration appears at the // beginning of the "extend" block and is shared by all extensions within // the block. // - Just because a location's span is a subset of some other location's span // does not mean that it is a descendant. For example, a "group" defines // both a type and a field in a single declaration. Thus, the locations // corresponding to the type and field and their components will overlap. // - Code which tries to interpret locations should probably be designed to // ignore those that it doesn't understand, as more types of locations could // be recorded in the future. repeated Location location = 1; message Location { // Identifies which part of the FileDescriptorProto was defined at this // location. // // Each element is a field number or an index. They form a path from // the root FileDescriptorProto to the place where the definition. For // example, this path: // [ 4, 3, 2, 7, 1 ] // refers to: // file.message_type(3) // 4, 3 // .field(7) // 2, 7 // .name() // 1 // This is because FileDescriptorProto.message_type has field number 4: // repeated DescriptorProto message_type = 4; // and DescriptorProto.field has field number 2: // repeated FieldDescriptorProto field = 2; // and FieldDescriptorProto.name has field number 1: // optional string name = 1; // // Thus, the above path gives the location of a field name. If we removed // the last element: // [ 4, 3, 2, 7 ] // this path refers to the whole field declaration (from the beginning // of the label to the terminating semicolon). repeated int32 path = 1 [packed = true]; // Always has exactly three or four elements: start line, start column, // end line (optional, otherwise assumed same as start line), end column. 
// These are packed into a single field for efficiency. Note that line // and column numbers are zero-based -- typically you will want to add // 1 to each before displaying to a user. repeated int32 span = 2 [packed = true]; // If this SourceCodeInfo represents a complete declaration, these are any // comments appearing before and after the declaration which appear to be // attached to the declaration. // // A series of line comments appearing on consecutive lines, with no other // tokens appearing on those lines, will be treated as a single comment. // // leading_detached_comments will keep paragraphs of comments that appear // before (but not connected to) the current element. Each paragraph, // separated by empty lines, will be one comment element in the repeated // field. // // Only the comment content is provided; comment markers (e.g. //) are // stripped out. For block comments, leading whitespace and an asterisk // will be stripped from the beginning of each line other than the first. // Newlines are included in the output. // // Examples: // // optional int32 foo = 1; // Comment attached to foo. // // Comment attached to bar. // optional int32 bar = 2; // // optional string baz = 3; // // Comment attached to baz. // // Another line attached to baz. // // // Comment attached to qux. // // // // Another line attached to qux. // optional double qux = 4; // // // Detached comment for corge. This is not leading or trailing comments // // to qux or corge because there are blank lines separating it from // // both. // // // Detached comment for corge paragraph 2. // // optional string corge = 5; // /* Block comment attached // * to corge. Leading asterisks // * will be removed. */ // /* Block comment attached to // * grault. */ // optional int32 grault = 6; // // // ignored detached comments. 
optional string leading_comments = 3; optional string trailing_comments = 4; repeated string leading_detached_comments = 6; } } // Describes the relationship between generated code and its original source // file. A GeneratedCodeInfo message is associated with only one generated // source file, but may contain references to different source .proto files. message GeneratedCodeInfo { // An Annotation connects some span of text in generated code to an element // of its generating .proto file. repeated Annotation annotation = 1; message Annotation { // Identifies the element in the original source .proto file. This field // is formatted the same as SourceCodeInfo.Location.path. repeated int32 path = 1 [packed = true]; // Identifies the filesystem path to the original source .proto. optional string source_file = 2; // Identifies the starting offset in bytes in the generated code // that relates to the identified object. optional int32 begin = 3; // Identifies the ending offset in bytes in the generated code that // relates to the identified offset. The end offset should be one past // the last relevant byte (so the length of the text = end - begin). optional int32 end = 4; } } ================================================ FILE: crates/snapshots/vendor/google/protobuf/empty.proto ================================================ // Protocol Buffers - Google's data interchange format // Copyright 2008 Google Inc. All rights reserved. // https://developers.google.com/protocol-buffers/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. 
// * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. syntax = "proto3"; package google.protobuf; option csharp_namespace = "Google.Protobuf.WellKnownTypes"; option go_package = "google.golang.org/protobuf/types/known/emptypb"; option java_package = "com.google.protobuf"; option java_outer_classname = "EmptyProto"; option java_multiple_files = true; option objc_class_prefix = "GPB"; option cc_enable_arenas = true; // A generic empty message that you can re-use to avoid defining duplicated // empty messages in your APIs. A typical example is to use it as the request // or the response type of an API method. For instance: // // service Foo { // rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); // } // // The JSON representation for `Empty` is empty JSON object `{}`. 
message Empty {} ================================================ FILE: crates/snapshots/vendor/google/protobuf/field_mask.proto ================================================ // Protocol Buffers - Google's data interchange format // Copyright 2008 Google Inc. All rights reserved. // https://developers.google.com/protocol-buffers/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
syntax = "proto3"; package google.protobuf; option csharp_namespace = "Google.Protobuf.WellKnownTypes"; option java_package = "com.google.protobuf"; option java_outer_classname = "FieldMaskProto"; option java_multiple_files = true; option objc_class_prefix = "GPB"; option go_package = "google.golang.org/protobuf/types/known/fieldmaskpb"; option cc_enable_arenas = true; // `FieldMask` represents a set of symbolic field paths, for example: // // paths: "f.a" // paths: "f.b.d" // // Here `f` represents a field in some root message, `a` and `b` // fields in the message found in `f`, and `d` a field found in the // message in `f.b`. // // Field masks are used to specify a subset of fields that should be // returned by a get operation or modified by an update operation. // Field masks also have a custom JSON encoding (see below). // // # Field Masks in Projections // // When used in the context of a projection, a response message or // sub-message is filtered by the API to only contain those fields as // specified in the mask. For example, if the mask in the previous // example is applied to a response message as follows: // // f { // a : 22 // b { // d : 1 // x : 2 // } // y : 13 // } // z: 8 // // The result will not contain specific values for fields x,y and z // (their value will be set to the default, and omitted in proto text // output): // // // f { // a : 22 // b { // d : 1 // } // } // // A repeated field is not allowed except at the last position of a // paths string. // // If a FieldMask object is not present in a get operation, the // operation applies to all fields (as if a FieldMask of all fields // had been specified). // // Note that a field mask does not necessarily apply to the // top-level response message. In case of a REST get operation, the // field mask applies directly to the response, but in case of a REST // list operation, the mask instead applies to each individual message // in the returned resource list. 
In case of a REST custom method, // other definitions may be used. Where the mask applies will be // clearly documented together with its declaration in the API. In // any case, the effect on the returned resource/resources is required // behavior for APIs. // // # Field Masks in Update Operations // // A field mask in update operations specifies which fields of the // targeted resource are going to be updated. The API is required // to only change the values of the fields as specified in the mask // and leave the others untouched. If a resource is passed in to // describe the updated values, the API ignores the values of all // fields not covered by the mask. // // If a repeated field is specified for an update operation, new values will // be appended to the existing repeated field in the target resource. Note that // a repeated field is only allowed in the last position of a `paths` string. // // If a sub-message is specified in the last position of the field mask for an // update operation, then new value will be merged into the existing sub-message // in the target resource. // // For example, given the target message: // // f { // b { // d: 1 // x: 2 // } // c: [1] // } // // And an update message: // // f { // b { // d: 10 // } // c: [2] // } // // then if the field mask is: // // paths: ["f.b", "f.c"] // // then the result will be: // // f { // b { // d: 10 // x: 2 // } // c: [1, 2] // } // // An implementation may provide options to override this default behavior for // repeated and message fields. // // In order to reset a field's value to the default, the field must // be in the mask and set to the default value in the provided resource. // Hence, in order to reset all fields of a resource, provide a default // instance of the resource and set all fields in the mask, or do // not provide a mask as described below. // // If a field mask is not present on update, the operation applies to // all fields (as if a field mask of all fields has been specified). 
// Note that in the presence of schema evolution, this may mean that // fields the client does not know and has therefore not filled into // the request will be reset to their default. If this is unwanted // behavior, a specific service may require a client to always specify // a field mask, producing an error if not. // // As with get operations, the location of the resource which // describes the updated values in the request message depends on the // operation kind. In any case, the effect of the field mask is // required to be honored by the API. // // ## Considerations for HTTP REST // // The HTTP kind of an update operation which uses a field mask must // be set to PATCH instead of PUT in order to satisfy HTTP semantics // (PUT must only be used for full updates). // // # JSON Encoding of Field Masks // // In JSON, a field mask is encoded as a single string where paths are // separated by a comma. Fields name in each path are converted // to/from lower-camel naming conventions. // // As an example, consider the following message declarations: // // message Profile { // User user = 1; // Photo photo = 2; // } // message User { // string display_name = 1; // string address = 2; // } // // In proto a field mask for `Profile` may look as such: // // mask { // paths: "user.display_name" // paths: "photo" // } // // In JSON, the same mask is represented as below: // // { // mask: "user.displayName,photo" // } // // # Field Masks and Oneof Fields // // Field masks treat fields in oneofs just as regular fields. Consider the // following message: // // message SampleMessage { // oneof test_oneof { // string name = 4; // SubMessage sub_message = 9; // } // } // // The field mask can be: // // mask { // paths: "name" // } // // Or: // // mask { // paths: "sub_message" // } // // Note that oneof type names ("test_oneof" in this case) cannot be used in // paths. 
// // ## Field Mask Verification // // The implementation of any API method which has a FieldMask type field in the // request should verify the included field paths, and return an // `INVALID_ARGUMENT` error if any path is unmappable. message FieldMask { // The set of field mask paths. repeated string paths = 1; } ================================================ FILE: crates/snapshots/vendor/google/protobuf/timestamp.proto ================================================ // Protocol Buffers - Google's data interchange format // Copyright 2008 Google Inc. All rights reserved. // https://developers.google.com/protocol-buffers/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. syntax = "proto3"; package google.protobuf; option csharp_namespace = "Google.Protobuf.WellKnownTypes"; option cc_enable_arenas = true; option go_package = "google.golang.org/protobuf/types/known/timestamppb"; option java_package = "com.google.protobuf"; option java_outer_classname = "TimestampProto"; option java_multiple_files = true; option objc_class_prefix = "GPB"; // A Timestamp represents a point in time independent of any time zone or local // calendar, encoded as a count of seconds and fractions of seconds at // nanosecond resolution. The count is relative to an epoch at UTC midnight on // January 1, 1970, in the proleptic Gregorian calendar which extends the // Gregorian calendar backwards to year one. // // All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap // second table is needed for interpretation, using a [24-hour linear // smear](https://developers.google.com/time/smear). // // The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By // restricting to that range, we ensure that we can convert to and from [RFC // 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. // // # Examples // // Example 1: Compute Timestamp from POSIX `time()`. // // Timestamp timestamp; // timestamp.set_seconds(time(NULL)); // timestamp.set_nanos(0); // // Example 2: Compute Timestamp from POSIX `gettimeofday()`. 
// // struct timeval tv; // gettimeofday(&tv, NULL); // // Timestamp timestamp; // timestamp.set_seconds(tv.tv_sec); // timestamp.set_nanos(tv.tv_usec * 1000); // // Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. // // FILETIME ft; // GetSystemTimeAsFileTime(&ft); // UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; // // // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z // // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. // Timestamp timestamp; // timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); // timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); // // Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. // // long millis = System.currentTimeMillis(); // // Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) // .setNanos((int) ((millis % 1000) * 1000000)).build(); // // // Example 5: Compute Timestamp from Java `Instant.now()`. // // Instant now = Instant.now(); // // Timestamp timestamp = // Timestamp.newBuilder().setSeconds(now.getEpochSecond()) // .setNanos(now.getNano()).build(); // // // Example 6: Compute Timestamp from current time in Python. // // timestamp = Timestamp() // timestamp.GetCurrentTime() // // # JSON Mapping // // In JSON format, the Timestamp type is encoded as a string in the // [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the // format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" // where {year} is always expressed using four digits while {month}, {day}, // {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional // seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), // are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone // is required. 
A proto3 JSON serializer should always use UTC (as indicated by // "Z") when printing the Timestamp type and a proto3 JSON parser should be // able to accept both UTC and other timezones (as indicated by an offset). // // For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past // 01:30 UTC on January 15, 2017. // // In JavaScript, one can convert a Date object to this format using the // standard // [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) // method. In Python, a standard `datetime.datetime` object can be converted // to this format using // [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with // the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use // the Joda Time's [`ISODateTimeFormat.dateTime()`]( // http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D // ) to obtain a formatter capable of generating timestamps in this format. // // message Timestamp { // Represents seconds of UTC time since Unix epoch // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. int64 seconds = 1; // Non-negative fractions of a second at nanosecond resolution. Negative // second values with fractions must still have non-negative nanos values // that count forward in time. Must be from 0 to 999,999,999 // inclusive. 
int32 nanos = 2; } ================================================ FILE: deny.toml ================================================ # This template contains all of the possible sections and their default values # Note that all fields that take a lint level have these possible values: # * deny - An error will be produced and the check will fail # * warn - A warning will be produced, but the check will not fail # * allow - No warning or error will be produced, though in some cases a note # will be # The values provided in this template are the default values that will be used # when any section or field is not specified in your own configuration # Root options # The graph table configures how the dependency graph is constructed and thus # which crates the checks are performed against [graph] # If 1 or more target triples (and optionally, target_features) are specified, # only the specified targets will be checked when running `cargo deny check`. # This means, if a particular package is only ever used as a target specific # dependency, such as, for example, the `nix` crate only being used via the # `target_family = "unix"` configuration, that only having windows targets in # this list would mean the nix crate, as well as any of its exclusive # dependencies not shared by any other crates, would be ignored, as the target # list here is effectively saying which targets you are building for. targets = [ # The triple can be any string, but only the target triples built in to # rustc (as of 1.40) can be checked against actual config expressions #"x86_64-unknown-linux-musl", # You can also specify which target_features you promise are enabled for a # particular target. target_features are currently not validated against # the actual valid features supported by the target architecture. 
#{ triple = "wasm32-unknown-unknown", features = ["atomics"] }, ] # When creating the dependency graph used as the source of truth when checks are # executed, this field can be used to prune crates from the graph, removing them # from the view of cargo-deny. This is an extremely heavy hammer, as if a crate # is pruned from the graph, all of its dependencies will also be pruned unless # they are connected to another crate in the graph that hasn't been pruned, # so it should be used with care. The identifiers are [Package ID Specifications] # (https://doc.rust-lang.org/cargo/reference/pkgid-spec.html) #exclude = [] # If true, metadata will be collected with `--all-features`. Note that this can't # be toggled off if true, if you want to conditionally enable `--all-features` it # is recommended to pass `--all-features` on the cmd line instead all-features = false # If true, metadata will be collected with `--no-default-features`. The same # caveat with `all-features` applies no-default-features = false # If set, these feature will be enabled when collecting metadata. If `--features` # is specified on the cmd line they will take precedence over this option. #features = [] # The output table provides options for how/if diagnostics are outputted [output] # When outputting inclusion graphs in diagnostics that include features, this # option can be used to specify the depth at which feature edges will be added. # This option is included since the graphs can be quite large and the addition # of features from the crate(s) to all of the graph roots can be far too verbose. 
# This option can be overridden via `--feature-depth` on the cmd line feature-depth = 1 # This section is considered when running `cargo deny check advisories` # More documentation for the advisories section can be found here: # https://embarkstudios.github.io/cargo-deny/checks/advisories/cfg.html [advisories] # The path where the advisory databases are cloned/fetched into #db-path = "$CARGO_HOME/advisory-dbs" # The url(s) of the advisory databases to use #db-urls = ["https://github.com/rustsec/advisory-db"] # A list of advisory IDs to ignore. Note that ignored advisories will still # output a note when they are encountered. ignore = [ #"RUSTSEC-0000-0000", #{ id = "RUSTSEC-0000-0000", reason = "you can specify a reason the advisory is ignored" }, #"a-crate-that-is-yanked@0.1.1", # you can also ignore yanked crate versions if you wish #{ crate = "a-crate-that-is-yanked@0.1.1", reason = "you can specify why you are ignoring the yanked crate" }, ] # If this is true, then cargo deny will use the git executable to fetch advisory database. # If this is false, then it uses a built-in git library. # Setting this to true can be helpful if you have special authentication requirements that cargo-deny does not support. # See Git Authentication for more information about setting up git authentication. #git-fetch-with-cli = true # This section is considered when running `cargo deny check licenses` # More documentation for the licenses section can be found here: # https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html [licenses] # List of explicitly allowed licenses # See https://spdx.org/licenses/ for list of possible licenses # [possible values: any SPDX 3.11 short identifier (+ optional exception)]. allow = [ "MIT", "Apache-2.0", "BSD-3-Clause", "Zlib", "Unicode-3.0", #"Apache-2.0 WITH LLVM-exception", ] # The confidence threshold for detecting a license from license text. 
# The higher the value, the more closely the license text must be to the # canonical license text of a valid SPDX license file. # [possible values: any between 0.0 and 1.0]. confidence-threshold = 0.8 # Allow 1 or more licenses on a per-crate basis, so that particular licenses # aren't accepted for every possible crate as with the normal allow list exceptions = [ # Each entry is the crate and version constraint, and its specific allow # list #{ allow = ["Zlib"], crate = "adler32" }, { allow = ["Unicode-DFS-2016"], name = "unicode-ident", version = "*" }, ] # Some crates don't have (easily) machine readable licensing information, # adding a clarification entry for it allows you to manually specify the # licensing information #[[licenses.clarify]] # The package spec the clarification applies to #crate = "ring" # The SPDX expression for the license requirements of the crate #expression = "MIT AND ISC AND OpenSSL" # One or more files in the crate's source used as the "source of truth" for # the license expression. If the contents match, the clarification will be used # when running the license check, otherwise the clarification will be ignored # and the crate will be checked normally, which may produce warnings or errors # depending on the rest of your configuration #license-files = [ # Each entry is a crate relative path, and the (opaque) hash of its contents #{ path = "LICENSE", hash = 0xbd0eed23 } #] [licenses.private] # If true, ignores workspace crates that aren't published, or are only # published to private registries. # To see how to mark a crate as unpublished (to the official registry), # visit https://doc.rust-lang.org/cargo/reference/manifest.html#the-publish-field. 
ignore = false
# One or more private registries that you might publish crates to, if a crate
# is only published to private registries, and ignore is true, the crate will
# not have its license(s) checked
registries = [
    #"https://sekretz.com/registry"
]

# This section is considered when running `cargo deny check bans`.
# More documentation about the 'bans' section can be found here:
# https://embarkstudios.github.io/cargo-deny/checks/bans/cfg.html
[bans]
# Lint level for when multiple versions of the same crate are detected
multiple-versions = "warn"
# Lint level for when a crate version requirement is `*`
wildcards = "allow"
# The graph highlighting used when creating dotgraphs for crates
# with multiple versions
# * lowest-version - The path to the lowest versioned duplicate is highlighted
# * simplest-path - The path to the version with the fewest edges is highlighted
# * all - Both lowest-version and simplest-path are used
highlight = "all"
# The default lint level for `default` features for crates that are members of
# the workspace that is being checked. This can be overridden by allowing/denying
# `default` on a crate-by-crate basis if desired.
workspace-default-features = "allow"
# The default lint level for `default` features for external crates that are not
# members of the workspace. This can be overridden by allowing/denying `default`
# on a crate-by-crate basis if desired.
external-default-features = "allow"
# List of crates that are allowed. Use with care!
allow = [ #"ansi_term@0.11.0", #{ crate = "ansi_term@0.11.0", reason = "you can specify a reason it is allowed" }, ] # List of crates to deny deny = [ #"ansi_term@0.11.0", #{ crate = "ansi_term@0.11.0", reason = "you can specify a reason it is banned" }, # Wrapper crates can optionally be specified to allow the crate when it # is a direct dependency of the otherwise banned crate #{ crate = "ansi_term@0.11.0", wrappers = ["this-crate-directly-depends-on-ansi_term"] }, ] # List of features to allow/deny # Each entry the name of a crate and a version range. If version is # not specified, all versions will be matched. #[[bans.features]] #crate = "reqwest" # Features to not allow #deny = ["json"] # Features to allow #allow = [ # "rustls", # "__rustls", # "__tls", # "hyper-rustls", # "rustls", # "rustls-pemfile", # "rustls-tls-webpki-roots", # "tokio-rustls", # "webpki-roots", #] # If true, the allowed features must exactly match the enabled feature set. If # this is set there is no point setting `deny` #exact = true # Certain crates/versions that will be skipped when doing duplicate detection. skip = [ #"ansi_term@0.11.0", #{ crate = "ansi_term@0.11.0", reason = "you can specify a reason why it can't be updated/removed" }, ] # Similarly to `skip` allows you to skip certain crates during duplicate # detection. Unlike skip, it also includes the entire tree of transitive # dependencies starting at the specified crate, up to a certain depth, which is # by default infinite. skip-tree = [ #"ansi_term@0.11.0", # will be skipped along with _all_ of its direct and transitive dependencies #{ crate = "ansi_term@0.11.0", depth = 20 }, ] # This section is considered when running `cargo deny check sources`. 
# More documentation about the 'sources' section can be found here: # https://embarkstudios.github.io/cargo-deny/checks/sources/cfg.html [sources] # Lint level for what to happen when a crate from a crate registry that is not # in the allow list is encountered unknown-registry = "warn" # Lint level for what to happen when a crate from a git repository that is not # in the allow list is encountered unknown-git = "warn" # List of URLs for allowed crate registries. Defaults to the crates.io index # if not specified. If it is specified but empty, no registries are allowed. allow-registry = ["https://github.com/rust-lang/crates.io-index"] # List of URLs for allowed Git repositories allow-git = [] [sources.allow-org] # github.com organizations to allow git sources for github = [] # gitlab.com organizations to allow git sources for gitlab = [] # bitbucket.org organizations to allow git sources for bitbucket = [] ================================================ FILE: rust-toolchain.toml ================================================ [toolchain] channel = "1.91" components = ["rustfmt", "clippy", "llvm-tools"] ================================================ FILE: rustfmt.toml ================================================ newline_style = "Unix" unstable_features = true # Cargo fmt now needs to be called with `cargo +nightly fmt` group_imports = "StdExternalCrate" # Create 3 groups: std, external crates, and self. imports_granularity = "Crate" # Merge imports from the same crate into a single use statement ================================================ FILE: scripts/install-protobuf.sh ================================================ #!/bin/bash # Helper script for Github Actions to install protobuf on different runners. 
echo "OS: $RUNNER_OS"
if [ "$RUNNER_OS" == 'Linux' ]; then
    # Install on Linux
    sudo apt-get update
    sudo apt-get install -y protobuf-compiler
elif [ "$RUNNER_OS" == 'macOS' ]; then
    # Install on macOS
    brew install protobuf
elif [ "$RUNNER_OS" == 'Windows' ]; then
    # Install on Windows
    choco install -y protoc
else
    echo "Unsupported OS: $RUNNER_OS"
    exit 1
fi

# Check the installed Protobuf version
protoc --version



================================================
FILE: scripts/update-vendor.sh
================================================
#!/bin/bash

# A simple bash script to synchronize proto files from containerd to vendor/ directories of
# each crate.
#
# VERSION specified containerd release that script will download to extract protobuf files.
#
# For each crate, the script expects a text file named `rsync.txt` in the crate's directory.
# The file should contain a list of proto files that should be synchronized from containerd.

VERSION="v2.3.0"

set -x

# Download containerd source code.
if ! wget "https://github.com/containerd/containerd/archive/refs/tags/$VERSION.tar.gz" -O containerd.tar.gz; then
    echo "Error: Failed to download containerd source code."
    exit 1
fi

# Extract the archive to a temporary directory.
TEMP_DIR=$(mktemp -d)

# Ensure both the downloaded tarball and the temporary extraction directory
# are removed on exit (previously only the tarball was cleaned up, leaking
# $TEMP_DIR on every run). Single quotes defer expansion until the trap fires.
trap 'rm -f containerd.tar.gz; rm -rf "$TEMP_DIR"' EXIT

tar --extract \
    --file containerd.tar.gz \
    --strip-components=1 \
    --directory "$TEMP_DIR"

# sync_crate CRATE_NAME TEMP_DIR
# Replaces crates/CRATE_NAME/vendor/github.com/containerd/containerd/ with the
# proto files listed in the crate's rsync.txt, copied from the extracted tree.
function sync_crate() {
    local crate_name=$1
    local temp_dir=$2

    rm -rf "crates/$crate_name/vendor/github.com/containerd/containerd/"

    rsync -avm \
        --include='*/' \
        --include-from="crates/$crate_name/rsync.txt" \
        --exclude='*' \
        "$temp_dir/" \
        "crates/$crate_name/vendor/github.com/containerd/containerd/"
}

sync_crate "shim-protos" "$TEMP_DIR"
sync_crate "snapshots" "$TEMP_DIR"
sync_crate "client" "$TEMP_DIR"