[
  {
    "path": ".gitattributes",
    "content": "* text=auto eol=lf\n"
  },
  {
    "path": ".github/dependabot.yml",
    "content": "version: 2\nupdates:\n  - package-ecosystem: \"cargo\"\n    directory: \"/\"\n    schedule:\n      interval: \"daily\"\n    labels:\n      - T-dependencies\n\n  # Ensure that references to actions in a repository's workflow.yml file are kept up to date.\n  # See https://docs.github.com/en/code-security/supply-chain-security/keeping-your-dependencies-updated-automatically/keeping-your-actions-up-to-date-with-dependabot\n  - package-ecosystem: \"github-actions\"\n    directory: \"/\"\n    schedule:\n      interval: \"daily\"\n    labels:\n      # Mark PRs as CI related change.\n      - T-CI\n"
  },
  {
    "path": ".github/labeler.yml",
    "content": "# Automatically assign labels to PRs.\n# `C-` project crate(s) affected.\n# `T-` change type (CI, docs, etc).\n\nC-client:\n  - changed-files:\n      - any-glob-to-any-file: crates/client/**\n\nC-logging:\n  - changed-files:\n      - any-glob-to-any-file: crates/logging/**\n\nC-runc:\n  - changed-files:\n      - any-glob-to-any-file: crates/runc/**\n\nC-runc-shim:\n  - changed-files:\n      - any-glob-to-any-file: crates/runc-shim/**\n\nC-shim:\n  - changed-files:\n      - any-glob-to-any-file: crates/shim/**\n\nC-shim-protos:\n  - changed-files:\n      - any-glob-to-any-file: crates/shim-protos/**\n\nC-snapshots:\n  - changed-files:\n      - any-glob-to-any-file: crates/snapshots/**\n\nT-CI:\n  - changed-files:\n      - any-glob-to-any-file: [\".github/**\", \"*.toml\"]\n\nT-docs:\n  - changed-files:\n      - any-glob-to-any-file: \"**/*.md\"\n"
  },
  {
    "path": ".github/release.yml",
    "content": "changelog:\n  categories:\n    - title: Runc crate\n      labels:\n        - C-runc\n    - title: Runc shim crate\n      labels:\n        - C-runc-shim\n    - title: Shim crate\n      labels:\n        - C-shim\n    - title: Shim protos crate\n      labels:\n        - C-shim-protos\n    - title: Snapshots crate\n      labels:\n        - C-snapshots\n    - title: Client crate\n      labels:\n        - C-client\n    - title: Logging crate\n      labels:\n        - C-logging\n    - title: Other changes\n      labels:\n        - T-CI\n        - T-docs\n"
  },
  {
    "path": ".github/workflows/ci.yml",
    "content": "name: CI\non:\n  pull_request:\n  push:\n  merge_group:\n  schedule:\n    - cron: '0 0 * * *' # Every day at midnight\n\njobs:\n  checks:\n    name: Checks\n    runs-on: ${{ matrix.os }}\n    timeout-minutes: 20\n\n    strategy:\n      matrix:\n        include:\n          - os: ubuntu-latest\n            target: x86_64-unknown-linux-gnu\n          - os: ubuntu-latest\n            target: x86_64-unknown-linux-musl\n          - os: macos-latest\n            target: aarch64-apple-darwin\n\n    steps:\n      - uses: actions/checkout@v6\n\n      - run: ./scripts/install-protobuf.sh\n        shell: bash\n\n      - if: matrix.target == 'x86_64-unknown-linux-musl'\n        run: sudo apt-get update && sudo apt-get install -y musl-tools && rustup target add x86_64-unknown-linux-musl \n\n      - run: rustup toolchain install nightly --component rustfmt --target ${{ matrix.target }}\n      - run: cargo +nightly fmt --all -- --check\n\n      # the \"runc\" and \"containerd-shim\" crates have `sync` code that is not covered by the workspace\n      - run: cargo check -p runc --all-targets --target ${{ matrix.target }}\n      - run: cargo clippy -p runc --all-targets --target ${{ matrix.target }} -- -D warnings\n      - run: cargo check -p containerd-shim --all-targets --target ${{ matrix.target }}\n      - run: cargo clippy -p containerd-shim --all-targets --target ${{ matrix.target }}  -- -D warnings\n\n      # check the workspace\n      - run: cargo check --examples --tests --all-targets --target ${{ matrix.target }}\n      - run: cargo check --examples --tests --all-targets --all-features --target ${{ matrix.target }}\n      - run: cargo clippy --all-targets --target ${{ matrix.target }} -- -D warnings\n      - run: cargo clippy --all-targets --all-features --target ${{ matrix.target }} -- -D warnings\n\n      - run: cargo doc --no-deps --features docs\n        env:\n          RUSTDOCFLAGS: -Dwarnings\n\n      - name: check unused dependencies\n        uses: 
bnjbvr/cargo-machete@v0.9.2\n        env:\n          RUSTUP_TOOLCHAIN: \"stable\"\n\n  # TODO: Merge this with the checks job above\n  windows-checks:\n    name: Windows Checks\n    runs-on: windows-latest\n    timeout-minutes: 20\n\n    steps:\n      - uses: actions/checkout@v6\n      - run: ./scripts/install-protobuf.sh\n        shell: bash\n      - run: cargo check --examples --tests -p containerd-shim -p containerd-shim-protos -p containerd-client\n\n      - run: rustup toolchain install nightly --component rustfmt\n      - run: cargo +nightly fmt -p containerd-shim -p containerd-shim-protos -p containerd-client -- --check\n\n      - run: cargo clippy -p containerd-shim -p containerd-shim-protos -- -D warnings\n      - run: cargo doc --no-deps -p containerd-shim -p containerd-shim-protos -p containerd-client\n        env:\n          RUSTDOCFLAGS: -Dwarnings\n\n  tests:\n    name: Tests\n    runs-on: ${{ matrix.os }}\n    timeout-minutes: 15\n\n    strategy:\n      matrix:\n        os: [ubuntu-latest, macos-latest, windows-latest]\n\n    steps:\n      - uses: actions/checkout@v6\n      - run: ./scripts/install-protobuf.sh\n        shell: bash\n      - run: |\n          # runc-shim::cgroup::test_add_cgroup needs root permission to set cgroup\n          mkdir -p /tmp/dummy-xdr\n          sudo -E $(command -v cargo) test\n          sudo -E $(command -v cargo) test --all-features\n\n          # the shim has sync code that is not covered when running with --all-features\n          sudo -E $(command -v cargo) test -p containerd-shim\n        if: ${{ !contains(matrix.os, 'windows') }}\n        env:\n          # runc::tests::test_exec needs $XDG_RUNTIME_DIR to be set\n          XDG_RUNTIME_DIR: /tmp/dummy-xdr\n      - run: cargo test -p containerd-shim -p containerd-shim-protos -p containerd-client\n        if: ${{ contains(matrix.os, 'windows') }}\n\n  # Collect build timings\n  # See https://blog.rust-lang.org/2022/04/07/Rust-1.60.0.html#cargo---timings\n  timings:\n  
  name: Timings\n    runs-on: ubuntu-latest\n    timeout-minutes: 15\n\n    steps:\n      - uses: actions/checkout@v6\n      - run: ./scripts/install-protobuf.sh\n        shell: bash\n      - run: cargo build --all-features --timings\n      - uses: actions/upload-artifact@v7\n        with:\n          name: timings\n          path: target/cargo-timings/cargo-timing.html\n          if-no-files-found: error\n\n  deny:\n    name: Deny\n    runs-on: ubuntu-latest\n    timeout-minutes: 10\n\n    steps:\n      - uses: actions/checkout@v6\n      - uses: EmbarkStudios/cargo-deny-action@v2\n\n  linux-integration:\n    name: Linux Integration\n    runs-on: ${{ matrix.os }}\n    timeout-minutes: 40\n\n    strategy:\n      matrix:\n        os: [ubuntu-latest]\n        containerd: [v1.7.30, v2.1.6, v2.2.1]\n\n    steps:\n      - name: Checkout extensions\n        uses: actions/checkout@v6\n\n      - name: Download containerd archive\n        env:\n          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n        run: |\n          gh release download ${{ matrix.containerd }} \\\n            --repo containerd/containerd \\\n            --pattern 'containerd-1.*-linux-amd64.tar.gz' \\\n            --pattern 'containerd-2.*-linux-amd64.tar.gz' \\\n            --output containerd.tar.gz\n\n      - name: Extract containerd binaries to $HOME/.local/bin\n        run: |\n          mkdir -p $HOME/.local/bin\n          echo \"$HOME/.local/bin\" >> $GITHUB_PATH\n          tar -xf containerd.tar.gz -C $HOME/.local\n\n      - name: Checkout containerd\n        uses: actions/checkout@v6\n        with:\n          repository: containerd/containerd\n          path: src/github.com/containerd/containerd\n          ref: ${{ matrix.containerd }}\n\n      - name: Install shim\n        run: |\n          cargo build --release --bin containerd-shim-runc-v2-rs\n          sudo install -D ./target/release/containerd-shim-runc-v2-rs /usr/local/bin/\n\n      ## get latest go version for integrations tests so we 
can skip runnings tests\n      - uses: actions/setup-go@v6\n\n      - name: Integration\n        env:\n          TEST_RUNTIME: \"io.containerd.runc.v2-rs\"\n          TESTFLAGS_PARALLEL: 1\n          EXTRA_TESTFLAGS: \"-no-criu -test.skip='(TestContainerPTY|TestContainerExecLargeOutputWithTTY|TestTaskUpdate|TestTaskResize|TestContainerAttach|TestContainerAttachProcess|TestRuntimeInfo)'\"\n          TESTFLAGS_RACE: \"-race\"\n          # Pretend crun for now, remove after https://github.com/containerd/containerd/pull/9829\n          RUNC_FLAVOR: \"crun\"\n        run: |\n          sudo -E PATH=$PATH make integration\n        working-directory: src/github.com/containerd/containerd\n\n  windows-integration:\n    name: Windows Integration\n    runs-on: ${{ matrix.os }}\n    timeout-minutes: 40\n\n    strategy:\n      matrix:\n        os: [windows-latest]\n        containerd: [1.7.0]\n\n    steps:\n      - name: Checkout extensions\n        uses: actions/checkout@v6\n      - run: ./scripts/install-protobuf.sh\n        shell: bash\n\n      - name: Install containerd\n        run: |\n          $ErrorActionPreference = \"Stop\"\n\n          # Install containerd https://github.com/containerd/containerd/blob/v1.7.0/docs/getting-started.md#installing-containerd-on-windows\n          # Download and extract desired containerd Windows binaries\n          curl.exe -L https://github.com/containerd/containerd/releases/download/v${{ matrix.containerd }}/containerd-${{ matrix.containerd }}-windows-amd64.tar.gz -o containerd-windows-amd64.tar.gz\n          tar.exe xvf .\\containerd-windows-amd64.tar.gz\n\n          # Copy and configure\n          mkdir \"$Env:ProgramFiles\\containerd\"\n          Copy-Item -Path \".\\bin\\*\" -Destination \"$Env:ProgramFiles\\containerd\" -Recurse -Force\n          cd $Env:ProgramFiles\\containerd\\\n          .\\containerd.exe config default | Out-File config.toml -Encoding ascii\n\n          # Review the configuration. 
Depending on setup you may want to adjust:\n          # - the sandbox_image (Kubernetes pause image)\n          # - cni bin_dir and conf_dir locations\n          Get-Content config.toml\n\n          # Register and start service\n          .\\containerd.exe --register-service\n          Start-Service containerd\n        working-directory: ${{ runner.temp }}\n      - name: Run integration test\n        run: |\n          $ErrorActionPreference = \"Stop\"\n\n          get-service containerd\n          $env:TTRPC_ADDRESS=\"\\\\.\\pipe\\containerd-containerd.ttrpc\"\n\n          # run the example\n          cargo run -p containerd-shim --example skeleton -- -namespace default -id 1234 -address \"\\\\.\\pipe\\containerd-containerd\"  -publish-binary ./bin/containerd start\n          ps skeleton\n          cargo run -p containerd-shim-protos --example shim-proto-connect \\\\.\\pipe\\containerd-shim-bc764c65e177434fcefe8257dc440be8b8acf7c96156320d965938f7e9ae1a35-pipe\n          $skeleton = get-process skeleton -ErrorAction SilentlyContinue\n          if ($skeleton) { exit 1 }\n      - name: Run client\n        run: |\n          $ErrorActionPreference = \"Stop\"\n\n          get-service containerd\n          cargo run -p containerd-client --example version\n\n  # Currently Github actions UI supports no masks to mark matrix jobs as required to pass status checks.\n  # This means that every time version of Go, containerd, or OS is changed, a corresponding job should\n  # be added to the list of required checks. 
Which is not very convenient.\n  # To workaround this, a special job is added to report statuses of all other jobs, with fixed title.\n  # So it needs to be added to the list of required checks only once.\n  #\n  # See https://github.com/orgs/community/discussions/26822\n  results:\n    name: Report required job statuses\n    runs-on: ubuntu-latest\n    # List job dependencies which are required to pass status checks in order to be merged via merge queue.\n    needs: [checks, windows-checks, tests, deny, linux-integration, windows-integration]\n    if: ${{ always() }}\n    steps:\n      - run: exit 1\n        if: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') }}\n"
  },
  {
    "path": ".github/workflows/cover.yml",
    "content": "name: Coverage\non:\n  push:\n    branches: \"main\"\n  pull_request:\n    branches: \"main\"\n\njobs:\n  coverage:\n    name: Collect\n    runs-on: ubuntu-latest\n    timeout-minutes: 15\n\n    permissions:\n      statuses: write\n\n    steps:\n      - uses: actions/checkout@v6\n\n      - run: |\n          sudo apt-get install protobuf-compiler\n\n      - name: Install grcov\n        run: |\n          cargo install --locked grcov@0.8.24\n          grcov --version\n\n      - name: Tests\n        env:\n          RUSTFLAGS: \"-Cinstrument-coverage\"\n          LLVM_PROFILE_FILE: \"target/coverage/%p-%m.profraw\"\n        run: |\n          sudo -E $(command -v cargo) test --all-features\n\n          # Fix permissions after sudo.\n          sudo chown -R $(whoami) target/coverage/\n\n      - name: Collect coverage data\n        run: |\n          grcov . \\\n            --source-dir . \\\n            --binary-path ./target/debug/ \\\n            --branch \\\n            --ignore-not-existing \\\n            --output-types markdown,lcov \\\n            --keep-only 'crates/*' \\\n            --output-path ./target/coverage/\n\n      - name: Upload coverage data\n        uses: codecov/codecov-action@v6\n        with:\n          token: ${{ secrets.CODECOV_TOKEN }}\n          flags: unittests\n          files: ./target/coverage/lcov\n          verbose: true\n\n      - name: Publish job summary\n        run: |\n          echo \"# Coverage\" >> $GITHUB_STEP_SUMMARY\n          cat target/coverage/markdown.md >> $GITHUB_STEP_SUMMARY\n"
  },
  {
    "path": ".github/workflows/labeler.yml",
    "content": "name: PR Labeler\non:\n  # Runs workflow when activity on a PR in the workflow's repository occurs.\n  pull_request_target:\n\njobs:\n  triage:\n    permissions:\n      contents: read\n      pull-requests: write\n\n    name: Assign labels\n    runs-on: ubuntu-latest\n    timeout-minutes: 5\n\n    # Required by gh\n    env:\n      GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n      PR_URL: ${{ github.event.pull_request.html_url }}\n\n    steps:\n    - uses: actions/labeler@v6\n      with:\n        # Auto-include paths starting with dot (e.g. .github)\n        dot: true\n        # Remove labels when matching files are reverted or no longer changed by the PR\n        sync-labels: true\n\n    # Apply OS-windows label if PR title contains 'Windows'\n    - run: gh pr edit $PR_URL --add-label OS-windows\n      if: contains(github.event.pull_request.title, 'Windows')\n"
  },
  {
    "path": ".github/workflows/publish.yml",
    "content": "# Automates crate publishing\n# - Specify crate and version from the menu.\n# - Launch the job:\n#   + Updates Cargo.toml with the specified version\n#   + Commits and pushes the version bump\n#   + Publishes to crates.io\n#   + Adds and pushes a git tag \"<crate>-v<version>\"\n\nname: Release\non:\n  workflow_dispatch:\n    inputs:\n      crate:\n        description: \"Crate to publish\"\n        required: true\n        type: choice\n        options:\n          - client\n          - logging\n          - runc\n          - runc-shim\n          - shim\n          - shim-protos\n          - snapshots\n\n      version:\n        description: \"Version to publish (e.g. 0.8.1)\"\n        required: true\n        type: string\n\n      dryrun:\n        description: \"Dry run\"\n        required: false\n        type: boolean\n        default: false\n\njobs:\n  publish:\n    name: \"Publish ${{ inputs.crate }} v${{ inputs.version }}\"\n    runs-on: ubuntu-latest\n    timeout-minutes: 10\n\n    permissions:\n      contents: write\n\n    env:\n      CARGO_FILE: \"crates/${{ inputs.crate }}/Cargo.toml\"\n\n    steps:\n      - uses: actions/checkout@v6\n\n      - name: Validate version\n        env:\n          VERSION: ${{ inputs.version }}\n        run: |\n          if [[ ! \"$VERSION\" =~ ^[0-9]+\\.[0-9]+\\.[0-9]+(-[a-zA-Z0-9.]+)?$ ]]; then\n            echo \"::error::Invalid version: $VERSION (expected semver, e.g. 
0.8.1 or 1.0.0-rc.1)\"\n            exit 1\n          fi\n\n      - name: Update crate version\n        env:\n          VERSION: ${{ inputs.version }}\n        run: sed -i \"s/^version = \\\".*\\\"/version = \\\"$VERSION\\\"/\" $CARGO_FILE\n\n      - name: Commit version bump\n        env:\n          CRATE: ${{ inputs.crate }}\n          VERSION: ${{ inputs.version }}\n        run: |\n          git config user.name \"GitHub Actions\"\n          git config user.email \"actions@github.com\"\n          git add $CARGO_FILE\n          git commit -m \"Bump $CRATE to v$VERSION\"\n\n      - name: Install protobuf\n        if: ${{ contains(fromJSON('[\"client\",\"snapshots\"]'), inputs.crate) }}\n        run: |\n          sudo apt update\n          sudo apt install protobuf-compiler\n\n      - name: Publish on crates.io\n        run: cargo publish $DRYRUN --manifest-path $CARGO_FILE\n        env:\n          DRYRUN: ${{ inputs.dryrun && '--dry-run' || '' }}\n          CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}\n\n      - name: Push commit and tag\n        if: ${{ !inputs.dryrun }}\n        env:\n          TAG: ${{ inputs.crate }}-v${{ inputs.version }}\n        run: |\n          git tag $TAG\n          git push --atomic origin HEAD $TAG\n"
  },
  {
    "path": ".github/workflows/stale.yml",
    "content": "name: 'Close stale issues and PRs'\non:\n  schedule:\n    - cron: \"0 0 * * *\" # Every day at midnight\n  pull_request:\n    paths:\n      - '.github/workflows/stale.yml'\n\npermissions: read-all\n\njobs:\n  stale:\n    runs-on: ubuntu-latest\n\n    permissions:\n      actions: write\n      contents: write # only for delete-branch option\n      issues: write\n      pull-requests: write\n\n    steps:\n      - uses: actions/stale@b5d41d4e1d5dceea10e7104786b73624c18a190f # v10.2.0\n        # All stale bot options: https://github.com/actions/stale#all-options\n        with:\n          # Idle number of days before marking issues/PRs stale\n          days-before-stale: 90\n          # Idle number of days before closing stale issues/PRs\n          days-before-close: 7\n          # Comment on the staled issues\n          stale-issue-message: 'This issue is stale because it has been open 90 days with no activity. This issue will be closed in 7 days unless new comments are made or the stale label is removed.'\n          # Comment on the staled PRs\n          stale-pr-message: 'This PR is stale because it has been open 90 days with no activity. This PR will be closed in 7 days unless new comments are made or the stale label is removed.'\n          # Comment on the staled issues while closed\n          close-issue-message: 'This issue was closed because it has been stalled for 7 days with no activity.'\n          # Comment on the staled PRs while closed\n          close-pr-message: 'This PR was closed because it has been stalled for 7 days with no activity.'\n          # Enable dry-run when changing this file from a PR.\n          debug-only: ${{ github.event_name == 'pull_request' }}\n"
  },
  {
    "path": ".gitignore",
    "content": "# Generated by Cargo\n# will have compiled files and executables\n/target/\n\n# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries\n# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html\nCargo.lock\n\n# These are backup files generated by rustfmt\n**/*.rs.bk\nlog\n\n.vscode\n"
  },
  {
    "path": "Cargo.toml",
    "content": "[workspace]\nmembers = [\n  \"crates/client\",\n  \"crates/logging\",\n  \"crates/runc\",\n  \"crates/runc-shim\",\n  \"crates/shim\",\n  \"crates/shim-protos\",\n  \"crates/snapshots\",\n]\nresolver = \"2\"\n\n[profile.release]\n# Keep binary as small as possible\n# https://doc.rust-lang.org/book/ch09-01-unrecoverable-errors-with-panic.html\npanic = 'abort'\n\n# Common for all crates\n# See https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html#inheriting-a-dependency-from-a-workspace\n[workspace.package]\nlicense = \"Apache-2.0\"\nrepository = \"https://github.com/containerd/rust-extensions\"\nhomepage = \"https://containerd.io\"\nedition = \"2021\"\n\n# Common dependencies for all crates\n[workspace.dependencies]\nasync-trait = \"0.1.89\"\ncgroups-rs = { version = \"0.5\", default-features = false }\ncrossbeam = { version = \"0.8\", default-features = false }\nfutures = { version = \"0.3\", default-features = false }\nlibc = { version = \"0.2\", default-features = false }\nlog = { version = \"0.4\", default-features = false }\nnix = { version = \"0.31\", default-features = false }\noci-spec = { version = \"0.9\", default-features = false }\nprost = { version = \"0.14\", default-features = false }\nprost-build = { version = \"0.14\", default-features = false }\nprost-types = { version = \"0.14\", default-features = false }\nserde = { version = \"1.0\", default-features = false }\nserde_json = { version = \"1.0\", default-features = false }\nsimple_logger = { version = \"5.2\", default-features = false }\ntempfile = \"3.27\"\nthiserror = \"2.0\"\ntime = { version = \"0.3\", default-features = false }\ntokio = { version = \"1.50\", default-features = false }\ntonic = { version = \"0.14\", default-features = false }\ntonic-prost = \"0.14\"\ntonic-prost-build = { version = \"0.14\", default-features = false }\ntower = { version = \"0.5\", default-features = false }\nuuid = { version = \"1.22\", default-features = false }\n"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "MAINTAINERS",
    "content": "# rust-extensions maintainers\n#\n# As a containerd sub-project, containerd maintainers are also included from https://github.com/containerd/project/blob/main/MAINTAINERS.\n# See https://github.com/containerd/project/blob/main/GOVERNANCE.md for description of maintainer role\n#\n\n# REVIEWERS\n# GitHub ID, Name, Email address\n\"Burning1020\",\"Zhang Tianyang\",\"burning9699@gmail.com\"\n\"jsturtevant\",\"James Sturtevant\",\"jstur@microsoft.com\"\n\"mossaka\",\"Jiaxiao Zhou\",\"jiazho@microsoft.com\"\n\"jokemanfire\",\"Dingyang Hu\",\"hu.dingyang@zte.com.cn\"\n"
  },
  {
    "path": "README.md",
    "content": "# Rust extensions for containerd\n\n[![CI](https://github.com/mxpv/shim-rs/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/mxpv/shim-rs/actions/workflows/ci.yml)\n[![codecov](https://codecov.io/gh/containerd/rust-extensions/graph/badge.svg?token=VPUPN3MOFX)](https://codecov.io/gh/containerd/rust-extensions)\n[![Crates.io](https://img.shields.io/crates/l/containerd-client)](https://github.com/containerd/rust-extensions/blob/main/LICENSE)\n[![dependency status](https://deps.rs/repo/github/containerd/rust-extensions/status.svg)](https://deps.rs/repo/github/containerd/rust-extensions)\n\nA collection of Rust crates to extend containerd.\n\nThis repository contains the following crates:\n\n| Name | Description | Links |\n| --- | --- | --- |\n| [containerd-shim-protos](crates/shim-protos) | TTRPC bindings to shim interfaces | [![Crates.io](https://img.shields.io/crates/v/containerd-shim-protos)](https://crates.io/crates/containerd-shim-protos) [![docs.rs](https://img.shields.io/docsrs/containerd-shim-protos)](https://docs.rs/containerd-shim-protos/latest/containerd_shim_protos/) |\n| [containerd-shim-logging](crates/logging) | Shim logger plugins | [![Crates.io](https://img.shields.io/crates/v/containerd-shim-logging)](https://crates.io/crates/containerd-shim-logging) [![docs.rs](https://img.shields.io/docsrs/containerd-shim-logging)](https://docs.rs/containerd-shim-logging/latest/containerd_shim_logging/) |\n| [containerd-shim](crates/shim) | Runtime v2 shim wrapper | [![Crates.io](https://img.shields.io/crates/v/containerd-shim)](https://crates.io/crates/containerd-shim) [![docs.rs](https://img.shields.io/docsrs/containerd-shim)](https://docs.rs/containerd-shim/latest/containerd_shim/) |\n| [containerd-client](crates/client) | GRPC bindings to containerd APIs | [![Crates.io](https://img.shields.io/crates/v/containerd-client)](https://crates.io/crates/containerd-client) 
[![docs.rs](https://img.shields.io/docsrs/containerd-client)](https://docs.rs/containerd-client/latest/containerd_client/) |\n| [containerd-snapshots](crates/snapshots) | Remote snapshotter for containerd | [![Crates.io](https://img.shields.io/crates/v/containerd-snapshots)](https://crates.io/crates/containerd-snapshots) [![docs.rs](https://img.shields.io/docsrs/containerd-snapshots)](https://docs.rs/containerd-snapshots/latest/containerd_snapshots/) |\n| [runc](crates/runc) | Rust wrapper for runc CLI | [![Crates.io](https://img.shields.io/crates/v/runc)](https://crates.io/crates/runc) [![docs.rs](https://img.shields.io/docsrs/runc)](https://docs.rs/runc/latest/runc/) |\n| [containerd-runc-shim](crates/runc-shim) | Runtime v2 runc shim implementation | [![Crates.io](https://img.shields.io/crates/v/containerd-runc-shim)](https://crates.io/crates/containerd-runc-shim) |\n\n## How to build\nThe build process is as easy as:\n```bash\ncargo build --release\n```\n\n## Minimum supported Rust version (MSRV)\nThe project typically targets the latest stable Rust version.\nPlease refer to [rust-toolchain.toml](./rust-toolchain.toml) for the exact version currently used by our CIs.\n"
  },
  {
    "path": "clippy.toml",
    "content": "msrv = \"1.91\"\n"
  },
  {
    "path": "codecov.yml",
    "content": "comment: false\n"
  },
  {
    "path": "crates/client/Cargo.toml",
    "content": "[package]\nname = \"containerd-client\"\nversion = \"0.9.0\"\nauthors = [\n  \"Maksym Pavlenko <pavlenko.maksym@gmail.com>\",\n  \"The containerd Authors\",\n]\ndescription = \"GRPC bindings to containerd APIs\"\nkeywords = [\"containerd\", \"client\", \"grpc\", \"containers\"]\ncategories = [\"api-bindings\", \"asynchronous\"]\n\nedition.workspace = true\nlicense.workspace = true\nrepository.workspace = true\nhomepage.workspace = true\n\n[[example]]\nname = \"container\"\npath = \"examples/container.rs\"\n\n[[example]]\nname = \"version\"\npath = \"examples/version.rs\"\n\n[dependencies]\nhyper-util = { version = \"0.1.20\", default-features = false, features = [\"tokio\"] }\nprost = { workspace = true, features = [\"derive\", \"std\"] }\nprost-types = { workspace = true, features = [\"std\"] }\ntokio = { workspace = true, features = [\"net\"], optional = true }\ntonic = { workspace = true, features = [\"codegen\", \"channel\"] }\ntonic-prost.workspace = true\ntower = { workspace = true, features = [\"util\"], optional = true }\n\n[build-dependencies]\ntonic-prost-build.workspace = true\n\n[dev-dependencies]\ntokio = { workspace = true, features = [\"rt\", \"macros\", \"net\"] }\n\n[features]\nconnect = [\"tokio\", \"tower\"]\ndocs = []\n\n# Technically Tonic doesn't require Tokio and Tower dependencies here.\n# However we need them to implement `connect` helper and it's highly unlikely\n# that Tonic will be used with any other async runtime (see https://github.com/hyperium/tonic/issues/152)\n# So we enable `connect` feature by default (use `--no-default-features` otherwise).\ndefault = [\"connect\"]\n\n[package.metadata.docs.rs]\nfeatures = [\"docs\"]\n\n[package.metadata.cargo-machete]\nignored = [\"prost\", \"tonic-prost\"]\n"
  },
  {
    "path": "crates/client/README.md",
    "content": "# containerd GRPC client\n\n[![Crates.io](https://img.shields.io/crates/v/containerd-client)](https://crates.io/crates/containerd-client)\n[![docs.rs](https://img.shields.io/docsrs/containerd-client)](https://docs.rs/containerd-client/latest/containerd_client/)\n[![Crates.io](https://img.shields.io/crates/l/containerd-client)](https://github.com/containerd/rust-extensions/blob/main/LICENSE)\n[![CI](https://github.com/containerd/rust-extensions/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/containerd/rust-extensions/actions/workflows/ci.yml)\n\nThis crate implements a GRPC client to query containerd APIs.\n\n## Example\n\nRun with `cargo run --example version`\n\n```rust\nuse containerd_client::{connect, services::v1::version_client::VersionClient};\n\nasync fn query_version() {\n    // Launch containerd at /run/containerd/containerd.sock\n    let channel = connect(\"/run/containerd/containerd.sock\").await.unwrap();\n\n    let mut client = VersionClient::new(channel);\n    let resp = client.version(()).await.unwrap();\n\n    println!(\"Response: {:?}\", resp.get_ref());\n}\n```\n"
  },
  {
    "path": "crates/client/build.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::{env, fs, io};\n\nconst PROTO_FILES: &[&str] = &[\n    // Types\n    \"types/descriptor.proto\",\n    \"types/metrics.proto\",\n    \"types/mount.proto\",\n    \"types/platform.proto\",\n    \"types/sandbox.proto\",\n    \"types/task/task.proto\",\n    \"types/transfer/imagestore.proto\",\n    \"types/transfer/importexport.proto\",\n    \"types/transfer/progress.proto\",\n    \"types/transfer/registry.proto\",\n    \"types/transfer/streaming.proto\",\n    // Services\n    \"services/containers/v1/containers.proto\",\n    \"services/content/v1/content.proto\",\n    \"services/diff/v1/diff.proto\",\n    \"services/events/v1/events.proto\",\n    \"services/images/v1/images.proto\",\n    \"services/introspection/v1/introspection.proto\",\n    \"services/leases/v1/leases.proto\",\n    \"services/namespaces/v1/namespace.proto\",\n    \"services/sandbox/v1/sandbox.proto\",\n    \"services/snapshots/v1/snapshots.proto\",\n    \"services/streaming/v1/streaming.proto\",\n    \"services/tasks/v1/tasks.proto\",\n    \"services/transfer/v1/transfer.proto\",\n    \"services/version/v1/version.proto\",\n    // Events\n    \"events/container.proto\",\n    \"events/content.proto\",\n    \"events/image.proto\",\n    \"events/namespace.proto\",\n    \"events/snapshot.proto\",\n    \"events/task.proto\",\n];\n\nconst FIXUP_MODULES: &[&str] = &[\n    
\"containerd.services.diff.v1\",\n    \"containerd.services.images.v1\",\n    \"containerd.services.introspection.v1\",\n    \"containerd.services.sandbox.v1\",\n    \"containerd.services.snapshots.v1\",\n    \"containerd.services.tasks.v1\",\n    \"containerd.services.containers.v1\",\n    \"containerd.services.content.v1\",\n    \"containerd.services.events.v1\",\n];\n\nfn main() {\n    let mut config = tonic_prost_build::Config::new();\n    config.protoc_arg(\"--experimental_allow_proto3_optional\");\n    config.enable_type_names();\n\n    tonic_prost_build::configure()\n        .build_server(false)\n        .compile_with_config(\n            config,\n            PROTO_FILES,\n            &[\"vendor/github.com/containerd/containerd/api/\", \"vendor/\"],\n        )\n        .expect(\"Failed to generate GRPC bindings\");\n\n    for module in FIXUP_MODULES {\n        fixup_imports(module).expect(\"Failed to fixup module\");\n    }\n}\n\n// Original containerd's protobuf files contain Go style imports:\n// import \"github.com/containerd/containerd/api/types/mount.proto\";\n//\n// Tonic produces invalid code for these imports:\n// error[E0433]: failed to resolve: there are too many leading `super` keywords\n//   --> /containerd-rust-extensions/target/debug/build/containerd-client-protos-0a328c0c63f60cd0/out/containerd.services.diff.v1.rs:47:52\n//    |\n// 47 |     pub diff: ::core::option::Option<super::super::super::types::Descriptor>,\n//    |                                                    ^^^^^ there are too many leading `super` keywords\n//\n// This func fixes imports to crate level ones, like `crate::types::Mount`\nfn fixup_imports(path: &str) -> Result<(), io::Error> {\n    let out_dir = env::var(\"OUT_DIR\").unwrap();\n    let path = format!(\"{}/{}.rs\", out_dir, path);\n\n    let contents = fs::read_to_string(&path)?\n        .replace(\"super::super::super::v1::types\", \"crate::types::v1\") // for tasks service\n        
.replace(\"super::super::super::super::types\", \"crate::types\")\n        .replace(\"super::super::super::types\", \"crate::types\")\n        .replace(\"super::super::super::super::google\", \"crate::google\")\n        .replace(\n            \"/// \tfilters\\\\[0\\\\] or filters\\\\[1\\\\] or ... or filters\\\\[n-1\\\\] or filters\\\\[n\\\\]\",\n            r#\"\n            /// ```notrust\n            /// \tfilters[0] or filters[1] or ... or filters[n-1] or filters[n]\n            /// ```\"#,\n        );\n    fs::write(path, contents)?;\n    Ok(())\n}\n"
  },
  {
    "path": "crates/client/examples/container.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::{fs, fs::File};\n\nuse client::{\n    services::v1::{\n        container::Runtime, containers_client::ContainersClient, tasks_client::TasksClient,\n        Container, CreateContainerRequest, CreateTaskRequest, DeleteContainerRequest,\n        DeleteTaskRequest, StartRequest, WaitRequest,\n    },\n    with_namespace,\n};\nuse containerd_client as client;\nuse prost_types::Any;\nuse tonic::Request;\n\nconst CID: &str = \"abc123\";\nconst NAMESPACE: &str = \"default\";\n\n/// Make sure you run containerd before running this example.\n/// NOTE: to run this example, you must prepare a rootfs.\n#[tokio::main(flavor = \"current_thread\")]\nasync fn main() {\n    let channel = client::connect(\"/run/containerd/containerd.sock\")\n        .await\n        .expect(\"Connect Failed\");\n\n    let mut client = ContainersClient::new(channel.clone());\n\n    let rootfs = \"/tmp/busybox/bundle/rootfs\";\n    // the container will run with command `echo $output`\n    let output = \"hello rust client\";\n\n    let spec = include_str!(\"container_spec.json\");\n    let spec = spec\n        .to_string()\n        .replace(\"$ROOTFS\", rootfs)\n        .replace(\"$OUTPUT\", output);\n\n    let spec = Any {\n        type_url: \"types.containerd.io/opencontainers/runtime-spec/1/Spec\".to_string(),\n        value: spec.into_bytes(),\n    };\n\n    let container 
= Container {\n        id: CID.to_string(),\n        image: \"docker.io/library/alpine:latest\".to_string(),\n        runtime: Some(Runtime {\n            name: \"io.containerd.runc.v2\".to_string(),\n            options: None,\n        }),\n        spec: Some(spec),\n        ..Default::default()\n    };\n\n    let req = CreateContainerRequest {\n        container: Some(container),\n    };\n    let req = with_namespace!(req, NAMESPACE);\n\n    let _resp = client\n        .create(req)\n        .await\n        .expect(\"Failed to create container\");\n\n    println!(\"Container: {:?} created\", CID);\n\n    // create temp dir for stdin/stdout/stderr\n    let tmp = std::env::temp_dir().join(\"containerd-client-test\");\n    fs::create_dir_all(&tmp).expect(\"Failed to create temp directory\");\n    let stdin = tmp.join(\"stdin\");\n    let stdout = tmp.join(\"stdout\");\n    let stderr = tmp.join(\"stderr\");\n    File::create(&stdin).expect(\"Failed to create stdin\");\n    File::create(&stdout).expect(\"Failed to create stdout\");\n    File::create(&stderr).expect(\"Failed to create stderr\");\n\n    // create and start task\n    let mut client = TasksClient::new(channel.clone());\n\n    let req = CreateTaskRequest {\n        container_id: CID.to_string(),\n        stdin: stdin.to_str().unwrap().to_string(),\n        stdout: stdout.to_str().unwrap().to_string(),\n        stderr: stderr.to_str().unwrap().to_string(),\n        ..Default::default()\n    };\n    let req = with_namespace!(req, NAMESPACE);\n\n    let _resp = client.create(req).await.expect(\"Failed to create task\");\n\n    println!(\"Task: {:?} created\", CID);\n\n    let req = StartRequest {\n        container_id: CID.to_string(),\n        ..Default::default()\n    };\n    let req = with_namespace!(req, NAMESPACE);\n\n    let _resp = client.start(req).await.expect(\"Failed to start task\");\n\n    println!(\"Task: {:?} started\", CID);\n\n    // wait task\n    let req = WaitRequest {\n        
container_id: CID.to_string(),\n        ..Default::default()\n    };\n    let req = with_namespace!(req, NAMESPACE);\n\n    let _resp = client.wait(req).await.expect(\"Failed to wait task\");\n\n    println!(\"Task: {:?} stopped\", CID);\n\n    // delete task\n    let req = DeleteTaskRequest {\n        container_id: CID.to_string(),\n    };\n    let req = with_namespace!(req, NAMESPACE);\n\n    let _resp = client.delete(req).await.expect(\"Failed to delete task\");\n\n    println!(\"Task: {:?} deleted\", CID);\n\n    // delete container\n    let mut client = ContainersClient::new(channel);\n\n    let req = DeleteContainerRequest {\n        id: CID.to_string(),\n    };\n    let req = with_namespace!(req, NAMESPACE);\n\n    let _resp = client\n        .delete(req)\n        .await\n        .expect(\"Failed to delete container\");\n\n    println!(\"Container: {:?} deleted\", CID);\n\n    // test container output\n    let actual_stdout = fs::read_to_string(stdout).expect(\"read stdout actual\");\n    assert_eq!(actual_stdout.strip_suffix('\\n').unwrap(), output);\n\n    // clear stdin/stdout/stderr\n    let _ = fs::remove_dir_all(tmp);\n}\n"
  },
  {
    "path": "crates/client/examples/container_events.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse client::{\n    events::{ContainerCreate, ContainerDelete},\n    services::v1::{events_client::EventsClient, SubscribeRequest},\n};\nuse containerd_client as client;\n\n/// Make sure you run containerd before running this example.\n#[tokio::main(flavor = \"current_thread\")]\nasync fn main() {\n    let channel = client::connect(\"/run/containerd/containerd.sock\")\n        .await\n        .expect(\"Connect Failed\");\n\n    let mut client = EventsClient::new(channel.clone());\n\n    let request = SubscribeRequest::default();\n    let mut response = client\n        .subscribe(request)\n        .await\n        .expect(\"failed to subscribe to events\")\n        .into_inner();\n\n    loop {\n        match response.message().await {\n            Ok(event) => {\n                if let Some(event) = event {\n                    match event.topic.as_str() {\n                        \"/containers/create\" => {\n                            if let Some(mut payload) = event.event {\n                                // Containerd doesn't send event payloads with a leading slash on the type URL, which is\n                                // required by the `Any` type specification. 
We add it manually here so that `prost` can\n                                // properly decode the payload.\n                                if !payload.type_url.starts_with('/') {\n                                    payload.type_url.insert(0, '/');\n                                }\n\n                                let payload: ContainerCreate = payload\n                                    .to_msg()\n                                    .expect(\"failed to parse ContainerCreate payload\");\n\n                                println!(\n                                    \"container created: id={} payload={:?}\",\n                                    payload.id, payload\n                                );\n                            }\n                        }\n                        \"/containers/delete\" => {\n                            if let Some(mut payload) = event.event {\n                                // Containerd doesn't send event payloads with a leading slash on the type URL, which is\n                                // required by the `Any` type specification. 
We add it manually here so that `prost` can\n                                // properly decode the payload.\n                                if !payload.type_url.starts_with('/') {\n                                    payload.type_url.insert(0, '/');\n                                }\n\n                                let payload: ContainerDelete = payload\n                                    .to_msg()\n                                    .expect(\"failed to parse ContainerDelete payload\");\n\n                                println!(\n                                    \"container deleted: id={} payload={:?}\",\n                                    payload.id, payload\n                                );\n                            }\n                        }\n                        _ => {}\n                    }\n                }\n            }\n            Err(e) => {\n                eprintln!(\"error while streaming events: {:?}\", e);\n                break;\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "crates/client/examples/container_pull.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::env::consts;\n\nuse client::{\n    services::v1::{transfer_client::TransferClient, TransferOptions, TransferRequest},\n    to_any,\n    types::{\n        transfer::{ImageStore, OciRegistry, UnpackConfiguration},\n        Platform,\n    },\n    with_namespace,\n};\nuse containerd_client as client;\nuse tonic::Request;\n\nconst IMAGE: &str = \"docker.io/library/alpine:latest\";\nconst NAMESPACE: &str = \"default\";\n\n/// Make sure you run containerd before running this example.\n/// NOTE: to run this example, you must prepare a rootfs.\n#[tokio::main(flavor = \"current_thread\")]\nasync fn main() {\n    let arch = match consts::ARCH {\n        \"x86_64\" => \"amd64\",\n        \"aarch64\" => \"arm64\",\n        _ => consts::ARCH,\n    };\n\n    let channel = client::connect(\"/run/containerd/containerd.sock\")\n        .await\n        .expect(\"Connect Failed\");\n    let mut client = TransferClient::new(channel.clone());\n\n    // Create the source (OCIRegistry)\n    let source = OciRegistry {\n        reference: IMAGE.to_string(),\n        resolver: Default::default(),\n    };\n\n    let platform = Platform {\n        os: \"linux\".to_string(),\n        architecture: arch.to_string(),\n        variant: \"\".to_string(),\n        os_version: \"\".to_string(),\n        os_features: vec![],\n    };\n\n    // Create the destination 
(ImageStore)\n    let destination = ImageStore {\n        name: IMAGE.to_string(),\n        platforms: vec![platform.clone()],\n        unpacks: vec![UnpackConfiguration {\n            platform: Some(platform),\n            ..Default::default()\n        }],\n        ..Default::default()\n    };\n\n    let anys = to_any(&source);\n    let anyd = to_any(&destination);\n\n    println!(\"Pulling image for linux/{} from source: {:?}\", arch, source);\n\n    // Create the transfer request\n    let request = TransferRequest {\n        source: Some(anys),\n        destination: Some(anyd),\n        options: Some(TransferOptions {\n            ..Default::default()\n        }),\n    };\n    // Execute the transfer (pull)\n    client\n        .transfer(with_namespace!(request, NAMESPACE))\n        .await\n        .expect(\"unable to transfer image\");\n}\n"
  },
  {
    "path": "crates/client/examples/container_spec.json",
    "content": "{\n    \"ociVersion\": \"1.0.0-rc2-dev\",\n    \"platform\": {\n        \"os\": \"linux\",\n        \"arch\": \"amd64\"\n    },\n    \"process\": {\n        \"terminal\": false,\n        \"consoleSize\": {\n            \"height\": 0,\n            \"width\": 0\n        },\n        \"user\": {\n            \"uid\": 0,\n            \"gid\": 0\n        },\n        \"args\": [ \"echo\", \"$OUTPUT\" ],\n        \"env\": [\n            \"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\n            \"TERM=xterm\"\n        ],\n        \"cwd\": \"/\",\n        \"rlimits\": [{\n            \"type\": \"RLIMIT_NOFILE\",\n            \"hard\": 1024,\n            \"soft\": 1024\n        }],\n        \"noNewPrivileges\": true\n    },\n    \"root\": {\n        \"path\": \"$ROOTFS\",\n        \"readonly\": false\n    },\n    \"hostname\": \"test\",\n    \"mounts\": [{\n            \"destination\": \"/proc\",\n            \"type\": \"proc\",\n            \"source\": \"proc\"\n        },\n        {\n            \"destination\": \"/dev\",\n            \"type\": \"tmpfs\",\n            \"source\": \"tmpfs\",\n            \"options\": [\n                \"nosuid\",\n                \"strictatime\",\n                \"mode=755\",\n                \"size=65536k\"\n            ]\n        },\n        {\n            \"destination\": \"/dev/pts\",\n            \"type\": \"devpts\",\n            \"source\": \"devpts\",\n            \"options\": [\n                \"nosuid\",\n                \"noexec\",\n                \"newinstance\",\n                \"ptmxmode=0666\",\n                \"mode=0620\",\n                \"gid=5\"\n            ]\n        },\n        {\n            \"destination\": \"/dev/shm\",\n            \"type\": \"tmpfs\",\n            \"source\": \"shm\",\n            \"options\": [\n                \"nosuid\",\n                \"noexec\",\n                \"nodev\",\n                \"mode=1777\",\n                \"size=65536k\"\n     
       ]\n        },\n        {\n            \"destination\": \"/dev/mqueue\",\n            \"type\": \"mqueue\",\n            \"source\": \"mqueue\",\n            \"options\": [\n                \"nosuid\",\n                \"noexec\",\n                \"nodev\"\n            ]\n        },\n        {\n            \"destination\": \"/sys\",\n            \"type\": \"sysfs\",\n            \"source\": \"sysfs\",\n            \"options\": [\n                \"nosuid\",\n                \"noexec\",\n                \"nodev\",\n                \"ro\"\n            ]\n        },\n        {\n            \"destination\": \"/sys/fs/cgroup\",\n            \"type\": \"cgroup\",\n            \"source\": \"cgroup\",\n            \"options\": [\n                \"nosuid\",\n                \"noexec\",\n                \"nodev\",\n                \"relatime\",\n                \"ro\"\n            ]\n        }\n    ],\n    \"hooks\": {},\n    \"linux\": {\n        \"devices\": [],\n        \"cgroupsPath\": \"kata/vfiotest\",\n        \"resources\": {\n            \"devices\": [\n                {\"allow\":false,\"access\":\"rwm\"},\n                {\"allow\":true,\"type\":\"c\",\"major\":1,\"minor\":3,\"access\":\"rwm\"},\n                {\"allow\":true,\"type\":\"c\",\"major\":1,\"minor\":5,\"access\":\"rwm\"},\n                {\"allow\":true,\"type\":\"c\",\"major\":1,\"minor\":8,\"access\":\"rwm\"},\n                {\"allow\":true,\"type\":\"c\",\"major\":1,\"minor\":9,\"access\":\"rwm\"},\n                {\"allow\":true,\"type\":\"c\",\"major\":5,\"minor\":0,\"access\":\"rwm\"},\n                {\"allow\":true,\"type\":\"c\",\"major\":5,\"minor\":1,\"access\":\"rwm\"}\n            ]\n        },\n        \"namespaces\": [{\n                \"type\": \"pid\"\n            },\n            {\n                \"type\": \"network\"\n            },\n            {\n                \"type\": \"ipc\"\n            },\n            {\n                \"type\": \"uts\"\n            },\n   
         {\n                \"type\": \"mount\"\n            }\n        ],\n        \"maskedPaths\": [\n            \"/proc/kcore\",\n            \"/proc/latency_stats\",\n            \"/proc/timer_list\",\n            \"/proc/timer_stats\",\n            \"/proc/sched_debug\",\n            \"/sys/firmware\"\n        ],\n        \"readonlyPaths\": [\n            \"/proc/asound\",\n            \"/proc/bus\",\n            \"/proc/fs\",\n            \"/proc/irq\",\n            \"/proc/sys\",\n            \"/proc/sysrq-trigger\"\n        ]\n    }\n}\n"
  },
  {
    "path": "crates/client/examples/version.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse containerd_client::Client;\n\n/// Make sure you run containerd before running this example.\n#[tokio::main(flavor = \"current_thread\")]\nasync fn main() {\n    #[cfg(unix)]\n    let path = \"/var/run/containerd/containerd.sock\";\n\n    #[cfg(windows)]\n    let path = r\"\\\\.\\pipe\\containerd-containerd\";\n\n    let client = Client::from_path(path).await.expect(\"Connect failed\");\n\n    let resp = client\n        .version()\n        .version(())\n        .await\n        .expect(\"Failed to query version\");\n\n    println!(\"Response: {:?}\", resp.get_ref());\n}\n"
  },
  {
    "path": "crates/client/rsync.txt",
    "content": "api/events/*.proto\napi/services/**/*.proto\napi/types/*.proto\napi/types/**/*.proto\nprotobuf/plugin/fieldpath.proto\n"
  },
  {
    "path": "crates/client/src/lib.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\n#![cfg_attr(feature = \"docs\", doc = include_str!(\"../README.md\"))]\n// No way to derive Eq with tonic :(\n// See https://github.com/hyperium/tonic/issues/1056\n#![allow(clippy::derive_partial_eq_without_eq)]\n\npub use tonic;\n\n/// Generated `containerd.types` types.\npub mod types {\n    tonic::include_proto!(\"containerd.types\");\n\n    pub mod v1 {\n        tonic::include_proto!(\"containerd.v1.types\");\n    }\n    pub mod transfer {\n        tonic::include_proto!(\"containerd.types.transfer\");\n    }\n}\n\n/// Generated `google.rpc` types, containerd services typically use some of these types.\npub mod google {\n    #[allow(rustdoc::broken_intra_doc_links)]\n    pub mod rpc {\n        tonic::include_proto!(\"google.rpc\");\n    }\n}\n\n/// Generated `containerd.services.*` services.\npub mod services {\n    #[allow(clippy::tabs_in_doc_comments)]\n    #[allow(rustdoc::invalid_rust_codeblocks)]\n    #[allow(rustdoc::invalid_html_tags)]\n    pub mod v1 {\n        tonic::include_proto!(\"containerd.services.containers.v1\");\n        tonic::include_proto!(\"containerd.services.content.v1\");\n        tonic::include_proto!(\"containerd.services.diff.v1\");\n        tonic::include_proto!(\"containerd.services.events.v1\");\n        tonic::include_proto!(\"containerd.services.images.v1\");\n        
tonic::include_proto!(\"containerd.services.introspection.v1\");\n        tonic::include_proto!(\"containerd.services.leases.v1\");\n        tonic::include_proto!(\"containerd.services.namespaces.v1\");\n        tonic::include_proto!(\"containerd.services.streaming.v1\");\n        tonic::include_proto!(\"containerd.services.tasks.v1\");\n        tonic::include_proto!(\"containerd.services.transfer.v1\");\n\n        // Sandbox services (Controller and Store) don't make it clear that they are for sandboxes.\n        // Wrap these into a sub module to make the names more clear.\n        pub mod sandbox {\n            tonic::include_proto!(\"containerd.services.sandbox.v1\");\n        }\n\n        // Snapshot's `Info` conflicts with Content's `Info`, so wrap it into a separate sub module.\n        pub mod snapshots {\n            tonic::include_proto!(\"containerd.services.snapshots.v1\");\n        }\n\n        tonic::include_proto!(\"containerd.services.version.v1\");\n    }\n}\n\n/// Generated event types.\npub mod events {\n    tonic::include_proto!(\"containerd.events\");\n}\n\n/// Connect creates a unix channel to containerd GRPC socket.\n///\n/// This helper is intended to be used in conjunction with the [Tokio](https://tokio.rs) runtime.\n#[cfg(feature = \"connect\")]\npub async fn connect(\n    path: impl AsRef<std::path::Path>,\n) -> Result<tonic::transport::Channel, tonic::transport::Error> {\n    use tonic::transport::Endpoint;\n\n    let path = path.as_ref().to_path_buf();\n\n    // Taken from https://github.com/hyperium/tonic/blob/71fca362d7ffbb230547f23b3f2fb75c414063a8/examples/src/uds/client.rs#L21-L28\n    // The URI is ignored here because UDS does not use it;\n    // the connection is made with UnixStream::connect.\n    let channel = Endpoint::try_from(\"http://[::]\")?\n        .connect_with_connector(tower::service_fn(move |_| {\n            let path = path.clone();\n\n            async move {\n                #[cfg(unix)]\n                {\n                
    Ok::<_, std::io::Error>(hyper_util::rt::TokioIo::new(\n                        tokio::net::UnixStream::connect(path).await?,\n                    ))\n                }\n\n                #[cfg(windows)]\n                {\n                    let client = tokio::net::windows::named_pipe::ClientOptions::new()\n                        .open(&path)\n                        .map_err(|e| std::io::Error::from(e))?;\n\n                    Ok::<_, std::io::Error>(hyper_util::rt::TokioIo::new(client))\n                }\n            }\n        }))\n        .await?;\n\n    Ok(channel)\n}\n\nuse prost::{Message, Name};\nuse prost_types::Any;\n\n// to_any provides a helper to match the current use of the protobuf \"fullname\" trait\n// in the Go code on the gRPC server side in containerd when handling matching of Any\n// types to registered types on the server. Further discussion on future direction\n// of typeurl in this issue: https://github.com/containerd/rust-extensions/issues/362\npub fn to_any<T: Message + Name>(m: &T) -> Any {\n    let mut anyt = Any::from_msg(m).unwrap();\n    anyt.type_url = T::full_name();\n    anyt\n}\n\n/// Helps to inject a namespace into a request.\n///\n/// To use this macro, the `tonic::Request` type needs to be in scope.\n#[macro_export]\nmacro_rules! 
with_namespace {\n    ($req:expr, $ns:expr) => {{\n        let mut req = Request::new($req);\n        let md = req.metadata_mut();\n        // https://github.com/containerd/containerd/blob/main/pkg/namespaces/grpc.go#L27\n        md.insert(\"containerd-namespace\", $ns.parse().unwrap());\n        req\n    }};\n}\n\nuse services::v1::{\n    containers_client::ContainersClient,\n    content_client::ContentClient,\n    diff_client::DiffClient,\n    events_client::EventsClient,\n    images_client::ImagesClient,\n    introspection_client::IntrospectionClient,\n    leases_client::LeasesClient,\n    namespaces_client::NamespacesClient,\n    sandbox::{controller_client::ControllerClient, store_client::StoreClient},\n    snapshots::snapshots_client::SnapshotsClient,\n    streaming_client::StreamingClient,\n    tasks_client::TasksClient,\n    transfer_client::TransferClient,\n    version_client::VersionClient,\n};\nuse tonic::transport::{Channel, Error};\n\n/// Client to containerd's APIs.\npub struct Client {\n    channel: Channel,\n}\n\nimpl From<Channel> for Client {\n    fn from(value: Channel) -> Self {\n        Self { channel: value }\n    }\n}\n\nimpl Client {\n    /// Create a new client from UDS socket.\n    #[cfg(feature = \"connect\")]\n    pub async fn from_path(path: impl AsRef<std::path::Path>) -> Result<Self, Error> {\n        let channel = connect(path).await?;\n        Ok(Self { channel })\n    }\n\n    /// Access to the underlying Tonic channel.\n    #[inline]\n    pub fn channel(&self) -> Channel {\n        self.channel.clone()\n    }\n\n    /// Version service.\n    #[inline]\n    pub fn version(&self) -> VersionClient<Channel> {\n        VersionClient::new(self.channel())\n    }\n\n    /// Task service client.\n    #[inline]\n    pub fn tasks(&self) -> TasksClient<Channel> {\n        TasksClient::new(self.channel())\n    }\n\n    /// Transfer service client.\n    #[inline]\n    pub fn transfer(&self) -> TransferClient<Channel> {\n        
TransferClient::new(self.channel())\n    }\n\n    /// Sandbox store client.\n    #[inline]\n    pub fn sandbox_store(&self) -> StoreClient<Channel> {\n        StoreClient::new(self.channel())\n    }\n\n    /// Streaming services client.\n    #[inline]\n    pub fn streaming(&self) -> StreamingClient<Channel> {\n        StreamingClient::new(self.channel())\n    }\n\n    /// Sandbox controller client.\n    #[inline]\n    pub fn sandbox_controller(&self) -> ControllerClient<Channel> {\n        ControllerClient::new(self.channel())\n    }\n\n    /// Snapshots service.\n    #[inline]\n    pub fn snapshots(&self) -> SnapshotsClient<Channel> {\n        SnapshotsClient::new(self.channel())\n    }\n\n    /// Namespaces service.\n    #[inline]\n    pub fn namespaces(&self) -> NamespacesClient<Channel> {\n        NamespacesClient::new(self.channel())\n    }\n\n    /// Leases service.\n    #[inline]\n    pub fn leases(&self) -> LeasesClient<Channel> {\n        LeasesClient::new(self.channel())\n    }\n\n    /// Introspection service.\n    #[inline]\n    pub fn introspection(&self) -> IntrospectionClient<Channel> {\n        IntrospectionClient::new(self.channel())\n    }\n\n    /// Image service.\n    #[inline]\n    pub fn images(&self) -> ImagesClient<Channel> {\n        ImagesClient::new(self.channel())\n    }\n\n    /// Event service.\n    #[inline]\n    pub fn events(&self) -> EventsClient<Channel> {\n        EventsClient::new(self.channel())\n    }\n\n    /// Diff service.\n    #[inline]\n    pub fn diff(&self) -> DiffClient<Channel> {\n        DiffClient::new(self.channel())\n    }\n\n    /// Content service.\n    #[inline]\n    pub fn content(&self) -> ContentClient<Channel> {\n        ContentClient::new(self.channel())\n    }\n\n    /// Container service.\n    #[inline]\n    pub fn containers(&self) -> ContainersClient<Channel> {\n        ContainersClient::new(self.channel())\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use prost_types::Any;\n\n    use 
crate::events::ContainerCreate;\n\n    #[test]\n    fn any_roundtrip() {\n        let original = ContainerCreate {\n            id: \"test\".to_string(),\n            image: \"test\".to_string(),\n            runtime: None,\n        };\n\n        let any = Any::from_msg(&original).expect(\"should not fail to encode\");\n        let decoded: ContainerCreate = any.to_msg().expect(\"should not fail to decode\");\n\n        assert_eq!(original, decoded)\n    }\n}\n"
  },
  {
    "path": "crates/client/vendor/README.md",
    "content": "# Vendor\n\nThis directory contains vendor dependencies needed to generate protobuf bindings.\n\nProto files are copy-pasted directly from upstream repos:\n+ https://github.com/containerd/containerd\n+ https://github.com/protocolbuffers/protobuf\n+ https://github.com/gogo/protobuf\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/events/container.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.events;\n\nimport \"google/protobuf/any.proto\";\nimport \"types/fieldpath.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/events;events\";\noption (containerd.types.fieldpath_all) = true;\n\nmessage ContainerCreate {\n  string id = 1;\n  string image = 2;\n  message Runtime {\n    string name = 1;\n    google.protobuf.Any options = 2;\n  }\n  Runtime runtime = 3;\n}\n\nmessage ContainerUpdate {\n  string id = 1;\n  string image = 2;\n  map<string, string> labels = 3;\n  string snapshot_key = 4;\n}\n\nmessage ContainerDelete {\n  string id = 1;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/events/content.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.events;\n\nimport \"types/fieldpath.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/events;events\";\noption (containerd.types.fieldpath_all) = true;\n\nmessage ContentCreate {\n  string digest = 1;\n  int64 size = 2;\n}\n\nmessage ContentDelete {\n  string digest = 1;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/events/image.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.images.v1;\n\nimport \"types/fieldpath.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/events;events\";\noption (containerd.types.fieldpath_all) = true;\n\nmessage ImageCreate {\n  string name = 1;\n  map<string, string> labels = 2;\n}\n\nmessage ImageUpdate {\n  string name = 1;\n  map<string, string> labels = 2;\n}\n\nmessage ImageDelete {\n  string name = 1;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/events/namespace.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.events;\n\nimport \"types/fieldpath.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/events;events\";\noption (containerd.types.fieldpath_all) = true;\n\nmessage NamespaceCreate {\n  string name = 1;\n  map<string, string> labels = 2;\n}\n\nmessage NamespaceUpdate {\n  string name = 1;\n  map<string, string> labels = 2;\n}\n\nmessage NamespaceDelete {\n  string name = 1;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/events/sandbox.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.events;\n\nimport \"google/protobuf/timestamp.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/events;events\";\n\nmessage SandboxCreate {\n  string sandbox_id = 1;\n}\n\nmessage SandboxStart {\n  string sandbox_id = 1;\n}\n\nmessage SandboxExit {\n  string sandbox_id = 1;\n  uint32 exit_status = 2;\n  google.protobuf.Timestamp exited_at = 3;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/events/snapshot.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.events;\n\nimport \"types/fieldpath.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/events;events\";\noption (containerd.types.fieldpath_all) = true;\n\nmessage SnapshotPrepare {\n  string key = 1;\n  string parent = 2;\n  string snapshotter = 5;\n}\n\nmessage SnapshotCommit {\n  string key = 1;\n  string name = 2;\n  string snapshotter = 5;\n}\n\nmessage SnapshotRemove {\n  string key = 1;\n  string snapshotter = 5;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/events/task.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.events;\n\nimport \"google/protobuf/timestamp.proto\";\nimport \"types/fieldpath.proto\";\nimport \"types/mount.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/events;events\";\noption (containerd.types.fieldpath_all) = true;\n\nmessage TaskCreate {\n  string container_id = 1;\n  string bundle = 2;\n  repeated containerd.types.Mount rootfs = 3;\n  TaskIO io = 4;\n  string checkpoint = 5;\n  uint32 pid = 6;\n}\n\nmessage TaskStart {\n  string container_id = 1;\n  uint32 pid = 2;\n}\n\nmessage TaskDelete {\n  string container_id = 1;\n  uint32 pid = 2;\n  uint32 exit_status = 3;\n  google.protobuf.Timestamp exited_at = 4;\n  // id is the specific exec. 
By default if omitted will be `\"\"` thus matches\n  // the init exec of the task matching `container_id`.\n  string id = 5;\n}\n\nmessage TaskIO {\n  string stdin = 1;\n  string stdout = 2;\n  string stderr = 3;\n  bool terminal = 4;\n}\n\nmessage TaskExit {\n  string container_id = 1;\n  string id = 2;\n  uint32 pid = 3;\n  uint32 exit_status = 4;\n  google.protobuf.Timestamp exited_at = 5;\n}\n\nmessage TaskOOM {\n  string container_id = 1;\n}\n\nmessage TaskExecAdded {\n  string container_id = 1;\n  string exec_id = 2;\n}\n\nmessage TaskExecStarted {\n  string container_id = 1;\n  string exec_id = 2;\n  uint32 pid = 3;\n}\n\nmessage TaskPaused {\n  string container_id = 1;\n}\n\nmessage TaskResumed {\n  string container_id = 1;\n}\n\nmessage TaskCheckpointed {\n  string container_id = 1;\n  string checkpoint = 2;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.containers.v1;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/empty.proto\";\nimport \"google/protobuf/field_mask.proto\";\nimport \"google/protobuf/timestamp.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/containers/v1;containers\";\n\n// Containers provides metadata storage for containers used in the execution\n// service.\n//\n// The objects here provide an state-independent view of containers for use in\n// management and resource pinning. From that perspective, containers do not\n// have a \"state\" but rather this is the set of resources that will be\n// considered in use by the container.\n//\n// From the perspective of the execution service, these objects represent the\n// base parameters for creating a container process.\n//\n// In general, when looking to add fields for this type, first ask yourself\n// whether or not the function of the field has to do with runtime execution or\n// is invariant of the runtime state of the container. 
If it has to do with\n// runtime, or changes as the \"container\" is started and stops, it probably\n// doesn't belong on this object.\nservice Containers {\n  rpc Get(GetContainerRequest) returns (GetContainerResponse);\n  rpc List(ListContainersRequest) returns (ListContainersResponse);\n  rpc ListStream(ListContainersRequest) returns (stream ListContainerMessage);\n  rpc Create(CreateContainerRequest) returns (CreateContainerResponse);\n  rpc Update(UpdateContainerRequest) returns (UpdateContainerResponse);\n  rpc Delete(DeleteContainerRequest) returns (google.protobuf.Empty);\n}\n\nmessage Container {\n  // ID is the user-specified identifier.\n  //\n  // This field may not be updated.\n  string id = 1;\n\n  // Labels provides an area to include arbitrary data on containers.\n  //\n  // The combined size of a key/value pair cannot exceed 4096 bytes.\n  //\n  // Note that to add a new value to this field, read the existing set and\n  // include the entire result in the update call.\n  map<string, string> labels = 2;\n\n  // Image contains the reference of the image used to build the\n  // specification and snapshots for running this container.\n  //\n  // If this field is updated, the spec and rootfs needed to updated, as well.\n  string image = 3;\n\n  message Runtime {\n    // Name is the name of the runtime.\n    string name = 1;\n    // Options specify additional runtime initialization options.\n    google.protobuf.Any options = 2;\n  }\n  // Runtime specifies which runtime to use for executing this container.\n  Runtime runtime = 4;\n\n  // Spec to be used when creating the container. This is runtime specific.\n  google.protobuf.Any spec = 5;\n\n  // Snapshotter specifies the snapshotter name used for rootfs\n  string snapshotter = 6;\n\n  // SnapshotKey specifies the snapshot key to use for the container's root\n  // filesystem. 
When starting a task from this container, a caller should\n  // look up the mounts from the snapshot service and include those on the\n  // task create request.\n  //\n  // Snapshots referenced in this field will not be garbage collected.\n  //\n  // This field is set to empty when the rootfs is not a snapshot.\n  //\n  // This field may be updated.\n  string snapshot_key = 7;\n\n  // CreatedAt is the time the container was first created.\n  google.protobuf.Timestamp created_at = 8;\n\n  // UpdatedAt is the last time the container was mutated.\n  google.protobuf.Timestamp updated_at = 9;\n\n  // Extensions allow clients to provide zero or more blobs that are directly\n  // associated with the container. One may provide protobuf, json, or other\n  // encoding formats. The primary use of this is to further decorate the\n  // container object with fields that may be specific to a client integration.\n  //\n  // The key portion of this map should identify a \"name\" for the extension\n  // that should be unique against other extensions. When updating extension\n  // data, one should only update the specified extension using field paths\n  // to select a specific map key.\n  map<string, google.protobuf.Any> extensions = 10;\n\n  // Sandbox ID this container belongs to.\n  string sandbox = 11;\n}\n\nmessage GetContainerRequest {\n  string id = 1;\n}\n\nmessage GetContainerResponse {\n  Container container = 1;\n}\n\nmessage ListContainersRequest {\n  // Filters contains one or more filters using the syntax defined in the\n  // containerd filter package.\n  //\n  // The returned result will be those that match any of the provided\n  // filters. Expanded, containers that match the following will be\n  // returned:\n  //\n  //\tfilters[0] or filters[1] or ... 
or filters[n-1] or filters[n]\n  //\n  // If filters is zero-length or nil, all items will be returned.\n  repeated string filters = 1;\n}\n\nmessage ListContainersResponse {\n  repeated Container containers = 1;\n}\n\nmessage CreateContainerRequest {\n  Container container = 1;\n}\n\nmessage CreateContainerResponse {\n  Container container = 1;\n}\n\n// UpdateContainerRequest updates the metadata on one or more container.\n//\n// The operation should follow semantics described in\n// https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-mask,\n// unless otherwise qualified.\nmessage UpdateContainerRequest {\n  // Container provides the target values, as declared by the mask, for the update.\n  //\n  // The ID field must be set.\n  Container container = 1;\n\n  // UpdateMask specifies which fields to perform the update on. If empty,\n  // the operation applies to all fields.\n  google.protobuf.FieldMask update_mask = 2;\n}\n\nmessage UpdateContainerResponse {\n  Container container = 1;\n}\n\nmessage DeleteContainerRequest {\n  string id = 1;\n}\n\nmessage ListContainerMessage {\n  Container container = 1;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.content.v1;\n\nimport \"google/protobuf/empty.proto\";\nimport \"google/protobuf/field_mask.proto\";\nimport \"google/protobuf/timestamp.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/content/v1;content\";\n\n// Content provides access to a content addressable storage system.\nservice Content {\n  // Info returns information about a committed object.\n  //\n  // This call can be used for getting the size of content and checking for\n  // existence.\n  rpc Info(InfoRequest) returns (InfoResponse);\n\n  // Update updates content metadata.\n  //\n  // This call can be used to manage the mutable content labels. 
The\n  // immutable metadata such as digest, size, and committed at cannot\n  // be updated.\n  rpc Update(UpdateRequest) returns (UpdateResponse);\n\n  // List streams the entire set of content as Info objects and closes the\n  // stream.\n  //\n  // Typically, this will yield a large response, chunked into messages.\n  // Clients should make provisions to ensure they can handle the entire data\n  // set.\n  rpc List(ListContentRequest) returns (stream ListContentResponse);\n\n  // Delete will delete the referenced object.\n  rpc Delete(DeleteContentRequest) returns (google.protobuf.Empty);\n\n  // Read allows one to read an object based on the offset into the content.\n  //\n  // The requested data may be returned in one or more messages.\n  rpc Read(ReadContentRequest) returns (stream ReadContentResponse);\n\n  // Status returns the status for a single reference.\n  rpc Status(StatusRequest) returns (StatusResponse);\n\n  // ListStatuses returns the status of ongoing object ingestions, started via\n  // Write.\n  //\n  // Only those matching the regular expression will be provided in the\n  // response. If the provided regular expression is empty, all ingestions\n  // will be provided.\n  rpc ListStatuses(ListStatusesRequest) returns (ListStatusesResponse);\n\n  // Write begins or resumes writes to a resource identified by a unique ref.\n  // Only one active stream may exist at a time for each ref.\n  //\n  // Once a write stream has started, it may only write to a single ref, thus\n  // once a stream is started, the ref may be omitted on subsequent writes.\n  //\n  // For any write transaction represented by a ref, only a single write may\n  // be made to a given offset. 
If overlapping writes occur, it is an error.\n  // Writes should be sequential and implementations may throw an error if\n  // this is required.\n  //\n  // If expected_digest is set and already part of the content store, the\n  // write will fail.\n  //\n  // When completed, the commit flag should be set to true. If expected size\n  // or digest is set, the content will be validated against those values.\n  rpc Write(stream WriteContentRequest) returns (stream WriteContentResponse);\n\n  // Abort cancels the ongoing write named in the request. Any resources\n  // associated with the write will be collected.\n  rpc Abort(AbortRequest) returns (google.protobuf.Empty);\n}\n\nmessage Info {\n  // Digest is the hash identity of the blob.\n  string digest = 1;\n\n  // Size is the total number of bytes in the blob.\n  int64 size = 2;\n\n  // CreatedAt provides the time at which the blob was committed.\n  google.protobuf.Timestamp created_at = 3;\n\n  // UpdatedAt provides the time the info was last updated.\n  google.protobuf.Timestamp updated_at = 4;\n\n  // Labels are arbitrary data on snapshots.\n  //\n  // The combined size of a key/value pair cannot exceed 4096 bytes.\n  map<string, string> labels = 5;\n}\n\nmessage InfoRequest {\n  string digest = 1;\n}\n\nmessage InfoResponse {\n  Info info = 1;\n}\n\nmessage UpdateRequest {\n  Info info = 1;\n\n  // UpdateMask specifies which fields to perform the update on. If empty,\n  // the operation applies to all fields.\n  //\n  // In info, Digest, Size, and CreatedAt are immutable,\n  // other field may be updated using this mask.\n  // If no mask is provided, all mutable field are updated.\n  google.protobuf.FieldMask update_mask = 2;\n}\n\nmessage UpdateResponse {\n  Info info = 1;\n}\n\nmessage ListContentRequest {\n  // Filters contains one or more filters using the syntax defined in the\n  // containerd filter package.\n  //\n  // The returned result will be those that match any of the provided\n  // filters. 
Expanded, containers that match the following will be\n  //\n  //\tfilters[0] or filters[1] or ... or filters[n-1] or filters[n]\n  //\n  // If filters is zero-length or nil, all items will be returned.\n  repeated string filters = 1;\n}\n\nmessage ListContentResponse {\n  repeated Info info = 1;\n}\n\nmessage DeleteContentRequest {\n  // Digest specifies which content to delete.\n  string digest = 1;\n}\n\n// ReadContentRequest defines the fields that make up a request to read a portion of\n// data from a stored object.\nmessage ReadContentRequest {\n  // Digest is the hash identity to read.\n  string digest = 1;\n\n  // Offset specifies the number of bytes from the start at which to begin\n  // the read. If zero or less, the read will be from the start. This uses\n  // standard zero-indexed semantics.\n  int64 offset = 2;\n\n  // size is the total size of the read. If zero, the entire blob will be\n  // returned by the service.\n  int64 size = 3;\n}\n\n// ReadContentResponse carries byte data for a read request.\nmessage ReadContentResponse {\n  int64 offset = 1; // offset of the returned data\n  bytes data = 2; // actual data\n}\n\nmessage Status {\n  google.protobuf.Timestamp started_at = 1;\n  google.protobuf.Timestamp updated_at = 2;\n  string ref = 3;\n  int64 offset = 4;\n  int64 total = 5;\n  string expected = 6;\n}\n\nmessage StatusRequest {\n  string ref = 1;\n}\n\nmessage StatusResponse {\n  Status status = 1;\n}\n\nmessage ListStatusesRequest {\n  repeated string filters = 1;\n}\n\nmessage ListStatusesResponse {\n  repeated Status statuses = 1;\n}\n\n// WriteAction defines the behavior of a WriteRequest.\nenum WriteAction {\n  // WriteActionStat instructs the writer to return the current status while\n  // holding the lock on the write.\n  STAT = 0;\n\n  // WriteActionWrite sets the action for the write request to write data.\n  //\n  // Any data included will be written at the provided offset. The\n  // transaction will be left open for further writes.\n  //\n  // This is the default.\n  WRITE = 1;\n\n  // WriteActionCommit will write any outstanding data in the message and\n  // commit the write, storing it under the digest.\n  //\n  // This can be used in a single message to send the data, verify it and\n  // commit it.\n  //\n  // This action will always terminate the write.\n  COMMIT = 2;\n}\n\n// WriteContentRequest writes data to the request ref at offset.\nmessage WriteContentRequest {\n  // Action sets the behavior of the write.\n  //\n  // When this is a write and the ref is not yet allocated, the ref will be\n  // allocated and the data will be written at offset.\n  //\n  // If the action is write and the ref is allocated, it will accept data to\n  // an offset that has not yet been written.\n  //\n  // If the action is write and there is no data, the current write status\n  // will be returned. This works differently from status because the stream\n  // holds a lock.\n  WriteAction action = 1;\n\n  // Ref identifies the pre-commit object to write to.\n  string ref = 2;\n\n  // Total can be set to have the service validate the total size of the\n  // committed content.\n  //\n  // The latest value before or with the commit action message will be use to\n  // validate the content. If the offset overflows total, the service may\n  // report an error. It is only required on one message for the write.\n  //\n  // If the value is zero or less, no validation of the final content will be\n  // performed.\n  int64 total = 3;\n\n  // Expected can be set to have the service validate the final content against\n  // the provided digest.\n  //\n  // If the digest is already present in the object store, an AlreadyExists\n  // error will be returned.\n  //\n  // Only the latest version will be used to check the content against the\n  // digest. It is only required to include it on a single message, before or\n  // with the commit action message.\n  string expected = 4;\n\n  // Offset specifies the number of bytes from the start at which to begin\n  // the write. For most implementations, this means from the start of the\n  // file. This uses standard, zero-indexed semantics.\n  //\n  // If the action is write, the remote may remove all previously written\n  // data after the offset. Implementations may support arbitrary offsets but\n  // MUST support reseting this value to zero with a write. If an\n  // implementation does not support a write at a particular offset, an\n  // OutOfRange error must be returned.\n  int64 offset = 5;\n\n  // Data is the actual bytes to be written.\n  //\n  // If this is empty and the message is not a commit, a response will be\n  // returned with the current write state.\n  bytes data = 6;\n\n  // Labels are arbitrary data on snapshots.\n  //\n  // The combined size of a key/value pair cannot exceed 4096 bytes.\n  map<string, string> labels = 7;\n}\n\n// WriteContentResponse is returned on the culmination of a write call.\nmessage WriteContentResponse {\n  // Action contains the action for the final message of the stream. A writer\n  // should confirm that they match the intended result.\n  WriteAction action = 1;\n\n  // StartedAt provides the time at which the write began.\n  //\n  // This must be set for stat and commit write actions. All other write\n  // actions may omit this.\n  google.protobuf.Timestamp started_at = 2;\n\n  // UpdatedAt provides the last time of a successful write.\n  //\n  // This must be set for stat and commit write actions. All other write\n  // actions may omit this.\n  google.protobuf.Timestamp updated_at = 3;\n\n  // Offset is the current committed size for the write.\n  int64 offset = 4;\n\n  // Total provides the current, expected total size of the write.\n  //\n  // We include this to provide consistency with the Status structure on the\n  // client writer.\n  //\n  // This is only valid on the Stat and Commit response.\n  int64 total = 5;\n\n  // Digest, if present, includes the digest up to the currently committed\n  // bytes. If action is commit, this field will be set. It is implementation\n  // defined if this is set for other actions.\n  string digest = 6;\n}\n\nmessage AbortRequest {\n  string ref = 1;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.diff.v1;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/timestamp.proto\";\nimport \"types/descriptor.proto\";\nimport \"types/mount.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/diff/v1;diff\";\n\n// Diff service creates and applies diffs\nservice Diff {\n  // Apply applies the content associated with the provided digests onto\n  // the provided mounts. Archive content will be extracted and\n  // decompressed if necessary.\n  rpc Apply(ApplyRequest) returns (ApplyResponse);\n\n  // Diff creates a diff between the given mounts and uploads the result\n  // to the content store.\n  rpc Diff(DiffRequest) returns (DiffResponse);\n}\n\nmessage ApplyRequest {\n  // Diff is the descriptor of the diff to be extracted\n  containerd.types.Descriptor diff = 1;\n\n  repeated containerd.types.Mount mounts = 2;\n\n  map<string, google.protobuf.Any> payloads = 3;\n  // SyncFs is to synchronize the underlying filesystem containing files.\n  bool sync_fs = 4;\n}\n\nmessage ApplyResponse {\n  // Applied is the descriptor for the object which was applied.\n  // If the input was a compressed blob then the result will be\n  // the descriptor for the uncompressed blob.\n  containerd.types.Descriptor applied = 1;\n}\n\nmessage DiffRequest {\n  // Left are the mounts which represent the older copy\n  // in which is the base of the computed changes.\n  repeated containerd.types.Mount left = 1;\n\n  // Right are the mounts which represents the newer copy\n  // in which changes from the left were made into.\n  repeated containerd.types.Mount right = 2;\n\n  // MediaType is the media type descriptor for the created diff\n  // object\n  string media_type = 3;\n\n  // Ref identifies the pre-commit content store object. This\n  // reference can be used to get the status from the content store.\n  string ref = 4;\n\n  // Labels are the labels to apply to the generated content\n  // on content store commit.\n  map<string, string> labels = 5;\n\n  // SourceDateEpoch specifies the timestamp used to provide control for reproducibility.\n  // See also https://reproducible-builds.org/docs/source-date-epoch/ .\n  //\n  // Since containerd v2.0, the whiteout timestamps are set to zero (1970-01-01),\n  // not to the source date epoch.\n  google.protobuf.Timestamp source_date_epoch = 6;\n}\n\nmessage DiffResponse {\n  // Diff is the descriptor of the diff which can be applied\n  containerd.types.Descriptor diff = 3;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/services/events/v1/events.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.events.v1;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/empty.proto\";\nimport \"types/event.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/events/v1;events\";\n\nservice Events {\n  // Publish an event to a topic.\n  //\n  // The event will be packed into a timestamp envelope with the namespace\n  // introspected from the context. The envelope will then be dispatched.\n  rpc Publish(PublishRequest) returns (google.protobuf.Empty);\n\n  // Forward sends an event that has already been packaged into an envelope\n  // with a timestamp and namespace.\n  //\n  // This is useful if earlier timestamping is required or when forwarding on\n  // behalf of another component, namespace or publisher.\n  rpc Forward(ForwardRequest) returns (google.protobuf.Empty);\n\n  // Subscribe to a stream of events, possibly returning only that match any\n  // of the provided filters.\n  //\n  // Unlike many other methods in containerd, subscribers will get messages\n  // from all namespaces unless otherwise specified. If this is not desired,\n  // a filter can be provided in the format 'namespace==<namespace>' to\n  // restrict the received events.\n  rpc Subscribe(SubscribeRequest) returns (stream containerd.types.Envelope);\n}\n\nmessage PublishRequest {\n  string topic = 1;\n  google.protobuf.Any event = 2;\n}\n\nmessage ForwardRequest {\n  containerd.types.Envelope envelope = 1;\n}\n\nmessage SubscribeRequest {\n  repeated string filters = 1;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/services/images/v1/images.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.images.v1;\n\nimport \"google/protobuf/empty.proto\";\nimport \"google/protobuf/field_mask.proto\";\nimport \"google/protobuf/timestamp.proto\";\nimport \"types/descriptor.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/images/v1;images\";\n\n// Images is a service that allows one to register images with containerd.\n//\n// In containerd, an image is merely the mapping of a name to a content root,\n// described by a descriptor. The behavior and state of image is purely\n// dictated by the type of the descriptor.\n//\n// From the perspective of this service, these references are mostly shallow,\n// in that the existence of the required content won't be validated until\n// required by consuming services.\n//\n// As such, this can really be considered a \"metadata service\".\nservice Images {\n  // Get returns an image by name.\n  rpc Get(GetImageRequest) returns (GetImageResponse);\n\n  // List returns a list of all images known to containerd.\n  rpc List(ListImagesRequest) returns (ListImagesResponse);\n\n  // Create an image record in the metadata store.\n  //\n  // The name of the image must be unique.\n  rpc Create(CreateImageRequest) returns (CreateImageResponse);\n\n  // Update assigns the name to a given target image based on the provided\n  // image.\n  rpc Update(UpdateImageRequest) returns (UpdateImageResponse);\n\n  // Delete deletes the image by name.\n  rpc Delete(DeleteImageRequest) returns (google.protobuf.Empty);\n}\n\nmessage Image {\n  // Name provides a unique name for the image.\n  //\n  // Containerd treats this as the primary identifier.\n  string name = 1;\n\n  // Labels provides free form labels for the image. These are runtime only\n  // and do not get inherited into the package image in any way.\n  //\n  // Labels may be updated using the field mask.\n  // The combined size of a key/value pair cannot exceed 4096 bytes.\n  map<string, string> labels = 2;\n\n  // Target describes the content entry point of the image.\n  containerd.types.Descriptor target = 3;\n\n  // CreatedAt is the time the image was first created.\n  google.protobuf.Timestamp created_at = 7;\n\n  // UpdatedAt is the last time the image was mutated.\n  google.protobuf.Timestamp updated_at = 8;\n}\n\nmessage GetImageRequest {\n  string name = 1;\n}\n\nmessage GetImageResponse {\n  Image image = 1;\n}\n\nmessage CreateImageRequest {\n  Image image = 1;\n\n  google.protobuf.Timestamp source_date_epoch = 2;\n}\n\nmessage CreateImageResponse {\n  Image image = 1;\n}\n\nmessage UpdateImageRequest {\n  // Image provides a full or partial image for update.\n  //\n  // The name field must be set or an error will be returned.\n  Image image = 1;\n\n  // UpdateMask specifies which fields to perform the update on. If empty,\n  // the operation applies to all fields.\n  google.protobuf.FieldMask update_mask = 2;\n\n  google.protobuf.Timestamp source_date_epoch = 3;\n}\n\nmessage UpdateImageResponse {\n  Image image = 1;\n}\n\nmessage ListImagesRequest {\n  // Filters contains one or more filters using the syntax defined in the\n  // containerd filter package.\n  //\n  // The returned result will be those that match any of the provided\n  // filters. Expanded, images that match the following will be\n  // returned:\n  //\n  //\tfilters[0] or filters[1] or ... or filters[n-1] or filters[n]\n  //\n  // If filters is zero-length or nil, all items will be returned.\n  repeated string filters = 1;\n}\n\nmessage ListImagesResponse {\n  repeated Image images = 1;\n}\n\nmessage DeleteImageRequest {\n  string name = 1;\n\n  // Sync indicates that the delete and cleanup should be done\n  // synchronously before returning to the caller\n  //\n  // Default is false\n  bool sync = 2;\n\n  // Target value for image to be deleted\n  //\n  // If image descriptor does not match the same digest,\n  // the delete operation will return \"not found\" error.\n  optional containerd.types.Descriptor target = 3;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.introspection.v1;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/empty.proto\";\nimport \"google/protobuf/timestamp.proto\";\nimport \"google/rpc/status.proto\";\nimport \"types/introspection.proto\";\nimport \"types/platform.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/introspection/v1;introspection\";\n\nservice Introspection {\n  // Plugins returns a list of plugins in containerd.\n  //\n  // Clients can use this to detect features and capabilities when using\n  // containerd.\n  rpc Plugins(PluginsRequest) returns (PluginsResponse);\n  // Server returns information about the containerd server\n  rpc Server(google.protobuf.Empty) returns (ServerResponse);\n  // PluginInfo returns information directly from a plugin if the plugin supports it\n  rpc PluginInfo(PluginInfoRequest) returns (PluginInfoResponse);\n}\n\nmessage Plugin {\n  // Type defines the type of plugin.\n  //\n  // See package plugin for a list of possible values. Non core plugins may\n  // define their own values during registration.\n  string type = 1;\n\n  // ID identifies the plugin uniquely in the system.\n  string id = 2;\n\n  // Requires lists the plugin types required by this plugin.\n  repeated string requires = 3;\n\n  // Platforms enumerates the platforms this plugin will support.\n  //\n  // If values are provided here, the plugin will only be operable under the\n  // provided platforms.\n  //\n  // If this is empty, the plugin will work across all platforms.\n  //\n  // If the plugin prefers certain platforms over others, they should be\n  // listed from most to least preferred.\n  repeated types.Platform platforms = 4;\n\n  // Exports allows plugins to provide values about state or configuration to\n  // interested parties.\n  //\n  // One example is exposing the configured path of a snapshotter plugin.\n  map<string, string> exports = 5;\n\n  // Capabilities allows plugins to communicate feature switches to allow\n  // clients to detect features that may not be on be default or may be\n  // different from version to version.\n  //\n  // Use this sparingly.\n  repeated string capabilities = 6;\n\n  // InitErr will be set if the plugin fails initialization.\n  //\n  // This means the plugin may have been registered but a non-terminal error\n  // was encountered during initialization.\n  //\n  // Plugins that have this value set cannot be used.\n  google.rpc.Status init_err = 7;\n}\n\nmessage PluginsRequest {\n  // Filters contains one or more filters using the syntax defined in the\n  // containerd filter package.\n  //\n  // The returned result will be those that match any of the provided\n  // filters. Expanded, plugins that match the following will be\n  // returned:\n  //\n  //\tfilters[0] or filters[1] or ... or filters[n-1] or filters[n]\n  //\n  // If filters is zero-length or nil, all items will be returned.\n  repeated string filters = 1;\n}\n\nmessage PluginsResponse {\n  repeated Plugin plugins = 1;\n}\n\nmessage ServerResponse {\n  string uuid = 1;\n  uint64 pid = 2;\n  uint64 pidns = 3; // PID namespace, such as 4026531836\n  repeated DeprecationWarning deprecations = 4;\n}\n\nmessage DeprecationWarning {\n  string id = 1;\n  string message = 2;\n  google.protobuf.Timestamp last_occurrence = 3;\n}\n\nmessage PluginInfoRequest {\n  string type = 1;\n  string id = 2;\n\n  // Options may be used to request extra dynamic information from\n  // a plugin.\n  // This object is determined by the plugin and the plugin may return\n  // NotImplemented or InvalidArgument if it is not supported\n  google.protobuf.Any options = 3;\n}\n\nmessage PluginInfoResponse {\n  Plugin plugin = 1;\n  google.protobuf.Any extra = 2;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\nsyntax = \"proto3\";\n\npackage containerd.services.leases.v1;\n\nimport \"google/protobuf/empty.proto\";\nimport \"google/protobuf/timestamp.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/leases/v1;leases\";\n\n// Leases service manages resources leases within the metadata store.\nservice Leases {\n  // Create creates a new lease for managing changes to metadata. A lease\n  // can be used to protect objects from being removed.\n  rpc Create(CreateRequest) returns (CreateResponse);\n\n  // Delete deletes the lease and makes any unreferenced objects created\n  // during the lease eligible for garbage collection if not referenced\n  // or retained by other resources during the lease.\n  rpc Delete(DeleteRequest) returns (google.protobuf.Empty);\n\n  // List lists all active leases, returning the full list of\n  // leases and optionally including the referenced resources.\n  rpc List(ListRequest) returns (ListResponse);\n\n  // AddResource references the resource by the provided lease.\n  rpc AddResource(AddResourceRequest) returns (google.protobuf.Empty);\n\n  // DeleteResource dereferences the resource by the provided lease.\n  rpc DeleteResource(DeleteResourceRequest) returns (google.protobuf.Empty);\n\n  // ListResources lists all the resources referenced by the lease.\n  rpc ListResources(ListResourcesRequest) returns (ListResourcesResponse);\n}\n\n// Lease is an object which retains resources while it exists.\nmessage Lease {\n  string id = 1;\n\n  google.protobuf.Timestamp created_at = 2;\n\n  map<string, string> labels = 3;\n}\n\nmessage CreateRequest {\n  // ID is used to identity the lease, when the id is not set the service\n  // generates a random identifier for the lease.\n  string id = 1;\n\n  map<string, string> labels = 3;\n}\n\nmessage CreateResponse {\n  Lease lease = 1;\n}\n\nmessage DeleteRequest {\n  string id = 1;\n\n  // Sync indicates that the delete and cleanup should be done\n  // synchronously before returning to the caller\n  //\n  // Default is false\n  bool sync = 2;\n}\n\nmessage ListRequest {\n  repeated string filters = 1;\n}\n\nmessage ListResponse {\n  repeated Lease leases = 1;\n}\n\nmessage Resource {\n  string id = 1;\n\n  // For snapshotter resource, there are many snapshotter types here, like\n  // overlayfs, devmapper etc. The type will be formatted with type,\n  // like \"snapshotter/overlayfs\".\n  string type = 2;\n}\n\nmessage AddResourceRequest {\n  string id = 1;\n\n  Resource resource = 2;\n}\n\nmessage DeleteResourceRequest {\n  string id = 1;\n\n  Resource resource = 2;\n}\n\nmessage ListResourcesRequest {\n  string id = 1;\n}\n\nmessage ListResourcesResponse {\n  repeated Resource resources = 1;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/services/mounts/v1/mounts.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\nsyntax = \"proto3\";\n\npackage containerd.services.mounts.v1;\n\nimport \"google/protobuf/empty.proto\";\nimport \"google/protobuf/field_mask.proto\";\nimport \"types/mount.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/mounts/v1;mounts\";\n\n// Mounts service manages mounts\nservice Mounts {\n  rpc Activate(ActivateRequest) returns (ActivateResponse);\n  rpc Deactivate(DeactivateRequest) returns (google.protobuf.Empty);\n  rpc Info(InfoRequest) returns (InfoResponse);\n  rpc Update(UpdateRequest) returns (UpdateResponse);\n  rpc List(ListRequest) returns (stream ListMessage);\n}\n\nmessage ActivateRequest {\n  string name = 1;\n\n  repeated containerd.types.Mount mounts = 2;\n\n  map<string, string> labels = 3;\n\n  bool temporary = 4;\n}\n\nmessage ActivateResponse {\n  containerd.types.ActivationInfo info = 1;\n}\n\nmessage DeactivateRequest {\n  string name = 1;\n}\n\nmessage InfoRequest {\n  string name = 1;\n}\n\nmessage InfoResponse {\n  containerd.types.ActivationInfo info = 1;\n}\n\nmessage UpdateRequest {\n  containerd.types.ActivationInfo info = 1;\n\n  google.protobuf.FieldMask update_mask = 2;\n}\n\nmessage UpdateResponse {\n  containerd.types.ActivationInfo info = 1;\n}\n\nmessage ListRequest {\n  repeated string filters = 1;\n}\n\nmessage ListMessage {\n  containerd.types.ActivationInfo info = 1;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.namespaces.v1;\n\nimport \"google/protobuf/empty.proto\";\nimport \"google/protobuf/field_mask.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/namespaces/v1;namespaces\";\n\n// Namespaces provides the ability to manipulate containerd namespaces.\n//\n// All objects in the system are required to be a member of a namespace. If a\n// namespace is deleted, all objects, including containers, images and\n// snapshots, will be deleted, as well.\n//\n// Unless otherwise noted, operations in containerd apply only to the namespace\n// supplied per request.\n//\n// I hope this goes without saying, but namespaces are themselves NOT\n// namespaced.\nservice Namespaces {\n  rpc Get(GetNamespaceRequest) returns (GetNamespaceResponse);\n  rpc List(ListNamespacesRequest) returns (ListNamespacesResponse);\n  rpc Create(CreateNamespaceRequest) returns (CreateNamespaceResponse);\n  rpc Update(UpdateNamespaceRequest) returns (UpdateNamespaceResponse);\n  rpc Delete(DeleteNamespaceRequest) returns (google.protobuf.Empty);\n}\n\nmessage Namespace {\n  string name = 1;\n\n  // Labels provides an area to include arbitrary data on namespaces.\n  //\n  // The combined size of a key/value pair cannot exceed 4096 bytes.\n  //\n  // Note that to add a new value to this field, read the existing set and\n  // include the entire result in the update call.\n  map<string, string> labels = 2;\n}\n\nmessage GetNamespaceRequest {\n  string name = 1;\n}\n\nmessage GetNamespaceResponse {\n  Namespace namespace = 1;\n}\n\nmessage ListNamespacesRequest {\n  string filter = 1;\n}\n\nmessage ListNamespacesResponse {\n  repeated Namespace namespaces = 1;\n}\n\nmessage CreateNamespaceRequest {\n  Namespace namespace = 1;\n}\n\nmessage CreateNamespaceResponse {\n  Namespace namespace = 1;\n}\n\n// UpdateNamespaceRequest updates the metadata for a namespace.\n//\n// The operation should follow semantics described in\n// https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-mask,\n// unless otherwise qualified.\nmessage UpdateNamespaceRequest {\n  // Namespace provides the target value, as declared by the mask, for the update.\n  //\n  // The namespace field must be set.\n  Namespace namespace = 1;\n\n  // UpdateMask specifies which fields to perform the update on. If empty,\n  // the operation applies to all fields.\n  //\n  // For the most part, this applies only to selectively updating labels on\n  // the namespace. While field masks are typically limited to ascii alphas\n  // and digits, we just take everything after the \"labels.\" as the map key.\n  google.protobuf.FieldMask update_mask = 2;\n}\n\nmessage UpdateNamespaceResponse {\n  Namespace namespace = 1;\n}\n\nmessage DeleteNamespaceRequest {\n  string name = 1;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\n// Sandbox is a v2 runtime extension that allows more complex execution environments for containers.\n// This adds a notion of groups of containers that share same lifecycle and/or resources.\n// A few good fits for sandbox can be:\n// - A \"pause\" container in k8s, that acts as a parent process for child containers to hold network namespace.\n// - (micro)VMs that launch a VM process and executes containers inside guest OS.\n// containerd in this case remains implementation agnostic and delegates sandbox handling to runtimes.\n// See proposal and discussion here: https://github.com/containerd/containerd/issues/4131\npackage containerd.services.sandbox.v1;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/timestamp.proto\";\nimport \"types/metrics.proto\";\nimport \"types/mount.proto\";\nimport \"types/platform.proto\";\nimport \"types/sandbox.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/sandbox/v1;sandbox\";\n\n// Store provides a metadata storage interface for sandboxes. Similarly to `Containers`,\n// sandbox object includes info required to start a new instance, but no runtime state.\n// When running a new sandbox instance, store objects are used as base type to create from.\nservice Store {\n  rpc Create(StoreCreateRequest) returns (StoreCreateResponse);\n  rpc Update(StoreUpdateRequest) returns (StoreUpdateResponse);\n  rpc Delete(StoreDeleteRequest) returns (StoreDeleteResponse);\n  rpc List(StoreListRequest) returns (StoreListResponse);\n  rpc Get(StoreGetRequest) returns (StoreGetResponse);\n}\n\nmessage StoreCreateRequest {\n  containerd.types.Sandbox sandbox = 1;\n}\n\nmessage StoreCreateResponse {\n  containerd.types.Sandbox sandbox = 1;\n}\n\nmessage StoreUpdateRequest {\n  containerd.types.Sandbox sandbox = 1;\n  repeated string fields = 2;\n}\n\nmessage StoreUpdateResponse {\n  containerd.types.Sandbox sandbox = 1;\n}\n\nmessage StoreDeleteRequest {\n  string sandbox_id = 1;\n}\n\nmessage StoreDeleteResponse {}\n\nmessage StoreListRequest {\n  repeated string filters = 1;\n}\n\nmessage StoreListResponse {\n  repeated containerd.types.Sandbox list = 1;\n}\n\nmessage StoreGetRequest {\n  string sandbox_id = 1;\n}\n\nmessage StoreGetResponse {\n  containerd.types.Sandbox sandbox = 1;\n}\n\n// Controller is an interface to manage runtime sandbox instances.\nservice Controller {\n  rpc Create(ControllerCreateRequest) returns (ControllerCreateResponse);\n  rpc Start(ControllerStartRequest) returns (ControllerStartResponse);\n  rpc Platform(ControllerPlatformRequest) returns (ControllerPlatformResponse);\n  rpc Stop(ControllerStopRequest) returns (ControllerStopResponse);\n  rpc Wait(ControllerWaitRequest) returns (ControllerWaitResponse);\n  rpc Status(ControllerStatusRequest) returns (ControllerStatusResponse);\n  rpc Shutdown(ControllerShutdownRequest) returns (ControllerShutdownResponse);\n  rpc Metrics(ControllerMetricsRequest) returns (ControllerMetricsResponse);\n  rpc Update(ControllerUpdateRequest) returns (ControllerUpdateResponse);\n}\n\nmessage ControllerCreateRequest {\n  string sandbox_id = 1;\n  repeated containerd.types.Mount rootfs = 2;\n  google.protobuf.Any options = 3;\n  string netns_path = 4;\n  map<string, string> annotations = 5;\n  containerd.types.Sandbox sandbox = 6;\n  string sandboxer = 10;\n}\n\nmessage ControllerCreateResponse {\n  string sandbox_id = 1;\n}\n\nmessage ControllerStartRequest {\n  string sandbox_id = 1;\n  string sandboxer = 10;\n}\n\nmessage ControllerStartResponse {\n  string sandbox_id = 1;\n  uint32 pid = 2;\n  google.protobuf.Timestamp created_at = 3;\n  map<string, string> labels = 4;\n  // Address of the sandbox for containerd to connect,\n  // for calling Task or other APIs serving in the sandbox.\n  // it is in the form of ttrpc+unix://path/to/uds or grpc+vsock://<vsock cid>:<port>.\n  string address = 5;\n  uint32 version = 6;\n  google.protobuf.Any spec = 7;\n}\n\nmessage ControllerPlatformRequest {\n  string sandbox_id = 1;\n  string sandboxer = 10;\n}\n\nmessage ControllerPlatformResponse {\n  containerd.types.Platform platform = 1;\n}\n\nmessage ControllerStopRequest {\n  string sandbox_id = 1;\n  uint32 timeout_secs = 2;\n  string sandboxer = 10;\n}\n\nmessage ControllerStopResponse {}\n\nmessage ControllerWaitRequest {\n  string sandbox_id = 1;\n  string sandboxer = 10;\n}\n\nmessage ControllerWaitResponse {\n  uint32 exit_status = 1;\n  google.protobuf.Timestamp exited_at = 2;\n}\n\nmessage ControllerStatusRequest {\n  string sandbox_id = 1;\n  bool verbose = 2;\n  string sandboxer = 10;\n}\n\nmessage ControllerStatusResponse {\n  string sandbox_id = 1;\n  uint32 pid = 2;\n  string state = 3;\n  map<string, string> info = 4;\n  google.protobuf.Timestamp created_at = 5;\n  google.protobuf.Timestamp exited_at = 6;\n  google.protobuf.Any extra = 7;\n  // Address of the sandbox for containerd to connect,\n  // for calling Task or other APIs serving in the sandbox.\n  // it is in the form of ttrpc+unix://path/to/uds or grpc+vsock://<vsock cid>:<port>.\n  string address = 8;\n  uint32 version = 9;\n}\n\nmessage ControllerShutdownRequest {\n  string sandbox_id = 1;\n  string sandboxer = 10;\n}\n\nmessage ControllerShutdownResponse {}\n\nmessage ControllerMetricsRequest {\n  string sandbox_id = 1;\n  string sandboxer = 10;\n}\n\nmessage ControllerMetricsResponse {\n  types.Metric metrics = 1;\n}\n\nmessage ControllerUpdateRequest {\n  string sandbox_id = 1;\n  string sandboxer = 2;\n  containerd.types.Sandbox sandbox = 3;\n  repeated string fields = 4;\n}\n\nmessage ControllerUpdateResponse {}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.snapshots.v1;\n\nimport \"google/protobuf/empty.proto\";\nimport \"google/protobuf/field_mask.proto\";\nimport \"google/protobuf/timestamp.proto\";\nimport \"types/mount.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/snapshots/v1;snapshots\";\n\n// Snapshot service manages snapshots\nservice Snapshots {\n  rpc Prepare(PrepareSnapshotRequest) returns (PrepareSnapshotResponse);\n  rpc View(ViewSnapshotRequest) returns (ViewSnapshotResponse);\n  rpc Mounts(MountsRequest) returns (MountsResponse);\n  rpc Commit(CommitSnapshotRequest) returns (google.protobuf.Empty);\n  rpc Remove(RemoveSnapshotRequest) returns (google.protobuf.Empty);\n  rpc Stat(StatSnapshotRequest) returns (StatSnapshotResponse);\n  rpc Update(UpdateSnapshotRequest) returns (UpdateSnapshotResponse);\n  rpc List(ListSnapshotsRequest) returns (stream ListSnapshotsResponse);\n  rpc Usage(UsageRequest) returns (UsageResponse);\n  rpc Cleanup(CleanupRequest) returns (google.protobuf.Empty);\n}\n\nmessage PrepareSnapshotRequest {\n  string snapshotter = 1;\n  string key = 2;\n  string parent = 3;\n\n  // Labels are arbitrary data on snapshots.\n  //\n  // The combined size of a key/value pair cannot exceed 4096 bytes.\n  map<string, string> labels = 4;\n}\n\nmessage PrepareSnapshotResponse {\n  
repeated containerd.types.Mount mounts = 1;\n}\n\nmessage ViewSnapshotRequest {\n  string snapshotter = 1;\n  string key = 2;\n  string parent = 3;\n\n  // Labels are arbitrary data on snapshots.\n  //\n  // The combined size of a key/value pair cannot exceed 4096 bytes.\n  map<string, string> labels = 4;\n}\n\nmessage ViewSnapshotResponse {\n  repeated containerd.types.Mount mounts = 1;\n}\n\nmessage MountsRequest {\n  string snapshotter = 1;\n  string key = 2;\n}\n\nmessage MountsResponse {\n  repeated containerd.types.Mount mounts = 1;\n}\n\nmessage RemoveSnapshotRequest {\n  string snapshotter = 1;\n  string key = 2;\n}\n\nmessage CommitSnapshotRequest {\n  string snapshotter = 1;\n  string name = 2;\n  string key = 3;\n\n  // Labels are arbitrary data on snapshots.\n  //\n  // The combined size of a key/value pair cannot exceed 4096 bytes.\n  map<string, string> labels = 4;\n\n  string parent = 5;\n}\n\nmessage StatSnapshotRequest {\n  string snapshotter = 1;\n  string key = 2;\n}\n\nenum Kind {\n  UNKNOWN = 0;\n  VIEW = 1;\n  ACTIVE = 2;\n  COMMITTED = 3;\n}\n\nmessage Info {\n  string name = 1;\n  string parent = 2;\n  Kind kind = 3;\n\n  // CreatedAt provides the time at which the snapshot was created.\n  google.protobuf.Timestamp created_at = 4;\n\n  // UpdatedAt provides the time the info was last updated.\n  google.protobuf.Timestamp updated_at = 5;\n\n  // Labels are arbitrary data on snapshots.\n  //\n  // The combined size of a key/value pair cannot exceed 4096 bytes.\n  map<string, string> labels = 6;\n}\n\nmessage StatSnapshotResponse {\n  Info info = 1;\n}\n\nmessage UpdateSnapshotRequest {\n  string snapshotter = 1;\n  Info info = 2;\n\n  // UpdateMask specifies which fields to perform the update on. 
If empty,\n  // the operation applies to all fields.\n  //\n  // In info, Name, Parent, Kind, Created are immutable,\n  // other field may be updated using this mask.\n  // If no mask is provided, all mutable field are updated.\n  google.protobuf.FieldMask update_mask = 3;\n}\n\nmessage UpdateSnapshotResponse {\n  Info info = 1;\n}\n\nmessage ListSnapshotsRequest {\n  string snapshotter = 1;\n\n  // Filters contains one or more filters using the syntax defined in the\n  // containerd filter package.\n  //\n  // The returned result will be those that match any of the provided\n  // filters. Expanded, images that match the following will be\n  // returned:\n  //\n  //\tfilters[0] or filters[1] or ... or filters[n-1] or filters[n]\n  //\n  // If filters is zero-length or nil, all items will be returned.\n  repeated string filters = 2;\n}\n\nmessage ListSnapshotsResponse {\n  repeated Info info = 1;\n}\n\nmessage UsageRequest {\n  string snapshotter = 1;\n  string key = 2;\n}\n\nmessage UsageResponse {\n  int64 size = 1;\n  int64 inodes = 2;\n}\n\nmessage CleanupRequest {\n  string snapshotter = 1;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/services/streaming/v1/streaming.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.streaming.v1;\n\nimport \"google/protobuf/any.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/streaming/v1;streaming\";\n\nservice Streaming {\n  rpc Stream(stream google.protobuf.Any) returns (stream google.protobuf.Any);\n}\n\nmessage StreamInit {\n  string id = 1;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.tasks.v1;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/empty.proto\";\nimport \"google/protobuf/timestamp.proto\";\nimport \"types/descriptor.proto\";\nimport \"types/metrics.proto\";\nimport \"types/mount.proto\";\nimport \"types/task/task.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/tasks/v1;tasks\";\n\nservice Tasks {\n  // Create a task.\n  rpc Create(CreateTaskRequest) returns (CreateTaskResponse);\n\n  // Start a process.\n  rpc Start(StartRequest) returns (StartResponse);\n\n  // Delete a task and on disk state.\n  rpc Delete(DeleteTaskRequest) returns (DeleteResponse);\n\n  rpc DeleteProcess(DeleteProcessRequest) returns (DeleteResponse);\n\n  rpc Get(GetRequest) returns (GetResponse);\n\n  rpc List(ListTasksRequest) returns (ListTasksResponse);\n\n  // Kill a task or process.\n  rpc Kill(KillRequest) returns (google.protobuf.Empty);\n\n  rpc Exec(ExecProcessRequest) returns (google.protobuf.Empty);\n\n  rpc ResizePty(ResizePtyRequest) returns (google.protobuf.Empty);\n\n  rpc CloseIO(CloseIORequest) returns (google.protobuf.Empty);\n\n  rpc Pause(PauseTaskRequest) returns (google.protobuf.Empty);\n\n  rpc Resume(ResumeTaskRequest) returns (google.protobuf.Empty);\n\n  rpc ListPids(ListPidsRequest) returns (ListPidsResponse);\n\n 
 rpc Checkpoint(CheckpointTaskRequest) returns (CheckpointTaskResponse);\n\n  rpc Update(UpdateTaskRequest) returns (google.protobuf.Empty);\n\n  rpc Metrics(MetricsRequest) returns (MetricsResponse);\n\n  rpc Wait(WaitRequest) returns (WaitResponse);\n}\n\nmessage CreateTaskRequest {\n  string container_id = 1;\n\n  // RootFS provides the pre-chroot mounts to perform in the shim before\n  // executing the container task.\n  //\n  // These are for mounts that cannot be performed in the user namespace.\n  // Typically, these mounts should be resolved from snapshots specified on\n  // the container object.\n  repeated containerd.types.Mount rootfs = 3;\n\n  string stdin = 4;\n  string stdout = 5;\n  string stderr = 6;\n  bool terminal = 7;\n\n  containerd.types.Descriptor checkpoint = 8;\n\n  google.protobuf.Any options = 9;\n\n  string runtime_path = 10;\n}\n\nmessage CreateTaskResponse {\n  string container_id = 1;\n  uint32 pid = 2;\n}\n\nmessage StartRequest {\n  string container_id = 1;\n  string exec_id = 2;\n}\n\nmessage StartResponse {\n  uint32 pid = 1;\n}\n\nmessage DeleteTaskRequest {\n  string container_id = 1;\n}\n\nmessage DeleteResponse {\n  string id = 1;\n  uint32 pid = 2;\n  uint32 exit_status = 3;\n  google.protobuf.Timestamp exited_at = 4;\n}\n\nmessage DeleteProcessRequest {\n  string container_id = 1;\n  string exec_id = 2;\n}\n\nmessage GetRequest {\n  string container_id = 1;\n  string exec_id = 2;\n}\n\nmessage GetResponse {\n  containerd.v1.types.Process process = 1;\n}\n\nmessage ListTasksRequest {\n  string filter = 1;\n}\n\nmessage ListTasksResponse {\n  repeated containerd.v1.types.Process tasks = 1;\n}\n\nmessage KillRequest {\n  string container_id = 1;\n  string exec_id = 2;\n  uint32 signal = 3;\n  bool all = 4;\n}\n\nmessage ExecProcessRequest {\n  string container_id = 1;\n  string stdin = 2;\n  string stdout = 3;\n  string stderr = 4;\n  bool terminal = 5;\n  // Spec for starting a process in the target container.\n  //\n  // For 
runc, this is a process spec, for example.\n  google.protobuf.Any spec = 6;\n  // id of the exec process\n  string exec_id = 7;\n}\n\nmessage ExecProcessResponse {}\n\nmessage ResizePtyRequest {\n  string container_id = 1;\n  string exec_id = 2;\n  uint32 width = 3;\n  uint32 height = 4;\n}\n\nmessage CloseIORequest {\n  string container_id = 1;\n  string exec_id = 2;\n  bool stdin = 3;\n}\n\nmessage PauseTaskRequest {\n  string container_id = 1;\n}\n\nmessage ResumeTaskRequest {\n  string container_id = 1;\n}\n\nmessage ListPidsRequest {\n  string container_id = 1;\n}\n\nmessage ListPidsResponse {\n  // Processes includes the process ID and additional process information\n  repeated containerd.v1.types.ProcessInfo processes = 1;\n}\n\nmessage CheckpointTaskRequest {\n  string container_id = 1;\n  string parent_checkpoint = 2;\n  google.protobuf.Any options = 3;\n}\n\nmessage CheckpointTaskResponse {\n  repeated containerd.types.Descriptor descriptors = 1;\n}\n\nmessage UpdateTaskRequest {\n  string container_id = 1;\n  google.protobuf.Any resources = 2;\n  map<string, string> annotations = 3;\n}\n\nmessage MetricsRequest {\n  repeated string filters = 1;\n}\n\nmessage MetricsResponse {\n  repeated types.Metric metrics = 1;\n}\n\nmessage WaitRequest {\n  string container_id = 1;\n  string exec_id = 2;\n}\n\nmessage WaitResponse {\n  uint32 exit_status = 1;\n  google.protobuf.Timestamp exited_at = 2;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/services/transfer/v1/transfer.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.transfer.v1;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/empty.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/transfer/v1;transfer\";\n\nservice Transfer {\n  rpc Transfer(TransferRequest) returns (google.protobuf.Empty);\n}\n\nmessage TransferRequest {\n  google.protobuf.Any source = 1;\n  google.protobuf.Any destination = 2;\n  TransferOptions options = 3;\n}\n\nmessage TransferOptions {\n  string progress_stream = 1;\n  // Progress min interval\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.events.ttrpc.v1;\n\nimport \"google/protobuf/empty.proto\";\nimport \"types/event.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/ttrpc/events/v1;events\";\n\nservice Events {\n  // Forward sends an event that has already been packaged into an envelope\n  // with a timestamp and namespace.\n  //\n  // This is useful if earlier timestamping is required or when forwarding on\n  // behalf of another component, namespace or publisher.\n  rpc Forward(ForwardRequest) returns (google.protobuf.Empty);\n}\n\nmessage ForwardRequest {\n  containerd.types.Envelope envelope = 1;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/services/version/v1/version.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.version.v1;\n\nimport \"google/protobuf/empty.proto\";\n\n// TODO(stevvooe): Should version service actually be versioned?\noption go_package = \"github.com/containerd/containerd/api/services/version/v1;version\";\n\nservice Version {\n  rpc Version(google.protobuf.Empty) returns (VersionResponse);\n}\n\nmessage VersionResponse {\n  string version = 1;\n  string revision = 2;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/types/descriptor.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types;\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\n// Descriptor describes a blob in a content store.\n//\n// This descriptor can be used to reference content from an\n// oci descriptor found in a manifest.\n// See https://godoc.org/github.com/opencontainers/image-spec/specs-go/v1#Descriptor\nmessage Descriptor {\n  string media_type = 1;\n  string digest = 2;\n  int64 size = 3;\n  map<string, string> annotations = 5;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/types/event.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/timestamp.proto\";\nimport \"types/fieldpath.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\nmessage Envelope {\n  option (containerd.types.fieldpath) = true;\n  google.protobuf.Timestamp timestamp = 1;\n  string namespace = 2;\n  string topic = 3;\n  google.protobuf.Any event = 4;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/types/fieldpath.proto",
    "content": "// Protocol Buffers for Go with Gadgets\n//\n// Copyright (c) 2013, The GoGo Authors. All rights reserved.\n// http://github.com/gogo/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nsyntax = \"proto3\";\npackage containerd.types;\n\nimport \"google/protobuf/descriptor.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\nextend google.protobuf.FileOptions {\n  optional bool fieldpath_all = 63300;\n}\n\nextend google.protobuf.MessageOptions {\n  optional bool fieldpath = 64400;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/types/introspection.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types;\n\nimport \"google/protobuf/any.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\nmessage RuntimeRequest {\n  string runtime_path = 1;\n  // Options correspond to CreateTaskRequest.options.\n  // This is needed to pass the runc binary path, etc.\n  google.protobuf.Any options = 2;\n}\n\nmessage RuntimeVersion {\n  string version = 1;\n  string revision = 2;\n}\n\nmessage RuntimeInfo {\n  string name = 1;\n  RuntimeVersion version = 2;\n  // Options correspond to RuntimeInfoRequest.Options (contains runc binary path, etc.)\n  google.protobuf.Any options = 3;\n  // OCI-compatible runtimes should use https://github.com/opencontainers/runtime-spec/blob/main/features.md\n  google.protobuf.Any features = 4;\n  // Annotations of the shim. Irrelevant to features.Annotations.\n  map<string, string> annotations = 5;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/types/metrics.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/timestamp.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\nmessage Metric {\n  google.protobuf.Timestamp timestamp = 1;\n  string id = 2;\n  google.protobuf.Any data = 3;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/types/mount.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types;\n\nimport \"google/protobuf/timestamp.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\n// Mount describes mounts for a container.\n//\n// This type is the lingua franca of ContainerD. All services provide mounts\n// to be used with the container at creation time.\n//\n// The Mount type follows the structure of the mount syscall, including a type,\n// source, target and options.\nmessage Mount {\n  // Type defines the nature of the mount.\n  string type = 1;\n\n  // Source specifies the name of the mount. Depending on mount type, this\n  // may be a volume name or a host path, or even ignored.\n  string source = 2;\n\n  // Target path in container\n  string target = 3;\n\n  // Options specifies zero or more fstab style mount options.\n  repeated string options = 4;\n}\n\nmessage ActiveMount {\n  Mount mount = 1;\n\n  google.protobuf.Timestamp mounted_at = 2;\n\n  string mount_point = 3;\n\n  map<string, string> data = 4;\n}\n\nmessage ActivationInfo {\n  string name = 1;\n\n  repeated ActiveMount active = 2;\n\n  repeated Mount system = 3;\n\n  map<string, string> labels = 4;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/types/platform.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types;\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\n// Platform follows the structure of the OCI platform specification, from\n// descriptors.\nmessage Platform {\n  string os = 1;\n  string architecture = 2;\n  string variant = 3;\n  string os_version = 4;\n  repeated string os_features = 5;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/types/runc/options/oci.proto",
    "content": "syntax = \"proto3\";\n\npackage containerd.runc.v1;\n\noption go_package = \"github.com/containerd/containerd/api/types/runc/options;options\";\n\nmessage Options {\n  // disable pivot root when creating a container\n  bool no_pivot_root = 1;\n  // create a new keyring for the container\n  bool no_new_keyring = 2;\n  // place the shim in a cgroup\n  string shim_cgroup = 3;\n  // set the I/O's pipes uid\n  uint32 io_uid = 4;\n  // set the I/O's pipes gid\n  uint32 io_gid = 5;\n  // binary name of the runc binary\n  string binary_name = 6;\n  // runc root directory\n  string root = 7;\n  // criu binary path.\n  //\n  // Removed in containerd v2.0: string criu_path = 8;\n  reserved 8;\n  // enable systemd cgroups\n  bool systemd_cgroup = 9;\n  // criu image path\n  string criu_image_path = 10;\n  // criu work path\n  string criu_work_path = 11;\n  // task api address, can be a unix domain socket, or vsock address.\n  // it is in the form of ttrpc+unix://path/to/uds or grpc+vsock://<vsock cid>:<port>.\n  string task_api_address = 12;\n  // task api version, currently supported value is 2 and 3.\n  uint32 task_api_version = 13;\n}\n\nmessage CheckpointOptions {\n  // exit the container after a checkpoint\n  bool exit = 1;\n  // checkpoint open tcp connections\n  bool open_tcp = 2;\n  // checkpoint external unix sockets\n  bool external_unix_sockets = 3;\n  // checkpoint terminals (ptys)\n  bool terminal = 4;\n  // allow checkpointing of file locks\n  bool file_locks = 5;\n  // restore provided namespaces as empty namespaces\n  repeated string empty_namespaces = 6;\n  // set the cgroups mode, soft, full, strict\n  string cgroups_mode = 7;\n  // checkpoint image path\n  string image_path = 8;\n  // checkpoint work path\n  string work_path = 9;\n}\n\nmessage ProcessDetails {\n  // exec process id if the process is managed by a shim\n  string exec_id = 1;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/types/runtimeoptions/v1/api.proto",
    "content": "// To regenerate api.pb.go run `make protos`\nsyntax = \"proto3\";\n\npackage runtimeoptions.v1;\n\noption go_package = \"github.com/containerd/containerd/api/types/runtimeoptions/v1;runtimeoptions\";\n\nmessage Options {\n  // TypeUrl specifies the type of the content inside the config file.\n  string type_url = 1;\n  // ConfigPath specifies the filesystem location of the config file\n  // used by the runtime.\n  string config_path = 2;\n  // Blob specifies an in-memory TOML blob passed from containerd's configuration section\n  // for this runtime. This will be used if config_path is not specified.\n  bytes config_body = 3;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/types/sandbox.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/timestamp.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\n// Sandbox represents a sandbox metadata object that keeps all info required by controller to\n// work with a particular instance.\nmessage Sandbox {\n  // SandboxID is a unique instance identifier within namespace\n  string sandbox_id = 1;\n  message Runtime {\n    // Name is the name of the runtime.\n    string name = 1;\n    // Options specify additional runtime initialization options for the shim (this data will be available in StartShim).\n    // Typically this data expected to be runtime shim implementation specific.\n    google.protobuf.Any options = 2;\n  }\n  // Runtime specifies which runtime to use for executing this container.\n  Runtime runtime = 2;\n  // Spec is sandbox configuration (kin of OCI runtime spec), spec's data will be written to a config.json file in the\n  // bundle directory (similary to OCI spec).\n  google.protobuf.Any spec = 3;\n  // Labels provides an area to include arbitrary data on containers.\n  map<string, string> labels = 4;\n  // CreatedAt is the time the container was first created.\n  google.protobuf.Timestamp created_at = 5;\n  // UpdatedAt is the last time the container was mutated.\n  
google.protobuf.Timestamp updated_at = 6;\n  // Extensions allow clients to provide optional blobs that can be handled by runtime.\n  map<string, google.protobuf.Any> extensions = 7;\n  // Sandboxer is the name of the sandbox controller who manages the sandbox.\n  string sandboxer = 10;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/types/task/task.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.v1.types;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/timestamp.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types/task\";\n\nenum Status {\n  UNKNOWN = 0;\n  CREATED = 1;\n  RUNNING = 2;\n  STOPPED = 3;\n  PAUSED = 4;\n  PAUSING = 5;\n}\n\nmessage Process {\n  string container_id = 1;\n  string id = 2;\n  uint32 pid = 3;\n  Status status = 4;\n  string stdin = 5;\n  string stdout = 6;\n  string stderr = 7;\n  bool terminal = 8;\n  uint32 exit_status = 9;\n  google.protobuf.Timestamp exited_at = 10;\n}\n\nmessage ProcessInfo {\n  // PID is the process ID.\n  uint32 pid = 1;\n  // Info contains additional process information.\n  //\n  // Info varies by platform.\n  google.protobuf.Any info = 2;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/types/transfer/container.proto",
    "content": "/*\n\tCopyright The containerd Authors.\n\n\tLicensed under the Apache License, Version 2.0 (the \"License\");\n\tyou may not use this file except in compliance with the License.\n\tYou may obtain a copy of the License at\n\n\t\thttp://www.apache.org/licenses/LICENSE-2.0\n\n\tUnless required by applicable law or agreed to in writing, software\n\tdistributed under the License is distributed on an \"AS IS\" BASIS,\n\tWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\tSee the License for the specific language governing permissions and\n\tlimitations under the License.\n*/\nsyntax = \"proto3\";\n\n\npackage containerd.types.transfer;\n\noption go_package = \"github.com/containerd/containerd/api/types/transfer\";\n\n// ContainerPath represents a path within an active container's\n// filesystem. It acts as either a source or destination in a transfer\n// operation, identifying the container and path for archive operations.\nmessage ContainerPath {\n\tstring container_id = 1;\n\tstring path = 2;\n\n\t// When true and path is a directory, return only the directory entry\n\t// itself without walking into its contents. This is useful for\n\t// stat-like operations where only the directory's metadata is needed.\n\tbool no_walk = 3;\n\n\t// When true, preserve the UID/GID from tar headers when extracting\n\t// files. When false, extracted files are owned by the extracting\n\t// process.\n\tbool preserve_ownership = 4;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/types/transfer/imagestore.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types.transfer;\n\nimport \"types/platform.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types/transfer\";\n\nmessage ImageStore {\n  string name = 1;\n  map<string, string> labels = 2;\n\n  // Content filters\n\n  repeated types.Platform platforms = 3;\n  bool all_metadata = 4;\n  uint32 manifest_limit = 5;\n\n  // Import naming\n\n  // extra_references are used to set image names on imports of sub-images from the index\n  repeated ImageReference extra_references = 6;\n\n  // Unpack Configuration, multiple allowed\n\n  repeated UnpackConfiguration unpacks = 10;\n}\n\nmessage UnpackConfiguration {\n  // platform is the platform to unpack for, used for resolving manifest and snapshotter\n  // if not provided\n  types.Platform platform = 1;\n\n  // snapshotter to unpack to, if not provided default for platform shoudl be used\n  string snapshotter = 2;\n}\n\n// ImageReference is used to create or find a reference for an image\nmessage ImageReference {\n  string name = 1;\n\n  // is_prefix determines whether the Name should be considered\n  // a prefix (without tag or digest).\n  // For lookup, this may allow matching multiple tags.\n  // For store, this must have a tag or digest added.\n  bool is_prefix = 2;\n\n  // allow_overwrite allows overwriting or ignoring the name if\n  // 
another reference is provided (such as through an annotation).\n  // Only used if IsPrefix is true.\n  bool allow_overwrite = 3;\n\n  // add_digest adds the manifest digest to the reference.\n  // For lookup, this allows matching tags with any digest.\n  // For store, this allows adding the digest to the name.\n  // Only used if IsPrefix is true.\n  bool add_digest = 4;\n\n  // skip_named_digest only considers digest references which do not\n  // have a non-digested named reference.\n  // For lookup, this will deduplicate digest references when there is a named match.\n  // For store, this only adds this digest reference when there is no matching full\n  // name reference from the prefix.\n  // Only used if IsPrefix is true.\n  bool skip_named_digest = 5;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/types/transfer/importexport.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types.transfer;\n\nimport \"types/platform.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types/transfer\";\n\nmessage ImageImportStream {\n  // Stream is used to identify the binary input stream for the import operation.\n  // The stream uses the transfer binary stream protocol with the client as the sender.\n  // The binary data is expected to be a raw tar stream.\n  string stream = 1;\n\n  string media_type = 2;\n\n  bool force_compress = 3;\n}\n\nmessage ImageExportStream {\n  // Stream is used to identify the binary output stream for the export operation.\n  // The stream uses the transfer binary stream protocol with the server as the sender.\n  // The binary data is expected to be a raw tar stream.\n  string stream = 1;\n\n  string media_type = 2;\n\n  // The specified platforms\n  repeated types.Platform platforms = 3;\n  // Whether to include all platforms\n  bool all_platforms = 4;\n  // Skips the creation of the Docker compatible manifest.json file\n  bool skip_compatibility_manifest = 5;\n  // Excludes non-distributable blobs such as Windows base layers.\n  bool skip_non_distributable = 6;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/types/transfer/progress.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types.transfer;\n\nimport \"types/descriptor.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types/transfer\";\n\nmessage Progress {\n  string event = 1;\n  string name = 2;\n  repeated string parents = 3;\n  int64 progress = 4;\n  int64 total = 5;\n  containerd.types.Descriptor desc = 6;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/types/transfer/registry.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types.transfer;\n\nimport \"google/protobuf/timestamp.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types/transfer\";\n\nmessage OCIRegistry {\n  string reference = 1;\n  RegistryResolver resolver = 2;\n}\n\nenum HTTPDebug {\n  DISABLED = 0;\n  // Enable HTTP debugging\n  DEBUG = 1;\n  // Enable HTTP requests tracing\n  TRACE = 2;\n  // Enable both HTTP debugging and requests tracing\n  BOTH = 3;\n}\n\nmessage RegistryResolver {\n  // auth_stream is used to refer to a stream which auth callbacks may be\n  // made on.\n  string auth_stream = 1;\n\n  // Headers\n  map<string, string> headers = 2;\n\n  string host_dir = 3;\n\n  string default_scheme = 4;\n  // Force skip verify\n  // CA callback? 
Client TLS callback?\n\n  // Whether to debug/trace HTTP requests to OCI registry.\n  HTTPDebug http_debug = 5;\n\n  // Stream ID to use for HTTP logs (when logs are streamed to client).\n  // When empty, logs are written to containerd logs.\n  string logs_stream = 6;\n}\n\n// AuthRequest is sent as a callback on a stream\nmessage AuthRequest {\n  // host is the registry host\n  string host = 1;\n\n  // reference is the namespace and repository name requested from the registry\n  string reference = 2;\n\n  // wwwauthenticate is the HTTP WWW-Authenticate header values returned from the registry\n  repeated string wwwauthenticate = 3;\n}\n\nenum AuthType {\n  NONE = 0;\n\n  // CREDENTIALS is used to exchange username/password for access token\n  // using an oauth or \"Docker Registry Token\" server\n  CREDENTIALS = 1;\n\n  // REFRESH is used to exchange secret for access token using an oauth\n  // or \"Docker Registry Token\" server\n  REFRESH = 2;\n\n  // HEADER is used to set the HTTP Authorization header to secret\n  // directly for the registry.\n  // Value should be `<auth-scheme> <authorization-parameters>`\n  HEADER = 3;\n}\n\nmessage AuthResponse {\n  AuthType authType = 1;\n  string secret = 2;\n  string username = 3;\n  google.protobuf.Timestamp expire_at = 4;\n  // TODO: Stream error\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/api/types/transfer/streaming.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types.transfer;\n\noption go_package = \"github.com/containerd/containerd/api/types/transfer\";\n\nmessage Data {\n  bytes data = 1;\n}\n\nmessage WindowUpdate {\n  int32 update = 1;\n}\n\n// ReadStream carries data from the client to the server (import\n// direction). The client sends data through the stream and the\n// server reads it.\nmessage ReadStream {\n\tstring stream = 1;\n\tstring media_type = 2;\n}\n\n// WriteStream carries data from the server to the client (export\n// direction). The server writes data into the stream and the\n// client receives it.\nmessage WriteStream {\n\tstring stream = 1;\n\tstring media_type = 2;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/events/container.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.events;\n\nimport \"google/protobuf/any.proto\";\nimport \"types/fieldpath.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/events;events\";\noption (containerd.types.fieldpath_all) = true;\n\nmessage ContainerCreate {\n  string id = 1;\n  string image = 2;\n  message Runtime {\n    string name = 1;\n    google.protobuf.Any options = 2;\n  }\n  Runtime runtime = 3;\n}\n\nmessage ContainerUpdate {\n  string id = 1;\n  string image = 2;\n  map<string, string> labels = 3;\n  string snapshot_key = 4;\n}\n\nmessage ContainerDelete {\n  string id = 1;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/events/content.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.events;\n\nimport \"types/fieldpath.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/events;events\";\noption (containerd.types.fieldpath_all) = true;\n\nmessage ContentCreate {\n  string digest = 1;\n  int64 size = 2;\n}\n\nmessage ContentDelete {\n  string digest = 1;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/events/image.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.images.v1;\n\nimport \"types/fieldpath.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/events;events\";\noption (containerd.types.fieldpath_all) = true;\n\nmessage ImageCreate {\n  string name = 1;\n  map<string, string> labels = 2;\n}\n\nmessage ImageUpdate {\n  string name = 1;\n  map<string, string> labels = 2;\n}\n\nmessage ImageDelete {\n  string name = 1;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/events/namespace.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.events;\n\nimport \"types/fieldpath.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/events;events\";\noption (containerd.types.fieldpath_all) = true;\n\nmessage NamespaceCreate {\n  string name = 1;\n  map<string, string> labels = 2;\n}\n\nmessage NamespaceUpdate {\n  string name = 1;\n  map<string, string> labels = 2;\n}\n\nmessage NamespaceDelete {\n  string name = 1;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/events/sandbox.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.events;\n\nimport \"google/protobuf/timestamp.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/events;events\";\n\nmessage SandboxCreate {\n  string sandbox_id = 1;\n}\n\nmessage SandboxStart {\n  string sandbox_id = 1;\n}\n\nmessage SandboxExit {\n  string sandbox_id = 1;\n  uint32 exit_status = 2;\n  google.protobuf.Timestamp exited_at = 3;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/events/snapshot.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.events;\n\nimport \"types/fieldpath.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/events;events\";\noption (containerd.types.fieldpath_all) = true;\n\nmessage SnapshotPrepare {\n  string key = 1;\n  string parent = 2;\n  string snapshotter = 5;\n}\n\nmessage SnapshotCommit {\n  string key = 1;\n  string name = 2;\n  string snapshotter = 5;\n}\n\nmessage SnapshotRemove {\n  string key = 1;\n  string snapshotter = 5;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/events/task.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.events;\n\nimport \"google/protobuf/timestamp.proto\";\nimport \"types/fieldpath.proto\";\nimport \"types/mount.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/events;events\";\noption (containerd.types.fieldpath_all) = true;\n\nmessage TaskCreate {\n  string container_id = 1;\n  string bundle = 2;\n  repeated containerd.types.Mount rootfs = 3;\n  TaskIO io = 4;\n  string checkpoint = 5;\n  uint32 pid = 6;\n}\n\nmessage TaskStart {\n  string container_id = 1;\n  uint32 pid = 2;\n}\n\nmessage TaskDelete {\n  string container_id = 1;\n  uint32 pid = 2;\n  uint32 exit_status = 3;\n  google.protobuf.Timestamp exited_at = 4;\n  // id is the specific exec. 
By default if omitted will be `\"\"` thus matches\n  // the init exec of the task matching `container_id`.\n  string id = 5;\n}\n\nmessage TaskIO {\n  string stdin = 1;\n  string stdout = 2;\n  string stderr = 3;\n  bool terminal = 4;\n}\n\nmessage TaskExit {\n  string container_id = 1;\n  string id = 2;\n  uint32 pid = 3;\n  uint32 exit_status = 4;\n  google.protobuf.Timestamp exited_at = 5;\n}\n\nmessage TaskOOM {\n  string container_id = 1;\n}\n\nmessage TaskExecAdded {\n  string container_id = 1;\n  string exec_id = 2;\n}\n\nmessage TaskExecStarted {\n  string container_id = 1;\n  string exec_id = 2;\n  uint32 pid = 3;\n}\n\nmessage TaskPaused {\n  string container_id = 1;\n}\n\nmessage TaskResumed {\n  string container_id = 1;\n}\n\nmessage TaskCheckpointed {\n  string container_id = 1;\n  string checkpoint = 2;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.containers.v1;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/empty.proto\";\nimport \"google/protobuf/field_mask.proto\";\nimport \"google/protobuf/timestamp.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/containers/v1;containers\";\n\n// Containers provides metadata storage for containers used in the execution\n// service.\n//\n// The objects here provide an state-independent view of containers for use in\n// management and resource pinning. From that perspective, containers do not\n// have a \"state\" but rather this is the set of resources that will be\n// considered in use by the container.\n//\n// From the perspective of the execution service, these objects represent the\n// base parameters for creating a container process.\n//\n// In general, when looking to add fields for this type, first ask yourself\n// whether or not the function of the field has to do with runtime execution or\n// is invariant of the runtime state of the container. 
If it has to do with\n// runtime, or changes as the \"container\" is started and stops, it probably\n// doesn't belong on this object.\nservice Containers {\n  rpc Get(GetContainerRequest) returns (GetContainerResponse);\n  rpc List(ListContainersRequest) returns (ListContainersResponse);\n  rpc ListStream(ListContainersRequest) returns (stream ListContainerMessage);\n  rpc Create(CreateContainerRequest) returns (CreateContainerResponse);\n  rpc Update(UpdateContainerRequest) returns (UpdateContainerResponse);\n  rpc Delete(DeleteContainerRequest) returns (google.protobuf.Empty);\n}\n\nmessage Container {\n  // ID is the user-specified identifier.\n  //\n  // This field may not be updated.\n  string id = 1;\n\n  // Labels provides an area to include arbitrary data on containers.\n  //\n  // The combined size of a key/value pair cannot exceed 4096 bytes.\n  //\n  // Note that to add a new value to this field, read the existing set and\n  // include the entire result in the update call.\n  map<string, string> labels = 2;\n\n  // Image contains the reference of the image used to build the\n  // specification and snapshots for running this container.\n  //\n  // If this field is updated, the spec and rootfs need to be updated, as well.\n  string image = 3;\n\n  message Runtime {\n    // Name is the name of the runtime.\n    string name = 1;\n    // Options specify additional runtime initialization options.\n    google.protobuf.Any options = 2;\n  }\n  // Runtime specifies which runtime to use for executing this container.\n  Runtime runtime = 4;\n\n  // Spec to be used when creating the container. This is runtime specific.\n  google.protobuf.Any spec = 5;\n\n  // Snapshotter specifies the snapshotter name used for rootfs\n  string snapshotter = 6;\n\n  // SnapshotKey specifies the snapshot key to use for the container's root\n  // filesystem. 
When starting a task from this container, a caller should\n  // look up the mounts from the snapshot service and include those on the\n  // task create request.\n  //\n  // Snapshots referenced in this field will not be garbage collected.\n  //\n  // This field is set to empty when the rootfs is not a snapshot.\n  //\n  // This field may be updated.\n  string snapshot_key = 7;\n\n  // CreatedAt is the time the container was first created.\n  google.protobuf.Timestamp created_at = 8;\n\n  // UpdatedAt is the last time the container was mutated.\n  google.protobuf.Timestamp updated_at = 9;\n\n  // Extensions allow clients to provide zero or more blobs that are directly\n  // associated with the container. One may provide protobuf, json, or other\n  // encoding formats. The primary use of this is to further decorate the\n  // container object with fields that may be specific to a client integration.\n  //\n  // The key portion of this map should identify a \"name\" for the extension\n  // that should be unique against other extensions. When updating extension\n  // data, one should only update the specified extension using field paths\n  // to select a specific map key.\n  map<string, google.protobuf.Any> extensions = 10;\n\n  // Sandbox ID this container belongs to.\n  string sandbox = 11;\n}\n\nmessage GetContainerRequest {\n  string id = 1;\n}\n\nmessage GetContainerResponse {\n  Container container = 1;\n}\n\nmessage ListContainersRequest {\n  // Filters contains one or more filters using the syntax defined in the\n  // containerd filter package.\n  //\n  // The returned result will be those that match any of the provided\n  // filters. Expanded, containers that match the following will be\n  // returned:\n  //\n  //\tfilters[0] or filters[1] or ... 
or filters[n-1] or filters[n]\n  //\n  // If filters is zero-length or nil, all items will be returned.\n  repeated string filters = 1;\n}\n\nmessage ListContainersResponse {\n  repeated Container containers = 1;\n}\n\nmessage CreateContainerRequest {\n  Container container = 1;\n}\n\nmessage CreateContainerResponse {\n  Container container = 1;\n}\n\n// UpdateContainerRequest updates the metadata on one or more container.\n//\n// The operation should follow semantics described in\n// https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-mask,\n// unless otherwise qualified.\nmessage UpdateContainerRequest {\n  // Container provides the target values, as declared by the mask, for the update.\n  //\n  // The ID field must be set.\n  Container container = 1;\n\n  // UpdateMask specifies which fields to perform the update on. If empty,\n  // the operation applies to all fields.\n  google.protobuf.FieldMask update_mask = 2;\n}\n\nmessage UpdateContainerResponse {\n  Container container = 1;\n}\n\nmessage DeleteContainerRequest {\n  string id = 1;\n}\n\nmessage ListContainerMessage {\n  Container container = 1;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.content.v1;\n\nimport \"google/protobuf/empty.proto\";\nimport \"google/protobuf/field_mask.proto\";\nimport \"google/protobuf/timestamp.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/content/v1;content\";\n\n// Content provides access to a content addressable storage system.\nservice Content {\n  // Info returns information about a committed object.\n  //\n  // This call can be used for getting the size of content and checking for\n  // existence.\n  rpc Info(InfoRequest) returns (InfoResponse);\n\n  // Update updates content metadata.\n  //\n  // This call can be used to manage the mutable content labels. 
The\n  // immutable metadata such as digest, size, and committed at cannot\n  // be updated.\n  rpc Update(UpdateRequest) returns (UpdateResponse);\n\n  // List streams the entire set of content as Info objects and closes the\n  // stream.\n  //\n  // Typically, this will yield a large response, chunked into messages.\n  // Clients should make provisions to ensure they can handle the entire data\n  // set.\n  rpc List(ListContentRequest) returns (stream ListContentResponse);\n\n  // Delete will delete the referenced object.\n  rpc Delete(DeleteContentRequest) returns (google.protobuf.Empty);\n\n  // Read allows one to read an object based on the offset into the content.\n  //\n  // The requested data may be returned in one or more messages.\n  rpc Read(ReadContentRequest) returns (stream ReadContentResponse);\n\n  // Status returns the status for a single reference.\n  rpc Status(StatusRequest) returns (StatusResponse);\n\n  // ListStatuses returns the status of ongoing object ingestions, started via\n  // Write.\n  //\n  // Only those matching the regular expression will be provided in the\n  // response. If the provided regular expression is empty, all ingestions\n  // will be provided.\n  rpc ListStatuses(ListStatusesRequest) returns (ListStatusesResponse);\n\n  // Write begins or resumes writes to a resource identified by a unique ref.\n  // Only one active stream may exist at a time for each ref.\n  //\n  // Once a write stream has started, it may only write to a single ref, thus\n  // once a stream is started, the ref may be omitted on subsequent writes.\n  //\n  // For any write transaction represented by a ref, only a single write may\n  // be made to a given offset. 
If overlapping writes occur, it is an error.\n  // Writes should be sequential and implementations may throw an error if\n  // this is required.\n  //\n  // If expected_digest is set and already part of the content store, the\n  // write will fail.\n  //\n  // When completed, the commit flag should be set to true. If expected size\n  // or digest is set, the content will be validated against those values.\n  rpc Write(stream WriteContentRequest) returns (stream WriteContentResponse);\n\n  // Abort cancels the ongoing write named in the request. Any resources\n  // associated with the write will be collected.\n  rpc Abort(AbortRequest) returns (google.protobuf.Empty);\n}\n\nmessage Info {\n  // Digest is the hash identity of the blob.\n  string digest = 1;\n\n  // Size is the total number of bytes in the blob.\n  int64 size = 2;\n\n  // CreatedAt provides the time at which the blob was committed.\n  google.protobuf.Timestamp created_at = 3;\n\n  // UpdatedAt provides the time the info was last updated.\n  google.protobuf.Timestamp updated_at = 4;\n\n  // Labels are arbitrary data on snapshots.\n  //\n  // The combined size of a key/value pair cannot exceed 4096 bytes.\n  map<string, string> labels = 5;\n}\n\nmessage InfoRequest {\n  string digest = 1;\n}\n\nmessage InfoResponse {\n  Info info = 1;\n}\n\nmessage UpdateRequest {\n  Info info = 1;\n\n  // UpdateMask specifies which fields to perform the update on. If empty,\n  // the operation applies to all fields.\n  //\n  // In info, Digest, Size, and CreatedAt are immutable,\n  // other field may be updated using this mask.\n  // If no mask is provided, all mutable field are updated.\n  google.protobuf.FieldMask update_mask = 2;\n}\n\nmessage UpdateResponse {\n  Info info = 1;\n}\n\nmessage ListContentRequest {\n  // Filters contains one or more filters using the syntax defined in the\n  // containerd filter package.\n  //\n  // The returned result will be those that match any of the provided\n  // filters. 
Expanded, containers that match the following will be\n  // returned:\n  //\n  //\tfilters[0] or filters[1] or ... or filters[n-1] or filters[n]\n  //\n  // If filters is zero-length or nil, all items will be returned.\n  repeated string filters = 1;\n}\n\nmessage ListContentResponse {\n  repeated Info info = 1;\n}\n\nmessage DeleteContentRequest {\n  // Digest specifies which content to delete.\n  string digest = 1;\n}\n\n// ReadContentRequest defines the fields that make up a request to read a portion of\n// data from a stored object.\nmessage ReadContentRequest {\n  // Digest is the hash identity to read.\n  string digest = 1;\n\n  // Offset specifies the number of bytes from the start at which to begin\n  // the read. If zero or less, the read will be from the start. This uses\n  // standard zero-indexed semantics.\n  int64 offset = 2;\n\n  // size is the total size of the read. If zero, the entire blob will be\n  // returned by the service.\n  int64 size = 3;\n}\n\n// ReadContentResponse carries byte data for a read request.\nmessage ReadContentResponse {\n  int64 offset = 1; // offset of the returned data\n  bytes data = 2; // actual data\n}\n\nmessage Status {\n  google.protobuf.Timestamp started_at = 1;\n  google.protobuf.Timestamp updated_at = 2;\n  string ref = 3;\n  int64 offset = 4;\n  int64 total = 5;\n  string expected = 6;\n}\n\nmessage StatusRequest {\n  string ref = 1;\n}\n\nmessage StatusResponse {\n  Status status = 1;\n}\n\nmessage ListStatusesRequest {\n  repeated string filters = 1;\n}\n\nmessage ListStatusesResponse {\n  repeated Status statuses = 1;\n}\n\n// WriteAction defines the behavior of a WriteRequest.\nenum WriteAction {\n  // WriteActionStat instructs the writer to return the current status while\n  // holding the lock on the write.\n  STAT = 0;\n\n  // WriteActionWrite sets the action for the write request to write data.\n  //\n  // Any data included will be written at the provided offset. 
The\n  // transaction will be left open for further writes.\n  //\n  // This is the default.\n  WRITE = 1;\n\n  // WriteActionCommit will write any outstanding data in the message and\n  // commit the write, storing it under the digest.\n  //\n  // This can be used in a single message to send the data, verify it and\n  // commit it.\n  //\n  // This action will always terminate the write.\n  COMMIT = 2;\n}\n\n// WriteContentRequest writes data to the request ref at offset.\nmessage WriteContentRequest {\n  // Action sets the behavior of the write.\n  //\n  // When this is a write and the ref is not yet allocated, the ref will be\n  // allocated and the data will be written at offset.\n  //\n  // If the action is write and the ref is allocated, it will accept data to\n  // an offset that has not yet been written.\n  //\n  // If the action is write and there is no data, the current write status\n  // will be returned. This works differently from status because the stream\n  // holds a lock.\n  WriteAction action = 1;\n\n  // Ref identifies the pre-commit object to write to.\n  string ref = 2;\n\n  // Total can be set to have the service validate the total size of the\n  // committed content.\n  //\n  // The latest value before or with the commit action message will be use to\n  // validate the content. If the offset overflows total, the service may\n  // report an error. It is only required on one message for the write.\n  //\n  // If the value is zero or less, no validation of the final content will be\n  // performed.\n  int64 total = 3;\n\n  // Expected can be set to have the service validate the final content against\n  // the provided digest.\n  //\n  // If the digest is already present in the object store, an AlreadyExists\n  // error will be returned.\n  //\n  // Only the latest version will be used to check the content against the\n  // digest. 
It is only required to include it on a single message, before or\n  // with the commit action message.\n  string expected = 4;\n\n  // Offset specifies the number of bytes from the start at which to begin\n  // the write. For most implementations, this means from the start of the\n  // file. This uses standard, zero-indexed semantics.\n  //\n  // If the action is write, the remote may remove all previously written\n  // data after the offset. Implementations may support arbitrary offsets but\n  // MUST support reseting this value to zero with a write. If an\n  // implementation does not support a write at a particular offset, an\n  // OutOfRange error must be returned.\n  int64 offset = 5;\n\n  // Data is the actual bytes to be written.\n  //\n  // If this is empty and the message is not a commit, a response will be\n  // returned with the current write state.\n  bytes data = 6;\n\n  // Labels are arbitrary data on snapshots.\n  //\n  // The combined size of a key/value pair cannot exceed 4096 bytes.\n  map<string, string> labels = 7;\n}\n\n// WriteContentResponse is returned on the culmination of a write call.\nmessage WriteContentResponse {\n  // Action contains the action for the final message of the stream. A writer\n  // should confirm that they match the intended result.\n  WriteAction action = 1;\n\n  // StartedAt provides the time at which the write began.\n  //\n  // This must be set for stat and commit write actions. All other write\n  // actions may omit this.\n  google.protobuf.Timestamp started_at = 2;\n\n  // UpdatedAt provides the last time of a successful write.\n  //\n  // This must be set for stat and commit write actions. 
All other write\n  // actions may omit this.\n  google.protobuf.Timestamp updated_at = 3;\n\n  // Offset is the current committed size for the write.\n  int64 offset = 4;\n\n  // Total provides the current, expected total size of the write.\n  //\n  // We include this to provide consistency with the Status structure on the\n  // client writer.\n  //\n  // This is only valid on the Stat and Commit response.\n  int64 total = 5;\n\n  // Digest, if present, includes the digest up to the currently committed\n  // bytes. If action is commit, this field will be set. It is implementation\n  // defined if this is set for other actions.\n  string digest = 6;\n}\n\nmessage AbortRequest {\n  string ref = 1;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.diff.v1;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/timestamp.proto\";\nimport \"types/descriptor.proto\";\nimport \"types/mount.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/diff/v1;diff\";\n\n// Diff service creates and applies diffs\nservice Diff {\n  // Apply applies the content associated with the provided digests onto\n  // the provided mounts. 
Archive content will be extracted and\n  // decompressed if necessary.\n  rpc Apply(ApplyRequest) returns (ApplyResponse);\n\n  // Diff creates a diff between the given mounts and uploads the result\n  // to the content store.\n  rpc Diff(DiffRequest) returns (DiffResponse);\n}\n\nmessage ApplyRequest {\n  // Diff is the descriptor of the diff to be extracted\n  containerd.types.Descriptor diff = 1;\n\n  repeated containerd.types.Mount mounts = 2;\n\n  map<string, google.protobuf.Any> payloads = 3;\n  // SyncFs is to synchronize the underlying filesystem containing files.\n  bool sync_fs = 4;\n}\n\nmessage ApplyResponse {\n  // Applied is the descriptor for the object which was applied.\n  // If the input was a compressed blob then the result will be\n  // the descriptor for the uncompressed blob.\n  containerd.types.Descriptor applied = 1;\n}\n\nmessage DiffRequest {\n  // Left are the mounts which represent the older copy\n  // in which is the base of the computed changes.\n  repeated containerd.types.Mount left = 1;\n\n  // Right are the mounts which represents the newer copy\n  // in which changes from the left were made into.\n  repeated containerd.types.Mount right = 2;\n\n  // MediaType is the media type descriptor for the created diff\n  // object\n  string media_type = 3;\n\n  // Ref identifies the pre-commit content store object. 
This\n  // reference can be used to get the status from the content store.\n  string ref = 4;\n\n  // Labels are the labels to apply to the generated content\n  // on content store commit.\n  map<string, string> labels = 5;\n\n  // SourceDateEpoch specifies the timestamp used to provide control for reproducibility.\n  // See also https://reproducible-builds.org/docs/source-date-epoch/ .\n  //\n  // Since containerd v2.0, the whiteout timestamps are set to zero (1970-01-01),\n  // not to the source date epoch.\n  google.protobuf.Timestamp source_date_epoch = 6;\n}\n\nmessage DiffResponse {\n  // Diff is the descriptor of the diff which can be applied\n  containerd.types.Descriptor diff = 3;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/events/v1/events.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.events.v1;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/empty.proto\";\nimport \"types/event.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/events/v1;events\";\n\nservice Events {\n  // Publish an event to a topic.\n  //\n  // The event will be packed into a timestamp envelope with the namespace\n  // introspected from the context. The envelope will then be dispatched.\n  rpc Publish(PublishRequest) returns (google.protobuf.Empty);\n\n  // Forward sends an event that has already been packaged into an envelope\n  // with a timestamp and namespace.\n  //\n  // This is useful if earlier timestamping is required or when forwarding on\n  // behalf of another component, namespace or publisher.\n  rpc Forward(ForwardRequest) returns (google.protobuf.Empty);\n\n  // Subscribe to a stream of events, possibly returning only that match any\n  // of the provided filters.\n  //\n  // Unlike many other methods in containerd, subscribers will get messages\n  // from all namespaces unless otherwise specified. 
If this is not desired,\n  // a filter can be provided in the format 'namespace==<namespace>' to\n  // restrict the received events.\n  rpc Subscribe(SubscribeRequest) returns (stream containerd.types.Envelope);\n}\n\nmessage PublishRequest {\n  string topic = 1;\n  google.protobuf.Any event = 2;\n}\n\nmessage ForwardRequest {\n  containerd.types.Envelope envelope = 1;\n}\n\nmessage SubscribeRequest {\n  repeated string filters = 1;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/images/v1/images.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.images.v1;\n\nimport \"google/protobuf/empty.proto\";\nimport \"google/protobuf/field_mask.proto\";\nimport \"google/protobuf/timestamp.proto\";\nimport \"types/descriptor.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/images/v1;images\";\n\n// Images is a service that allows one to register images with containerd.\n//\n// In containerd, an image is merely the mapping of a name to a content root,\n// described by a descriptor. 
The behavior and state of image is purely\n// dictated by the type of the descriptor.\n//\n// From the perspective of this service, these references are mostly shallow,\n// in that the existence of the required content won't be validated until\n// required by consuming services.\n//\n// As such, this can really be considered a \"metadata service\".\nservice Images {\n  // Get returns an image by name.\n  rpc Get(GetImageRequest) returns (GetImageResponse);\n\n  // List returns a list of all images known to containerd.\n  rpc List(ListImagesRequest) returns (ListImagesResponse);\n\n  // Create an image record in the metadata store.\n  //\n  // The name of the image must be unique.\n  rpc Create(CreateImageRequest) returns (CreateImageResponse);\n\n  // Update assigns the name to a given target image based on the provided\n  // image.\n  rpc Update(UpdateImageRequest) returns (UpdateImageResponse);\n\n  // Delete deletes the image by name.\n  rpc Delete(DeleteImageRequest) returns (google.protobuf.Empty);\n}\n\nmessage Image {\n  // Name provides a unique name for the image.\n  //\n  // Containerd treats this as the primary identifier.\n  string name = 1;\n\n  // Labels provides free form labels for the image. 
These are runtime only\n  // and do not get inherited into the package image in any way.\n  //\n  // Labels may be updated using the field mask.\n  // The combined size of a key/value pair cannot exceed 4096 bytes.\n  map<string, string> labels = 2;\n\n  // Target describes the content entry point of the image.\n  containerd.types.Descriptor target = 3;\n\n  // CreatedAt is the time the image was first created.\n  google.protobuf.Timestamp created_at = 7;\n\n  // UpdatedAt is the last time the image was mutated.\n  google.protobuf.Timestamp updated_at = 8;\n}\n\nmessage GetImageRequest {\n  string name = 1;\n}\n\nmessage GetImageResponse {\n  Image image = 1;\n}\n\nmessage CreateImageRequest {\n  Image image = 1;\n\n  google.protobuf.Timestamp source_date_epoch = 2;\n}\n\nmessage CreateImageResponse {\n  Image image = 1;\n}\n\nmessage UpdateImageRequest {\n  // Image provides a full or partial image for update.\n  //\n  // The name field must be set or an error will be returned.\n  Image image = 1;\n\n  // UpdateMask specifies which fields to perform the update on. If empty,\n  // the operation applies to all fields.\n  google.protobuf.FieldMask update_mask = 2;\n\n  google.protobuf.Timestamp source_date_epoch = 3;\n}\n\nmessage UpdateImageResponse {\n  Image image = 1;\n}\n\nmessage ListImagesRequest {\n  // Filters contains one or more filters using the syntax defined in the\n  // containerd filter package.\n  //\n  // The returned result will be those that match any of the provided\n  // filters. Expanded, images that match the following will be\n  // returned:\n  //\n  //\tfilters[0] or filters[1] or ... 
or filters[n-1] or filters[n]\n  //\n  // If filters is zero-length or nil, all items will be returned.\n  repeated string filters = 1;\n}\n\nmessage ListImagesResponse {\n  repeated Image images = 1;\n}\n\nmessage DeleteImageRequest {\n  string name = 1;\n\n  // Sync indicates that the delete and cleanup should be done\n  // synchronously before returning to the caller\n  //\n  // Default is false\n  bool sync = 2;\n\n  // Target value for image to be deleted\n  //\n  // If image descriptor does not match the same digest,\n  // the delete operation will return \"not found\" error.\n  optional containerd.types.Descriptor target = 3;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.introspection.v1;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/empty.proto\";\nimport \"google/protobuf/timestamp.proto\";\nimport \"google/rpc/status.proto\";\nimport \"types/introspection.proto\";\nimport \"types/platform.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/introspection/v1;introspection\";\n\nservice Introspection {\n  // Plugins returns a list of plugins in containerd.\n  //\n  // Clients can use this to detect features and capabilities when using\n  // containerd.\n  rpc Plugins(PluginsRequest) returns (PluginsResponse);\n  // Server returns information about the containerd server\n  rpc Server(google.protobuf.Empty) returns (ServerResponse);\n  // PluginInfo returns information directly from a plugin if the plugin supports it\n  rpc PluginInfo(PluginInfoRequest) returns (PluginInfoResponse);\n}\n\nmessage Plugin {\n  // Type defines the type of plugin.\n  //\n  // See package plugin for a list of possible values. 
Non core plugins may\n  // define their own values during registration.\n  string type = 1;\n\n  // ID identifies the plugin uniquely in the system.\n  string id = 2;\n\n  // Requires lists the plugin types required by this plugin.\n  repeated string requires = 3;\n\n  // Platforms enumerates the platforms this plugin will support.\n  //\n  // If values are provided here, the plugin will only be operable under the\n  // provided platforms.\n  //\n  // If this is empty, the plugin will work across all platforms.\n  //\n  // If the plugin prefers certain platforms over others, they should be\n  // listed from most to least preferred.\n  repeated types.Platform platforms = 4;\n\n  // Exports allows plugins to provide values about state or configuration to\n  // interested parties.\n  //\n  // One example is exposing the configured path of a snapshotter plugin.\n  map<string, string> exports = 5;\n\n  // Capabilities allows plugins to communicate feature switches to allow\n  // clients to detect features that may not be on be default or may be\n  // different from version to version.\n  //\n  // Use this sparingly.\n  repeated string capabilities = 6;\n\n  // InitErr will be set if the plugin fails initialization.\n  //\n  // This means the plugin may have been registered but a non-terminal error\n  // was encountered during initialization.\n  //\n  // Plugins that have this value set cannot be used.\n  google.rpc.Status init_err = 7;\n}\n\nmessage PluginsRequest {\n  // Filters contains one or more filters using the syntax defined in the\n  // containerd filter package.\n  //\n  // The returned result will be those that match any of the provided\n  // filters. Expanded, plugins that match the following will be\n  // returned:\n  //\n  //\tfilters[0] or filters[1] or ... 
or filters[n-1] or filters[n]\n  //\n  // If filters is zero-length or nil, all items will be returned.\n  repeated string filters = 1;\n}\n\nmessage PluginsResponse {\n  repeated Plugin plugins = 1;\n}\n\nmessage ServerResponse {\n  string uuid = 1;\n  uint64 pid = 2;\n  uint64 pidns = 3; // PID namespace, such as 4026531836\n  repeated DeprecationWarning deprecations = 4;\n}\n\nmessage DeprecationWarning {\n  string id = 1;\n  string message = 2;\n  google.protobuf.Timestamp last_occurrence = 3;\n}\n\nmessage PluginInfoRequest {\n  string type = 1;\n  string id = 2;\n\n  // Options may be used to request extra dynamic information from\n  // a plugin.\n  // This object is determined by the plugin and the plugin may return\n  // NotImplemented or InvalidArgument if it is not supported\n  google.protobuf.Any options = 3;\n}\n\nmessage PluginInfoResponse {\n  Plugin plugin = 1;\n  google.protobuf.Any extra = 2;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\nsyntax = \"proto3\";\n\npackage containerd.services.leases.v1;\n\nimport \"google/protobuf/empty.proto\";\nimport \"google/protobuf/timestamp.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/leases/v1;leases\";\n\n// Leases service manages resources leases within the metadata store.\nservice Leases {\n  // Create creates a new lease for managing changes to metadata. 
A lease\n  // can be used to protect objects from being removed.\n  rpc Create(CreateRequest) returns (CreateResponse);\n\n  // Delete deletes the lease and makes any unreferenced objects created\n  // during the lease eligible for garbage collection if not referenced\n  // or retained by other resources during the lease.\n  rpc Delete(DeleteRequest) returns (google.protobuf.Empty);\n\n  // List lists all active leases, returning the full list of\n  // leases and optionally including the referenced resources.\n  rpc List(ListRequest) returns (ListResponse);\n\n  // AddResource references the resource by the provided lease.\n  rpc AddResource(AddResourceRequest) returns (google.protobuf.Empty);\n\n  // DeleteResource dereferences the resource by the provided lease.\n  rpc DeleteResource(DeleteResourceRequest) returns (google.protobuf.Empty);\n\n  // ListResources lists all the resources referenced by the lease.\n  rpc ListResources(ListResourcesRequest) returns (ListResourcesResponse);\n}\n\n// Lease is an object which retains resources while it exists.\nmessage Lease {\n  string id = 1;\n\n  google.protobuf.Timestamp created_at = 2;\n\n  map<string, string> labels = 3;\n}\n\nmessage CreateRequest {\n  // ID is used to identity the lease, when the id is not set the service\n  // generates a random identifier for the lease.\n  string id = 1;\n\n  map<string, string> labels = 3;\n}\n\nmessage CreateResponse {\n  Lease lease = 1;\n}\n\nmessage DeleteRequest {\n  string id = 1;\n\n  // Sync indicates that the delete and cleanup should be done\n  // synchronously before returning to the caller\n  //\n  // Default is false\n  bool sync = 2;\n}\n\nmessage ListRequest {\n  repeated string filters = 1;\n}\n\nmessage ListResponse {\n  repeated Lease leases = 1;\n}\n\nmessage Resource {\n  string id = 1;\n\n  // For snapshotter resource, there are many snapshotter types here, like\n  // overlayfs, devmapper etc. 
The type will be formatted with type,\n  // like \"snapshotter/overlayfs\".\n  string type = 2;\n}\n\nmessage AddResourceRequest {\n  string id = 1;\n\n  Resource resource = 2;\n}\n\nmessage DeleteResourceRequest {\n  string id = 1;\n\n  Resource resource = 2;\n}\n\nmessage ListResourcesRequest {\n  string id = 1;\n}\n\nmessage ListResourcesResponse {\n  repeated Resource resources = 1;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/mounts/v1/mounts.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\nsyntax = \"proto3\";\n\npackage containerd.services.mounts.v1;\n\nimport \"google/protobuf/empty.proto\";\nimport \"google/protobuf/field_mask.proto\";\nimport \"types/mount.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/mounts/v1;mounts\";\n\n// Mounts service manages mounts\nservice Mounts {\n  rpc Activate(ActivateRequest) returns (ActivateResponse);\n  rpc Deactivate(DeactivateRequest) returns (google.protobuf.Empty);\n  rpc Info(InfoRequest) returns (InfoResponse);\n  rpc Update(UpdateRequest) returns (UpdateResponse);\n  rpc List(ListRequest) returns (stream ListMessage);\n}\n\nmessage ActivateRequest {\n  string name = 1;\n\n  repeated containerd.types.Mount mounts = 2;\n\n  map<string, string> labels = 3;\n\n  bool temporary = 4;\n}\n\nmessage ActivateResponse {\n  containerd.types.ActivationInfo info = 1;\n}\n\nmessage DeactivateRequest {\n  string name = 1;\n}\n\nmessage InfoRequest {\n  string name = 1;\n}\n\nmessage InfoResponse {\n  containerd.types.ActivationInfo info = 1;\n}\n\nmessage UpdateRequest {\n  containerd.types.ActivationInfo info = 1;\n\n  google.protobuf.FieldMask update_mask = 2;\n}\n\nmessage UpdateResponse {\n  containerd.types.ActivationInfo info = 1;\n}\n\nmessage ListRequest {\n  repeated string filters = 1;\n}\n\nmessage ListMessage {\n  containerd.types.ActivationInfo info = 
1;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.namespaces.v1;\n\nimport \"google/protobuf/empty.proto\";\nimport \"google/protobuf/field_mask.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/namespaces/v1;namespaces\";\n\n// Namespaces provides the ability to manipulate containerd namespaces.\n//\n// All objects in the system are required to be a member of a namespace. 
If a\n// namespace is deleted, all objects, including containers, images and\n// snapshots, will be deleted, as well.\n//\n// Unless otherwise noted, operations in containerd apply only to the namespace\n// supplied per request.\n//\n// I hope this goes without saying, but namespaces are themselves NOT\n// namespaced.\nservice Namespaces {\n  rpc Get(GetNamespaceRequest) returns (GetNamespaceResponse);\n  rpc List(ListNamespacesRequest) returns (ListNamespacesResponse);\n  rpc Create(CreateNamespaceRequest) returns (CreateNamespaceResponse);\n  rpc Update(UpdateNamespaceRequest) returns (UpdateNamespaceResponse);\n  rpc Delete(DeleteNamespaceRequest) returns (google.protobuf.Empty);\n}\n\nmessage Namespace {\n  string name = 1;\n\n  // Labels provides an area to include arbitrary data on namespaces.\n  //\n  // The combined size of a key/value pair cannot exceed 4096 bytes.\n  //\n  // Note that to add a new value to this field, read the existing set and\n  // include the entire result in the update call.\n  map<string, string> labels = 2;\n}\n\nmessage GetNamespaceRequest {\n  string name = 1;\n}\n\nmessage GetNamespaceResponse {\n  Namespace namespace = 1;\n}\n\nmessage ListNamespacesRequest {\n  string filter = 1;\n}\n\nmessage ListNamespacesResponse {\n  repeated Namespace namespaces = 1;\n}\n\nmessage CreateNamespaceRequest {\n  Namespace namespace = 1;\n}\n\nmessage CreateNamespaceResponse {\n  Namespace namespace = 1;\n}\n\n// UpdateNamespaceRequest updates the metadata for a namespace.\n//\n// The operation should follow semantics described in\n// https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-mask,\n// unless otherwise qualified.\nmessage UpdateNamespaceRequest {\n  // Namespace provides the target value, as declared by the mask, for the update.\n  //\n  // The namespace field must be set.\n  Namespace namespace = 1;\n\n  // UpdateMask specifies which fields to perform the update on. 
If empty,\n  // the operation applies to all fields.\n  //\n  // For the most part, this applies only to selectively updating labels on\n  // the namespace. While field masks are typically limited to ascii alphas\n  // and digits, we just take everything after the \"labels.\" as the map key.\n  google.protobuf.FieldMask update_mask = 2;\n}\n\nmessage UpdateNamespaceResponse {\n  Namespace namespace = 1;\n}\n\nmessage DeleteNamespaceRequest {\n  string name = 1;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\n// Sandbox is a v2 runtime extension that allows more complex execution environments for containers.\n// This adds a notion of groups of containers that share same lifecycle and/or resources.\n// A few good fits for sandbox can be:\n// - A \"pause\" container in k8s, that acts as a parent process for child containers to hold network namespace.\n// - (micro)VMs that launch a VM process and executes containers inside guest OS.\n// containerd in this case remains implementation agnostic and delegates sandbox handling to runtimes.\n// See proposal and discussion here: https://github.com/containerd/containerd/issues/4131\npackage containerd.services.sandbox.v1;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/timestamp.proto\";\nimport \"types/metrics.proto\";\nimport \"types/mount.proto\";\nimport \"types/platform.proto\";\nimport \"types/sandbox.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/sandbox/v1;sandbox\";\n\n// Store provides a metadata storage interface for sandboxes. 
Similarly to `Containers`,\n// sandbox object includes info required to start a new instance, but no runtime state.\n// When running a new sandbox instance, store objects are used as base type to create from.\nservice Store {\n  rpc Create(StoreCreateRequest) returns (StoreCreateResponse);\n  rpc Update(StoreUpdateRequest) returns (StoreUpdateResponse);\n  rpc Delete(StoreDeleteRequest) returns (StoreDeleteResponse);\n  rpc List(StoreListRequest) returns (StoreListResponse);\n  rpc Get(StoreGetRequest) returns (StoreGetResponse);\n}\n\nmessage StoreCreateRequest {\n  containerd.types.Sandbox sandbox = 1;\n}\n\nmessage StoreCreateResponse {\n  containerd.types.Sandbox sandbox = 1;\n}\n\nmessage StoreUpdateRequest {\n  containerd.types.Sandbox sandbox = 1;\n  repeated string fields = 2;\n}\n\nmessage StoreUpdateResponse {\n  containerd.types.Sandbox sandbox = 1;\n}\n\nmessage StoreDeleteRequest {\n  string sandbox_id = 1;\n}\n\nmessage StoreDeleteResponse {}\n\nmessage StoreListRequest {\n  repeated string filters = 1;\n}\n\nmessage StoreListResponse {\n  repeated containerd.types.Sandbox list = 1;\n}\n\nmessage StoreGetRequest {\n  string sandbox_id = 1;\n}\n\nmessage StoreGetResponse {\n  containerd.types.Sandbox sandbox = 1;\n}\n\n// Controller is an interface to manage runtime sandbox instances.\nservice Controller {\n  rpc Create(ControllerCreateRequest) returns (ControllerCreateResponse);\n  rpc Start(ControllerStartRequest) returns (ControllerStartResponse);\n  rpc Platform(ControllerPlatformRequest) returns (ControllerPlatformResponse);\n  rpc Stop(ControllerStopRequest) returns (ControllerStopResponse);\n  rpc Wait(ControllerWaitRequest) returns (ControllerWaitResponse);\n  rpc Status(ControllerStatusRequest) returns (ControllerStatusResponse);\n  rpc Shutdown(ControllerShutdownRequest) returns (ControllerShutdownResponse);\n  rpc Metrics(ControllerMetricsRequest) returns (ControllerMetricsResponse);\n  rpc Update(ControllerUpdateRequest) returns 
(ControllerUpdateResponse);\n}\n\nmessage ControllerCreateRequest {\n  string sandbox_id = 1;\n  repeated containerd.types.Mount rootfs = 2;\n  google.protobuf.Any options = 3;\n  string netns_path = 4;\n  map<string, string> annotations = 5;\n  containerd.types.Sandbox sandbox = 6;\n  string sandboxer = 10;\n}\n\nmessage ControllerCreateResponse {\n  string sandbox_id = 1;\n}\n\nmessage ControllerStartRequest {\n  string sandbox_id = 1;\n  string sandboxer = 10;\n}\n\nmessage ControllerStartResponse {\n  string sandbox_id = 1;\n  uint32 pid = 2;\n  google.protobuf.Timestamp created_at = 3;\n  map<string, string> labels = 4;\n  // Address of the sandbox for containerd to connect,\n  // for calling Task or other APIs serving in the sandbox.\n  // it is in the form of ttrpc+unix://path/to/uds or grpc+vsock://<vsock cid>:<port>.\n  string address = 5;\n  uint32 version = 6;\n  google.protobuf.Any spec = 7;\n}\n\nmessage ControllerPlatformRequest {\n  string sandbox_id = 1;\n  string sandboxer = 10;\n}\n\nmessage ControllerPlatformResponse {\n  containerd.types.Platform platform = 1;\n}\n\nmessage ControllerStopRequest {\n  string sandbox_id = 1;\n  uint32 timeout_secs = 2;\n  string sandboxer = 10;\n}\n\nmessage ControllerStopResponse {}\n\nmessage ControllerWaitRequest {\n  string sandbox_id = 1;\n  string sandboxer = 10;\n}\n\nmessage ControllerWaitResponse {\n  uint32 exit_status = 1;\n  google.protobuf.Timestamp exited_at = 2;\n}\n\nmessage ControllerStatusRequest {\n  string sandbox_id = 1;\n  bool verbose = 2;\n  string sandboxer = 10;\n}\n\nmessage ControllerStatusResponse {\n  string sandbox_id = 1;\n  uint32 pid = 2;\n  string state = 3;\n  map<string, string> info = 4;\n  google.protobuf.Timestamp created_at = 5;\n  google.protobuf.Timestamp exited_at = 6;\n  google.protobuf.Any extra = 7;\n  // Address of the sandbox for containerd to connect,\n  // for calling Task or other APIs serving in the sandbox.\n  // it is in the form of ttrpc+unix://path/to/uds or 
grpc+vsock://<vsock cid>:<port>.\n  string address = 8;\n  uint32 version = 9;\n}\n\nmessage ControllerShutdownRequest {\n  string sandbox_id = 1;\n  string sandboxer = 10;\n}\n\nmessage ControllerShutdownResponse {}\n\nmessage ControllerMetricsRequest {\n  string sandbox_id = 1;\n  string sandboxer = 10;\n}\n\nmessage ControllerMetricsResponse {\n  types.Metric metrics = 1;\n}\n\nmessage ControllerUpdateRequest {\n  string sandbox_id = 1;\n  string sandboxer = 2;\n  containerd.types.Sandbox sandbox = 3;\n  repeated string fields = 4;\n}\n\nmessage ControllerUpdateResponse {}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.snapshots.v1;\n\nimport \"google/protobuf/empty.proto\";\nimport \"google/protobuf/field_mask.proto\";\nimport \"google/protobuf/timestamp.proto\";\nimport \"types/mount.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/snapshots/v1;snapshots\";\n\n// Snapshot service manages snapshots\nservice Snapshots {\n  rpc Prepare(PrepareSnapshotRequest) returns (PrepareSnapshotResponse);\n  rpc View(ViewSnapshotRequest) returns (ViewSnapshotResponse);\n  rpc Mounts(MountsRequest) returns (MountsResponse);\n  rpc Commit(CommitSnapshotRequest) returns (google.protobuf.Empty);\n  rpc Remove(RemoveSnapshotRequest) returns (google.protobuf.Empty);\n  rpc Stat(StatSnapshotRequest) returns (StatSnapshotResponse);\n  rpc Update(UpdateSnapshotRequest) returns (UpdateSnapshotResponse);\n  rpc List(ListSnapshotsRequest) returns (stream ListSnapshotsResponse);\n  rpc Usage(UsageRequest) returns (UsageResponse);\n  rpc Cleanup(CleanupRequest) returns (google.protobuf.Empty);\n}\n\nmessage PrepareSnapshotRequest {\n  string snapshotter = 1;\n  string key = 2;\n  string parent = 3;\n\n  // Labels are arbitrary data on snapshots.\n  //\n  // The combined size of a key/value pair cannot exceed 4096 bytes.\n  map<string, string> labels = 4;\n}\n\nmessage PrepareSnapshotResponse {\n  
repeated containerd.types.Mount mounts = 1;\n}\n\nmessage ViewSnapshotRequest {\n  string snapshotter = 1;\n  string key = 2;\n  string parent = 3;\n\n  // Labels are arbitrary data on snapshots.\n  //\n  // The combined size of a key/value pair cannot exceed 4096 bytes.\n  map<string, string> labels = 4;\n}\n\nmessage ViewSnapshotResponse {\n  repeated containerd.types.Mount mounts = 1;\n}\n\nmessage MountsRequest {\n  string snapshotter = 1;\n  string key = 2;\n}\n\nmessage MountsResponse {\n  repeated containerd.types.Mount mounts = 1;\n}\n\nmessage RemoveSnapshotRequest {\n  string snapshotter = 1;\n  string key = 2;\n}\n\nmessage CommitSnapshotRequest {\n  string snapshotter = 1;\n  string name = 2;\n  string key = 3;\n\n  // Labels are arbitrary data on snapshots.\n  //\n  // The combined size of a key/value pair cannot exceed 4096 bytes.\n  map<string, string> labels = 4;\n\n  string parent = 5;\n}\n\nmessage StatSnapshotRequest {\n  string snapshotter = 1;\n  string key = 2;\n}\n\nenum Kind {\n  UNKNOWN = 0;\n  VIEW = 1;\n  ACTIVE = 2;\n  COMMITTED = 3;\n}\n\nmessage Info {\n  string name = 1;\n  string parent = 2;\n  Kind kind = 3;\n\n  // CreatedAt provides the time at which the snapshot was created.\n  google.protobuf.Timestamp created_at = 4;\n\n  // UpdatedAt provides the time the info was last updated.\n  google.protobuf.Timestamp updated_at = 5;\n\n  // Labels are arbitrary data on snapshots.\n  //\n  // The combined size of a key/value pair cannot exceed 4096 bytes.\n  map<string, string> labels = 6;\n}\n\nmessage StatSnapshotResponse {\n  Info info = 1;\n}\n\nmessage UpdateSnapshotRequest {\n  string snapshotter = 1;\n  Info info = 2;\n\n  // UpdateMask specifies which fields to perform the update on. 
If empty,\n  // the operation applies to all fields.\n  //\n  // In info, Name, Parent, Kind, Created are immutable,\n  // other field may be updated using this mask.\n  // If no mask is provided, all mutable field are updated.\n  google.protobuf.FieldMask update_mask = 3;\n}\n\nmessage UpdateSnapshotResponse {\n  Info info = 1;\n}\n\nmessage ListSnapshotsRequest {\n  string snapshotter = 1;\n\n  // Filters contains one or more filters using the syntax defined in the\n  // containerd filter package.\n  //\n  // The returned result will be those that match any of the provided\n  // filters. Expanded, images that match the following will be\n  // returned:\n  //\n  //\tfilters[0] or filters[1] or ... or filters[n-1] or filters[n]\n  //\n  // If filters is zero-length or nil, all items will be returned.\n  repeated string filters = 2;\n}\n\nmessage ListSnapshotsResponse {\n  repeated Info info = 1;\n}\n\nmessage UsageRequest {\n  string snapshotter = 1;\n  string key = 2;\n}\n\nmessage UsageResponse {\n  int64 size = 1;\n  int64 inodes = 2;\n}\n\nmessage CleanupRequest {\n  string snapshotter = 1;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/streaming/v1/streaming.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.streaming.v1;\n\nimport \"google/protobuf/any.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/streaming/v1;streaming\";\n\nservice Streaming {\n  rpc Stream(stream google.protobuf.Any) returns (stream google.protobuf.Any);\n}\n\nmessage StreamInit {\n  string id = 1;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.tasks.v1;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/empty.proto\";\nimport \"google/protobuf/timestamp.proto\";\nimport \"types/descriptor.proto\";\nimport \"types/metrics.proto\";\nimport \"types/mount.proto\";\nimport \"types/task/task.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/tasks/v1;tasks\";\n\nservice Tasks {\n  // Create a task.\n  rpc Create(CreateTaskRequest) returns (CreateTaskResponse);\n\n  // Start a process.\n  rpc Start(StartRequest) returns (StartResponse);\n\n  // Delete a task and on disk state.\n  rpc Delete(DeleteTaskRequest) returns (DeleteResponse);\n\n  rpc DeleteProcess(DeleteProcessRequest) returns (DeleteResponse);\n\n  rpc Get(GetRequest) returns (GetResponse);\n\n  rpc List(ListTasksRequest) returns (ListTasksResponse);\n\n  // Kill a task or process.\n  rpc Kill(KillRequest) returns (google.protobuf.Empty);\n\n  rpc Exec(ExecProcessRequest) returns (google.protobuf.Empty);\n\n  rpc ResizePty(ResizePtyRequest) returns (google.protobuf.Empty);\n\n  rpc CloseIO(CloseIORequest) returns (google.protobuf.Empty);\n\n  rpc Pause(PauseTaskRequest) returns (google.protobuf.Empty);\n\n  rpc Resume(ResumeTaskRequest) returns (google.protobuf.Empty);\n\n  rpc ListPids(ListPidsRequest) returns (ListPidsResponse);\n\n 
 rpc Checkpoint(CheckpointTaskRequest) returns (CheckpointTaskResponse);\n\n  rpc Update(UpdateTaskRequest) returns (google.protobuf.Empty);\n\n  rpc Metrics(MetricsRequest) returns (MetricsResponse);\n\n  rpc Wait(WaitRequest) returns (WaitResponse);\n}\n\nmessage CreateTaskRequest {\n  string container_id = 1;\n\n  // RootFS provides the pre-chroot mounts to perform in the shim before\n  // executing the container task.\n  //\n  // These are for mounts that cannot be performed in the user namespace.\n  // Typically, these mounts should be resolved from snapshots specified on\n  // the container object.\n  repeated containerd.types.Mount rootfs = 3;\n\n  string stdin = 4;\n  string stdout = 5;\n  string stderr = 6;\n  bool terminal = 7;\n\n  containerd.types.Descriptor checkpoint = 8;\n\n  google.protobuf.Any options = 9;\n\n  string runtime_path = 10;\n}\n\nmessage CreateTaskResponse {\n  string container_id = 1;\n  uint32 pid = 2;\n}\n\nmessage StartRequest {\n  string container_id = 1;\n  string exec_id = 2;\n}\n\nmessage StartResponse {\n  uint32 pid = 1;\n}\n\nmessage DeleteTaskRequest {\n  string container_id = 1;\n}\n\nmessage DeleteResponse {\n  string id = 1;\n  uint32 pid = 2;\n  uint32 exit_status = 3;\n  google.protobuf.Timestamp exited_at = 4;\n}\n\nmessage DeleteProcessRequest {\n  string container_id = 1;\n  string exec_id = 2;\n}\n\nmessage GetRequest {\n  string container_id = 1;\n  string exec_id = 2;\n}\n\nmessage GetResponse {\n  containerd.v1.types.Process process = 1;\n}\n\nmessage ListTasksRequest {\n  string filter = 1;\n}\n\nmessage ListTasksResponse {\n  repeated containerd.v1.types.Process tasks = 1;\n}\n\nmessage KillRequest {\n  string container_id = 1;\n  string exec_id = 2;\n  uint32 signal = 3;\n  bool all = 4;\n}\n\nmessage ExecProcessRequest {\n  string container_id = 1;\n  string stdin = 2;\n  string stdout = 3;\n  string stderr = 4;\n  bool terminal = 5;\n  // Spec for starting a process in the target container.\n  //\n  // For 
runc, this is a process spec, for example.\n  google.protobuf.Any spec = 6;\n  // id of the exec process\n  string exec_id = 7;\n}\n\nmessage ExecProcessResponse {}\n\nmessage ResizePtyRequest {\n  string container_id = 1;\n  string exec_id = 2;\n  uint32 width = 3;\n  uint32 height = 4;\n}\n\nmessage CloseIORequest {\n  string container_id = 1;\n  string exec_id = 2;\n  bool stdin = 3;\n}\n\nmessage PauseTaskRequest {\n  string container_id = 1;\n}\n\nmessage ResumeTaskRequest {\n  string container_id = 1;\n}\n\nmessage ListPidsRequest {\n  string container_id = 1;\n}\n\nmessage ListPidsResponse {\n  // Processes includes the process ID and additional process information\n  repeated containerd.v1.types.ProcessInfo processes = 1;\n}\n\nmessage CheckpointTaskRequest {\n  string container_id = 1;\n  string parent_checkpoint = 2;\n  google.protobuf.Any options = 3;\n}\n\nmessage CheckpointTaskResponse {\n  repeated containerd.types.Descriptor descriptors = 1;\n}\n\nmessage UpdateTaskRequest {\n  string container_id = 1;\n  google.protobuf.Any resources = 2;\n  map<string, string> annotations = 3;\n}\n\nmessage MetricsRequest {\n  repeated string filters = 1;\n}\n\nmessage MetricsResponse {\n  repeated types.Metric metrics = 1;\n}\n\nmessage WaitRequest {\n  string container_id = 1;\n  string exec_id = 2;\n}\n\nmessage WaitResponse {\n  uint32 exit_status = 1;\n  google.protobuf.Timestamp exited_at = 2;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/transfer/v1/transfer.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.transfer.v1;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/empty.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/transfer/v1;transfer\";\n\nservice Transfer {\n  rpc Transfer(TransferRequest) returns (google.protobuf.Empty);\n}\n\nmessage TransferRequest {\n  google.protobuf.Any source = 1;\n  google.protobuf.Any destination = 2;\n  TransferOptions options = 3;\n}\n\nmessage TransferOptions {\n  string progress_stream = 1;\n  // Progress min interval\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.events.ttrpc.v1;\n\nimport \"google/protobuf/empty.proto\";\nimport \"types/event.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/ttrpc/events/v1;events\";\n\nservice Events {\n  // Forward sends an event that has already been packaged into an envelope\n  // with a timestamp and namespace.\n  //\n  // This is useful if earlier timestamping is required or when forwarding on\n  // behalf of another component, namespace or publisher.\n  rpc Forward(ForwardRequest) returns (google.protobuf.Empty);\n}\n\nmessage ForwardRequest {\n  containerd.types.Envelope envelope = 1;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/version/v1/version.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.version.v1;\n\nimport \"google/protobuf/empty.proto\";\n\n// TODO(stevvooe): Should version service actually be versioned?\noption go_package = \"github.com/containerd/containerd/api/services/version/v1;version\";\n\nservice Version {\n  rpc Version(google.protobuf.Empty) returns (VersionResponse);\n}\n\nmessage VersionResponse {\n  string version = 1;\n  string revision = 2;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/descriptor.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types;\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\n// Descriptor describes a blob in a content store.\n//\n// This descriptor can be used to reference content from an\n// oci descriptor found in a manifest.\n// See https://godoc.org/github.com/opencontainers/image-spec/specs-go/v1#Descriptor\nmessage Descriptor {\n  string media_type = 1;\n  string digest = 2;\n  int64 size = 3;\n  map<string, string> annotations = 5;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/event.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/timestamp.proto\";\nimport \"types/fieldpath.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\nmessage Envelope {\n  option (containerd.types.fieldpath) = true;\n  google.protobuf.Timestamp timestamp = 1;\n  string namespace = 2;\n  string topic = 3;\n  google.protobuf.Any event = 4;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/fieldpath.proto",
    "content": "// Protocol Buffers for Go with Gadgets\n//\n// Copyright (c) 2013, The GoGo Authors. All rights reserved.\n// http://github.com/gogo/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nsyntax = \"proto3\";\npackage containerd.types;\n\nimport \"google/protobuf/descriptor.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\nextend google.protobuf.FileOptions {\n  optional bool fieldpath_all = 63300;\n}\n\nextend google.protobuf.MessageOptions {\n  optional bool fieldpath = 64400;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/introspection.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types;\n\nimport \"google/protobuf/any.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\nmessage RuntimeRequest {\n  string runtime_path = 1;\n  // Options correspond to CreateTaskRequest.options.\n  // This is needed to pass the runc binary path, etc.\n  google.protobuf.Any options = 2;\n}\n\nmessage RuntimeVersion {\n  string version = 1;\n  string revision = 2;\n}\n\nmessage RuntimeInfo {\n  string name = 1;\n  RuntimeVersion version = 2;\n  // Options correspond to RuntimeInfoRequest.Options (contains runc binary path, etc.)\n  google.protobuf.Any options = 3;\n  // OCI-compatible runtimes should use https://github.com/opencontainers/runtime-spec/blob/main/features.md\n  google.protobuf.Any features = 4;\n  // Annotations of the shim. Irrelevant to features.Annotations.\n  map<string, string> annotations = 5;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/metrics.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/timestamp.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\nmessage Metric {\n  google.protobuf.Timestamp timestamp = 1;\n  string id = 2;\n  google.protobuf.Any data = 3;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/mount.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types;\n\nimport \"google/protobuf/timestamp.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\n// Mount describes mounts for a container.\n//\n// This type is the lingua franca of ContainerD. All services provide mounts\n// to be used with the container at creation time.\n//\n// The Mount type follows the structure of the mount syscall, including a type,\n// source, target and options.\nmessage Mount {\n  // Type defines the nature of the mount.\n  string type = 1;\n\n  // Source specifies the name of the mount. Depending on mount type, this\n  // may be a volume name or a host path, or even ignored.\n  string source = 2;\n\n  // Target path in container\n  string target = 3;\n\n  // Options specifies zero or more fstab style mount options.\n  repeated string options = 4;\n}\n\nmessage ActiveMount {\n  Mount mount = 1;\n\n  google.protobuf.Timestamp mounted_at = 2;\n\n  string mount_point = 3;\n\n  map<string, string> data = 4;\n}\n\nmessage ActivationInfo {\n  string name = 1;\n\n  repeated ActiveMount active = 2;\n\n  repeated Mount system = 3;\n\n  map<string, string> labels = 4;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/platform.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types;\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\n// Platform follows the structure of the OCI platform specification, from\n// descriptors.\nmessage Platform {\n  string os = 1;\n  string architecture = 2;\n  string variant = 3;\n  string os_version = 4;\n  repeated string os_features = 5;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/runc/options/oci.proto",
    "content": "syntax = \"proto3\";\n\npackage containerd.runc.v1;\n\noption go_package = \"github.com/containerd/containerd/api/types/runc/options;options\";\n\nmessage Options {\n  // disable pivot root when creating a container\n  bool no_pivot_root = 1;\n  // create a new keyring for the container\n  bool no_new_keyring = 2;\n  // place the shim in a cgroup\n  string shim_cgroup = 3;\n  // set the I/O's pipes uid\n  uint32 io_uid = 4;\n  // set the I/O's pipes gid\n  uint32 io_gid = 5;\n  // binary name of the runc binary\n  string binary_name = 6;\n  // runc root directory\n  string root = 7;\n  // criu binary path.\n  //\n  // Removed in containerd v2.0: string criu_path = 8;\n  reserved 8;\n  // enable systemd cgroups\n  bool systemd_cgroup = 9;\n  // criu image path\n  string criu_image_path = 10;\n  // criu work path\n  string criu_work_path = 11;\n  // task api address, can be a unix domain socket, or vsock address.\n  // it is in the form of ttrpc+unix://path/to/uds or grpc+vsock://<vsock cid>:<port>.\n  string task_api_address = 12;\n  // task api version, currently supported value is 2 and 3.\n  uint32 task_api_version = 13;\n}\n\nmessage CheckpointOptions {\n  // exit the container after a checkpoint\n  bool exit = 1;\n  // checkpoint open tcp connections\n  bool open_tcp = 2;\n  // checkpoint external unix sockets\n  bool external_unix_sockets = 3;\n  // checkpoint terminals (ptys)\n  bool terminal = 4;\n  // allow checkpointing of file locks\n  bool file_locks = 5;\n  // restore provided namespaces as empty namespaces\n  repeated string empty_namespaces = 6;\n  // set the cgroups mode, soft, full, strict\n  string cgroups_mode = 7;\n  // checkpoint image path\n  string image_path = 8;\n  // checkpoint work path\n  string work_path = 9;\n}\n\nmessage ProcessDetails {\n  // exec process id if the process is managed by a shim\n  string exec_id = 1;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/runtimeoptions/v1/api.proto",
    "content": "// To regenerate api.pb.go run `make protos`\nsyntax = \"proto3\";\n\npackage runtimeoptions.v1;\n\noption go_package = \"github.com/containerd/containerd/api/types/runtimeoptions/v1;runtimeoptions\";\n\nmessage Options {\n  // TypeUrl specifies the type of the content inside the config file.\n  string type_url = 1;\n  // ConfigPath specifies the filesystem location of the config file\n  // used by the runtime.\n  string config_path = 2;\n  // Blob specifies an in-memory TOML blob passed from containerd's configuration section\n  // for this runtime. This will be used if config_path is not specified.\n  bytes config_body = 3;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/sandbox.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/timestamp.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\n// Sandbox represents a sandbox metadata object that keeps all info required by controller to\n// work with a particular instance.\nmessage Sandbox {\n  // SandboxID is a unique instance identifier within namespace\n  string sandbox_id = 1;\n  message Runtime {\n    // Name is the name of the runtime.\n    string name = 1;\n    // Options specify additional runtime initialization options for the shim (this data will be available in StartShim).\n    // Typically this data expected to be runtime shim implementation specific.\n    google.protobuf.Any options = 2;\n  }\n  // Runtime specifies which runtime to use for executing this container.\n  Runtime runtime = 2;\n  // Spec is sandbox configuration (kin of OCI runtime spec), spec's data will be written to a config.json file in the\n  // bundle directory (similary to OCI spec).\n  google.protobuf.Any spec = 3;\n  // Labels provides an area to include arbitrary data on containers.\n  map<string, string> labels = 4;\n  // CreatedAt is the time the container was first created.\n  google.protobuf.Timestamp created_at = 5;\n  // UpdatedAt is the last time the container was mutated.\n  google.protobuf.Timestamp updated_at = 6;\n  // Extensions allow clients to provide optional blobs that can be handled by runtime.\n  map<string, google.protobuf.Any> extensions = 7;\n  // Sandboxer is the name of the sandbox controller who manages the sandbox.\n  string sandboxer = 10;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/task/task.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.v1.types;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/timestamp.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types/task\";\n\nenum Status {\n  UNKNOWN = 0;\n  CREATED = 1;\n  RUNNING = 2;\n  STOPPED = 3;\n  PAUSED = 4;\n  PAUSING = 5;\n}\n\nmessage Process {\n  string container_id = 1;\n  string id = 2;\n  uint32 pid = 3;\n  Status status = 4;\n  string stdin = 5;\n  string stdout = 6;\n  string stderr = 7;\n  bool terminal = 8;\n  uint32 exit_status = 9;\n  google.protobuf.Timestamp exited_at = 10;\n}\n\nmessage ProcessInfo {\n  // PID is the process ID.\n  uint32 pid = 1;\n  // Info contains additional process information.\n  //\n  // Info varies by platform.\n  google.protobuf.Any info = 2;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/transfer/container.proto",
    "content": "/*\n\tCopyright The containerd Authors.\n\n\tLicensed under the Apache License, Version 2.0 (the \"License\");\n\tyou may not use this file except in compliance with the License.\n\tYou may obtain a copy of the License at\n\n\t\thttp://www.apache.org/licenses/LICENSE-2.0\n\n\tUnless required by applicable law or agreed to in writing, software\n\tdistributed under the License is distributed on an \"AS IS\" BASIS,\n\tWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\tSee the License for the specific language governing permissions and\n\tlimitations under the License.\n*/\nsyntax = \"proto3\";\n\n\npackage containerd.types.transfer;\n\noption go_package = \"github.com/containerd/containerd/api/types/transfer\";\n\n// ContainerPath represents a path within an active container's\n// filesystem. It acts as either a source or destination in a transfer\n// operation, identifying the container and path for archive operations.\nmessage ContainerPath {\n\tstring container_id = 1;\n\tstring path = 2;\n\n\t// When true and path is a directory, return only the directory entry\n\t// itself without walking into its contents. This is useful for\n\t// stat-like operations where only the directory's metadata is needed.\n\tbool no_walk = 3;\n\n\t// When true, preserve the UID/GID from tar headers when extracting\n\t// files. When false, extracted files are owned by the extracting\n\t// process.\n\tbool preserve_ownership = 4;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/transfer/imagestore.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types.transfer;\n\nimport \"types/platform.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types/transfer\";\n\nmessage ImageStore {\n  string name = 1;\n  map<string, string> labels = 2;\n\n  // Content filters\n\n  repeated types.Platform platforms = 3;\n  bool all_metadata = 4;\n  uint32 manifest_limit = 5;\n\n  // Import naming\n\n  // extra_references are used to set image names on imports of sub-images from the index\n  repeated ImageReference extra_references = 6;\n\n  // Unpack Configuration, multiple allowed\n\n  repeated UnpackConfiguration unpacks = 10;\n}\n\nmessage UnpackConfiguration {\n  // platform is the platform to unpack for, used for resolving manifest and snapshotter\n  // if not provided\n  types.Platform platform = 1;\n\n  // snapshotter to unpack to, if not provided default for platform shoudl be used\n  string snapshotter = 2;\n}\n\n// ImageReference is used to create or find a reference for an image\nmessage ImageReference {\n  string name = 1;\n\n  // is_prefix determines whether the Name should be considered\n  // a prefix (without tag or digest).\n  // For lookup, this may allow matching multiple tags.\n  // For store, this must have a tag or digest added.\n  bool is_prefix = 2;\n\n  // allow_overwrite allows overwriting or ignoring the name if\n  // another reference is provided (such as through an annotation).\n  // Only used if IsPrefix is true.\n  bool allow_overwrite = 3;\n\n  // add_digest adds the manifest digest to the reference.\n  // For lookup, this allows matching tags with any digest.\n  // For store, this allows adding the digest to the name.\n  // Only used if IsPrefix is true.\n  bool add_digest = 4;\n\n  // skip_named_digest only considers digest references which do not\n  // have a non-digested named reference.\n  // For lookup, this will deduplicate digest references when there is a named match.\n  // For store, this only adds this digest reference when there is no matching full\n  // name reference from the prefix.\n  // Only used if IsPrefix is true.\n  bool skip_named_digest = 5;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/transfer/importexport.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types.transfer;\n\nimport \"types/platform.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types/transfer\";\n\nmessage ImageImportStream {\n  // Stream is used to identify the binary input stream for the import operation.\n  // The stream uses the transfer binary stream protocol with the client as the sender.\n  // The binary data is expected to be a raw tar stream.\n  string stream = 1;\n\n  string media_type = 2;\n\n  bool force_compress = 3;\n}\n\nmessage ImageExportStream {\n  // Stream is used to identify the binary output stream for the export operation.\n  // The stream uses the transfer binary stream protocol with the server as the sender.\n  // The binary data is expected to be a raw tar stream.\n  string stream = 1;\n\n  string media_type = 2;\n\n  // The specified platforms\n  repeated types.Platform platforms = 3;\n  // Whether to include all platforms\n  bool all_platforms = 4;\n  // Skips the creation of the Docker compatible manifest.json file\n  bool skip_compatibility_manifest = 5;\n  // Excludes non-distributable blobs such as Windows base layers.\n  bool skip_non_distributable = 6;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/transfer/progress.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types.transfer;\n\nimport \"types/descriptor.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types/transfer\";\n\nmessage Progress {\n  string event = 1;\n  string name = 2;\n  repeated string parents = 3;\n  int64 progress = 4;\n  int64 total = 5;\n  containerd.types.Descriptor desc = 6;\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/transfer/registry.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types.transfer;\n\nimport \"google/protobuf/timestamp.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types/transfer\";\n\nmessage OCIRegistry {\n  string reference = 1;\n  RegistryResolver resolver = 2;\n}\n\nenum HTTPDebug {\n  DISABLED = 0;\n  // Enable HTTP debugging\n  DEBUG = 1;\n  // Enable HTTP requests tracing\n  TRACE = 2;\n  // Enable both HTTP debugging and requests tracing\n  BOTH = 3;\n}\n\nmessage RegistryResolver {\n  // auth_stream is used to refer to a stream which auth callbacks may be\n  // made on.\n  string auth_stream = 1;\n\n  // Headers\n  map<string, string> headers = 2;\n\n  string host_dir = 3;\n\n  string default_scheme = 4;\n  // Force skip verify\n  // CA callback? Client TLS callback?\n\n  // Whether to debug/trace HTTP requests to OCI registry.\n  HTTPDebug http_debug = 5;\n\n  // Stream ID to use for HTTP logs (when logs are streamed to client).\n  // When empty, logs are written to containerd logs.\n  string logs_stream = 6;\n}\n\n// AuthRequest is sent as a callback on a stream\nmessage AuthRequest {\n  // host is the registry host\n  string host = 1;\n\n  // reference is the namespace and repository name requested from the registry\n  string reference = 2;\n\n  // wwwauthenticate is the HTTP WWW-Authenticate header values returned from the registry\n  repeated string wwwauthenticate = 3;\n}\n\nenum AuthType {\n  NONE = 0;\n\n  // CREDENTIALS is used to exchange username/password for access token\n  // using an oauth or \"Docker Registry Token\" server\n  CREDENTIALS = 1;\n\n  // REFRESH is used to exchange secret for access token using an oauth\n  // or \"Docker Registry Token\" server\n  REFRESH = 2;\n\n  // HEADER is used to set the HTTP Authorization header to secret\n  // directly for the registry.\n  // Value should be `<auth-scheme> <authorization-parameters>`\n  HEADER = 3;\n}\n\nmessage AuthResponse {\n  AuthType authType = 1;\n  string secret = 2;\n  string username = 3;\n  google.protobuf.Timestamp expire_at = 4;\n  // TODO: Stream error\n}\n"
  },
  {
    "path": "crates/client/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/transfer/streaming.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types.transfer;\n\noption go_package = \"github.com/containerd/containerd/api/types/transfer\";\n\nmessage Data {\n  bytes data = 1;\n}\n\nmessage WindowUpdate {\n  int32 update = 1;\n}\n\n// ReadStream carries data from the client to the server (import\n// direction). The client sends data through the stream and the\n// server reads it.\nmessage ReadStream {\n\tstring stream = 1;\n\tstring media_type = 2;\n}\n\n// WriteStream carries data from the server to the client (export\n// direction). The server writes data into the stream and the\n// client receives it.\nmessage WriteStream {\n\tstring stream = 1;\n\tstring media_type = 2;\n}\n"
  },
  {
    "path": "crates/client/vendor/google/protobuf/any.proto",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nsyntax = \"proto3\";\n\npackage google.protobuf;\n\noption csharp_namespace = \"Google.Protobuf.WellKnownTypes\";\noption go_package = \"google.golang.org/protobuf/types/known/anypb\";\noption java_package = \"com.google.protobuf\";\noption java_outer_classname = \"AnyProto\";\noption java_multiple_files = true;\noption objc_class_prefix = \"GPB\";\n\n// `Any` contains an arbitrary serialized protocol buffer message along with a\n// URL that describes the type of the serialized message.\n//\n// Protobuf library provides support to pack/unpack Any values in the form\n// of utility functions or additional generated methods of the Any type.\n//\n// Example 1: Pack and unpack a message in C++.\n//\n//     Foo foo = ...;\n//     Any any;\n//     any.PackFrom(foo);\n//     ...\n//     if (any.UnpackTo(&foo)) {\n//       ...\n//     }\n//\n// Example 2: Pack and unpack a message in Java.\n//\n//     Foo foo = ...;\n//     Any any = Any.pack(foo);\n//     ...\n//     if (any.is(Foo.class)) {\n//       foo = any.unpack(Foo.class);\n//     }\n//\n//  Example 3: Pack and unpack a message in Python.\n//\n//     foo = Foo(...)\n//     any = Any()\n//     any.Pack(foo)\n//     ...\n//     if any.Is(Foo.DESCRIPTOR):\n//       any.Unpack(foo)\n//       ...\n//\n//  Example 4: Pack and unpack a message in Go\n//\n//      foo := &pb.Foo{...}\n//      any, err := anypb.New(foo)\n//      if err != nil {\n//        ...\n//      }\n//      ...\n//      foo := &pb.Foo{}\n//      if err := any.UnmarshalTo(foo); err != nil {\n//        ...\n//      }\n//\n// The pack methods provided by protobuf library will by default use\n// 'type.googleapis.com/full.type.name' as the type URL and the unpack\n// methods only use the fully qualified type name after the last '/'\n// in the type URL, for example \"foo.bar.com/x/y.z\" will yield type\n// name \"y.z\".\n//\n//\n// JSON\n// ====\n// The JSON representation of an `Any` value uses the regular\n// representation of the deserialized, embedded message, with an\n// additional field `@type` which contains the type URL. Example:\n//\n//     package google.profile;\n//     message Person {\n//       string first_name = 1;\n//       string last_name = 2;\n//     }\n//\n//     {\n//       \"@type\": \"type.googleapis.com/google.profile.Person\",\n//       \"firstName\": <string>,\n//       \"lastName\": <string>\n//     }\n//\n// If the embedded message type is well-known and has a custom JSON\n// representation, that representation will be embedded adding a field\n// `value` which holds the custom JSON in addition to the `@type`\n// field. Example (for message [google.protobuf.Duration][]):\n//\n//     {\n//       \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n//       \"value\": \"1.212s\"\n//     }\n//\nmessage Any {\n  // A URL/resource name that uniquely identifies the type of the serialized\n  // protocol buffer message. This string must contain at least\n  // one \"/\" character. The last segment of the URL's path must represent\n  // the fully qualified name of the type (as in\n  // `path/google.protobuf.Duration`). The name should be in a canonical form\n  // (e.g., leading \".\" is not accepted).\n  //\n  // In practice, teams usually precompile into the binary all types that they\n  // expect it to use in the context of Any. However, for URLs which use the\n  // scheme `http`, `https`, or no scheme, one can optionally set up a type\n  // server that maps type URLs to message definitions as follows:\n  //\n  // * If no scheme is provided, `https` is assumed.\n  // * An HTTP GET on the URL must yield a [google.protobuf.Type][]\n  //   value in binary format, or produce an error.\n  // * Applications are allowed to cache lookup results based on the\n  //   URL, or have them precompiled into a binary to avoid any\n  //   lookup. Therefore, binary compatibility needs to be preserved\n  //   on changes to types. (Use versioned type names to manage\n  //   breaking changes.)\n  //\n  // Note: this functionality is not currently available in the official\n  // protobuf release, and it is not used for type URLs beginning with\n  // type.googleapis.com.\n  //\n  // Schemes other than `http`, `https` (or the empty scheme) might be\n  // used with implementation specific semantics.\n  //\n  string type_url = 1;\n\n  // Must be a valid serialized protocol buffer of the above specified type.\n  bytes value = 2;\n}\n"
  },
  {
    "path": "crates/client/vendor/google/protobuf/descriptor.proto",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// The messages in this file describe the definitions found in .proto files.\n// A valid .proto file can be translated directly to a FileDescriptorProto\n// without any other information (e.g. without reading its imports).\n\n\nsyntax = \"proto2\";\n\npackage google.protobuf;\n\noption go_package = \"google.golang.org/protobuf/types/descriptorpb\";\noption java_package = \"com.google.protobuf\";\noption java_outer_classname = \"DescriptorProtos\";\noption csharp_namespace = \"Google.Protobuf.Reflection\";\noption objc_class_prefix = \"GPB\";\noption cc_enable_arenas = true;\n\n// descriptor.proto must be optimized for speed because reflection-based\n// algorithms don't work during bootstrapping.\noption optimize_for = SPEED;\n\n// The protocol compiler can output a FileDescriptorSet containing the .proto\n// files it parses.\nmessage FileDescriptorSet {\n  repeated FileDescriptorProto file = 1;\n}\n\n// Describes a complete .proto file.\nmessage FileDescriptorProto {\n  optional string name = 1;     // file name, relative to root of source tree\n  optional string package = 2;  // e.g. 
\"foo\", \"foo.bar\", etc.\n\n  // Names of files imported by this file.\n  repeated string dependency = 3;\n  // Indexes of the public imported files in the dependency list above.\n  repeated int32 public_dependency = 10;\n  // Indexes of the weak imported files in the dependency list.\n  // For Google-internal migration only. Do not use.\n  repeated int32 weak_dependency = 11;\n\n  // All top-level definitions in this file.\n  repeated DescriptorProto message_type = 4;\n  repeated EnumDescriptorProto enum_type = 5;\n  repeated ServiceDescriptorProto service = 6;\n  repeated FieldDescriptorProto extension = 7;\n\n  optional FileOptions options = 8;\n\n  // This field contains optional information about the original source code.\n  // You may safely remove this entire field without harming runtime\n  // functionality of the descriptors -- the information is needed only by\n  // development tools.\n  optional SourceCodeInfo source_code_info = 9;\n\n  // The syntax of the proto file.\n  // The supported values are \"proto2\" and \"proto3\".\n  optional string syntax = 12;\n}\n\n// Describes a message type.\nmessage DescriptorProto {\n  optional string name = 1;\n\n  repeated FieldDescriptorProto field = 2;\n  repeated FieldDescriptorProto extension = 6;\n\n  repeated DescriptorProto nested_type = 3;\n  repeated EnumDescriptorProto enum_type = 4;\n\n  message ExtensionRange {\n    optional int32 start = 1;  // Inclusive.\n    optional int32 end = 2;    // Exclusive.\n\n    optional ExtensionRangeOptions options = 3;\n  }\n  repeated ExtensionRange extension_range = 5;\n\n  repeated OneofDescriptorProto oneof_decl = 8;\n\n  optional MessageOptions options = 7;\n\n  // Range of reserved tag numbers. Reserved tag numbers may not be used by\n  // fields or extension ranges in the same message. 
Reserved ranges may\n  // not overlap.\n  message ReservedRange {\n    optional int32 start = 1;  // Inclusive.\n    optional int32 end = 2;    // Exclusive.\n  }\n  repeated ReservedRange reserved_range = 9;\n  // Reserved field names, which may not be used by fields in the same message.\n  // A given name may only be reserved once.\n  repeated string reserved_name = 10;\n}\n\nmessage ExtensionRangeOptions {\n  // The parser stores options it doesn't recognize here. See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n\n  // Clients can define custom options in extensions of this message. See above.\n  extensions 1000 to max;\n}\n\n// Describes a field within a message.\nmessage FieldDescriptorProto {\n  enum Type {\n    // 0 is reserved for errors.\n    // Order is weird for historical reasons.\n    TYPE_DOUBLE = 1;\n    TYPE_FLOAT = 2;\n    // Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT64 if\n    // negative values are likely.\n    TYPE_INT64 = 3;\n    TYPE_UINT64 = 4;\n    // Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT32 if\n    // negative values are likely.\n    TYPE_INT32 = 5;\n    TYPE_FIXED64 = 6;\n    TYPE_FIXED32 = 7;\n    TYPE_BOOL = 8;\n    TYPE_STRING = 9;\n    // Tag-delimited aggregate.\n    // Group type is deprecated and not supported in proto3. 
However, Proto3\n    // implementations should still be able to parse the group wire format and\n    // treat group fields as unknown fields.\n    TYPE_GROUP = 10;\n    TYPE_MESSAGE = 11;  // Length-delimited aggregate.\n\n    // New in version 2.\n    TYPE_BYTES = 12;\n    TYPE_UINT32 = 13;\n    TYPE_ENUM = 14;\n    TYPE_SFIXED32 = 15;\n    TYPE_SFIXED64 = 16;\n    TYPE_SINT32 = 17;  // Uses ZigZag encoding.\n    TYPE_SINT64 = 18;  // Uses ZigZag encoding.\n  }\n\n  enum Label {\n    // 0 is reserved for errors\n    LABEL_OPTIONAL = 1;\n    LABEL_REQUIRED = 2;\n    LABEL_REPEATED = 3;\n  }\n\n  optional string name = 1;\n  optional int32 number = 3;\n  optional Label label = 4;\n\n  // If type_name is set, this need not be set.  If both this and type_name\n  // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.\n  optional Type type = 5;\n\n  // For message and enum types, this is the name of the type.  If the name\n  // starts with a '.', it is fully-qualified.  Otherwise, C++-like scoping\n  // rules are used to find the type (i.e. first the nested types within this\n  // message are searched, then within the parent, on up to the root\n  // namespace).\n  optional string type_name = 6;\n\n  // For extensions, this is the name of the type being extended.  It is\n  // resolved in the same manner as type_name.\n  optional string extendee = 2;\n\n  // For numeric types, contains the original text representation of the value.\n  // For booleans, \"true\" or \"false\".\n  // For strings, contains the default text contents (not escaped in any way).\n  // For bytes, contains the C escaped value.  All bytes >= 128 are escaped.\n  // TODO(kenton):  Base-64 encode?\n  optional string default_value = 7;\n\n  // If set, gives the index of a oneof in the containing type's oneof_decl\n  // list.  This field is a member of that oneof.\n  optional int32 oneof_index = 9;\n\n  // JSON name of this field. The value is set by protocol compiler. 
If the\n  // user has set a \"json_name\" option on this field, that option's value\n  // will be used. Otherwise, it's deduced from the field's name by converting\n  // it to camelCase.\n  optional string json_name = 10;\n\n  optional FieldOptions options = 8;\n\n  // If true, this is a proto3 \"optional\". When a proto3 field is optional, it\n  // tracks presence regardless of field type.\n  //\n  // When proto3_optional is true, this field must be belong to a oneof to\n  // signal to old proto3 clients that presence is tracked for this field. This\n  // oneof is known as a \"synthetic\" oneof, and this field must be its sole\n  // member (each proto3 optional field gets its own synthetic oneof). Synthetic\n  // oneofs exist in the descriptor only, and do not generate any API. Synthetic\n  // oneofs must be ordered after all \"real\" oneofs.\n  //\n  // For message fields, proto3_optional doesn't create any semantic change,\n  // since non-repeated message fields always track presence. However it still\n  // indicates the semantic detail of whether the user wrote \"optional\" or not.\n  // This can be useful for round-tripping the .proto file. For consistency we\n  // give message fields a synthetic oneof also, even though it is not required\n  // to track presence. This is especially important because the parser can't\n  // tell if a field is a message or an enum, so it must always create a\n  // synthetic oneof.\n  //\n  // Proto2 optional fields do not set this flag, because they already indicate\n  // optional with `LABEL_OPTIONAL`.\n  optional bool proto3_optional = 17;\n}\n\n// Describes a oneof.\nmessage OneofDescriptorProto {\n  optional string name = 1;\n  optional OneofOptions options = 2;\n}\n\n// Describes an enum type.\nmessage EnumDescriptorProto {\n  optional string name = 1;\n\n  repeated EnumValueDescriptorProto value = 2;\n\n  optional EnumOptions options = 3;\n\n  // Range of reserved numeric values. 
Reserved values may not be used by\n  // entries in the same enum. Reserved ranges may not overlap.\n  //\n  // Note that this is distinct from DescriptorProto.ReservedRange in that it\n  // is inclusive such that it can appropriately represent the entire int32\n  // domain.\n  message EnumReservedRange {\n    optional int32 start = 1;  // Inclusive.\n    optional int32 end = 2;    // Inclusive.\n  }\n\n  // Range of reserved numeric values. Reserved numeric values may not be used\n  // by enum values in the same enum declaration. Reserved ranges may not\n  // overlap.\n  repeated EnumReservedRange reserved_range = 4;\n\n  // Reserved enum value names, which may not be reused. A given name may only\n  // be reserved once.\n  repeated string reserved_name = 5;\n}\n\n// Describes a value within an enum.\nmessage EnumValueDescriptorProto {\n  optional string name = 1;\n  optional int32 number = 2;\n\n  optional EnumValueOptions options = 3;\n}\n\n// Describes a service.\nmessage ServiceDescriptorProto {\n  optional string name = 1;\n  repeated MethodDescriptorProto method = 2;\n\n  optional ServiceOptions options = 3;\n}\n\n// Describes a method of a service.\nmessage MethodDescriptorProto {\n  optional string name = 1;\n\n  // Input and output type names.  These are resolved in the same way as\n  // FieldDescriptorProto.type_name, but must refer to a message type.\n  optional string input_type = 2;\n  optional string output_type = 3;\n\n  optional MethodOptions options = 4;\n\n  // Identifies if client streams multiple client messages\n  optional bool client_streaming = 5 [default = false];\n  // Identifies if server streams multiple server messages\n  optional bool server_streaming = 6 [default = false];\n}\n\n\n// ===================================================================\n// Options\n\n// Each of the definitions above may have \"options\" attached.  
These are\n// just annotations which may cause code to be generated slightly differently\n// or may contain hints for code that manipulates protocol messages.\n//\n// Clients may define custom options as extensions of the *Options messages.\n// These extensions may not yet be known at parsing time, so the parser cannot\n// store the values in them.  Instead it stores them in a field in the *Options\n// message called uninterpreted_option. This field must have the same name\n// across all *Options messages. We then use this field to populate the\n// extensions when we build a descriptor, at which point all protos have been\n// parsed and so all extensions are known.\n//\n// Extension numbers for custom options may be chosen as follows:\n// * For options which will only be used within a single application or\n//   organization, or for experimental options, use field numbers 50000\n//   through 99999.  It is up to you to ensure that you do not use the\n//   same number for multiple options.\n// * For options which will be published and used publicly by multiple\n//   independent entities, e-mail protobuf-global-extension-registry@google.com\n//   to reserve extension numbers. Simply provide your project name (e.g.\n//   Objective-C plugin) and your project website (if available) -- there's no\n//   need to explain how you intend to use them. Usually you only need one\n//   extension number. You can declare multiple options with only one extension\n//   number by putting them in a sub-message. See the Custom Options section of\n//   the docs for examples:\n//   https://developers.google.com/protocol-buffers/docs/proto#options\n//   If this turns out to be popular, a web service will be set up\n//   to automatically assign option numbers.\n\nmessage FileOptions {\n\n  // Sets the Java package where classes generated from this .proto will be\n  // placed.  
By default, the proto package is used, but this is often\n  // inappropriate because proto packages do not normally start with backwards\n  // domain names.\n  optional string java_package = 1;\n\n\n  // Controls the name of the wrapper Java class generated for the .proto file.\n  // That class will always contain the .proto file's getDescriptor() method as\n  // well as any top-level extensions defined in the .proto file.\n  // If java_multiple_files is disabled, then all the other classes from the\n  // .proto file will be nested inside the single wrapper outer class.\n  optional string java_outer_classname = 8;\n\n  // If enabled, then the Java code generator will generate a separate .java\n  // file for each top-level message, enum, and service defined in the .proto\n  // file.  Thus, these types will *not* be nested inside the wrapper class\n  // named by java_outer_classname.  However, the wrapper class will still be\n  // generated to contain the file's getDescriptor() method as well as any\n  // top-level extensions defined in the file.\n  optional bool java_multiple_files = 10 [default = false];\n\n  // This option does nothing.\n  optional bool java_generate_equals_and_hash = 20 [deprecated=true];\n\n  // If set true, then the Java2 code generator will generate code that\n  // throws an exception whenever an attempt is made to assign a non-UTF-8\n  // byte sequence to a string field.\n  // Message reflection will do the same.\n  // However, an extension field still accepts non-UTF-8 byte sequences.\n  // This option has no effect on when used with the lite runtime.\n  optional bool java_string_check_utf8 = 27 [default = false];\n\n\n  // Generated classes can be optimized for speed or code size.\n  enum OptimizeMode {\n    SPEED = 1;         // Generate complete code for parsing, serialization,\n                       // etc.\n    CODE_SIZE = 2;     // Use ReflectionOps to implement these methods.\n    LITE_RUNTIME = 3;  // Generate code using MessageLite 
and the lite runtime.\n  }\n  optional OptimizeMode optimize_for = 9 [default = SPEED];\n\n  // Sets the Go package where structs generated from this .proto will be\n  // placed. If omitted, the Go package will be derived from the following:\n  //   - The basename of the package import path, if provided.\n  //   - Otherwise, the package statement in the .proto file, if present.\n  //   - Otherwise, the basename of the .proto file, without extension.\n  optional string go_package = 11;\n\n\n\n\n  // Should generic services be generated in each language?  \"Generic\" services\n  // are not specific to any particular RPC system.  They are generated by the\n  // main code generators in each language (without additional plugins).\n  // Generic services were the only kind of service generation supported by\n  // early versions of google.protobuf.\n  //\n  // Generic services are now considered deprecated in favor of using plugins\n  // that generate code specific to your particular RPC system.  Therefore,\n  // these default to false.  Old code which depends on generic services should\n  // explicitly set them to true.\n  optional bool cc_generic_services = 16 [default = false];\n  optional bool java_generic_services = 17 [default = false];\n  optional bool py_generic_services = 18 [default = false];\n  optional bool php_generic_services = 42 [default = false];\n\n  // Is this file deprecated?\n  // Depending on the target platform, this can emit Deprecated annotations\n  // for everything in the file, or it will be completely ignored; in the very\n  // least, this is a formalization for deprecating files.\n  optional bool deprecated = 23 [default = false];\n\n  // Enables the use of arenas for the proto messages in this file. This applies\n  // only to generated classes for C++.\n  optional bool cc_enable_arenas = 31 [default = true];\n\n\n  // Sets the objective c class prefix which is prepended to all objective c\n  // generated classes from this .proto. 
There is no default.\n  optional string objc_class_prefix = 36;\n\n  // Namespace for generated classes; defaults to the package.\n  optional string csharp_namespace = 37;\n\n  // By default Swift generators will take the proto package and CamelCase it\n  // replacing '.' with underscore and use that to prefix the types/symbols\n  // defined. When this options is provided, they will use this value instead\n  // to prefix the types/symbols defined.\n  optional string swift_prefix = 39;\n\n  // Sets the php class prefix which is prepended to all php generated classes\n  // from this .proto. Default is empty.\n  optional string php_class_prefix = 40;\n\n  // Use this option to change the namespace of php generated classes. Default\n  // is empty. When this option is empty, the package name will be used for\n  // determining the namespace.\n  optional string php_namespace = 41;\n\n  // Use this option to change the namespace of php generated metadata classes.\n  // Default is empty. When this option is empty, the proto file name will be\n  // used for determining the namespace.\n  optional string php_metadata_namespace = 44;\n\n  // Use this option to change the package of ruby generated classes. Default\n  // is empty. When this option is not set, the package name will be used for\n  // determining the ruby package.\n  optional string ruby_package = 45;\n\n\n  // The parser stores options it doesn't recognize here.\n  // See the documentation for the \"Options\" section above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message.\n  // See the documentation for the \"Options\" section above.\n  extensions 1000 to max;\n\n  reserved 38;\n}\n\nmessage MessageOptions {\n  // Set true to use the old proto1 MessageSet wire format for extensions.\n  // This is provided for backwards-compatibility with the MessageSet wire\n  // format.  
You should not use this for any other reason:  It's less\n  // efficient, has fewer features, and is more complicated.\n  //\n  // The message must be defined exactly as follows:\n  //   message Foo {\n  //     option message_set_wire_format = true;\n  //     extensions 4 to max;\n  //   }\n  // Note that the message cannot have any defined fields; MessageSets only\n  // have extensions.\n  //\n  // All extensions of your type must be singular messages; e.g. they cannot\n  // be int32s, enums, or repeated messages.\n  //\n  // Because this is an option, the above two restrictions are not enforced by\n  // the protocol compiler.\n  optional bool message_set_wire_format = 1 [default = false];\n\n  // Disables the generation of the standard \"descriptor()\" accessor, which can\n  // conflict with a field of the same name.  This is meant to make migration\n  // from proto1 easier; new code should avoid fields named \"descriptor\".\n  optional bool no_standard_descriptor_accessor = 2 [default = false];\n\n  // Is this message deprecated?\n  // Depending on the target platform, this can emit Deprecated annotations\n  // for the message, or it will be completely ignored; in the very least,\n  // this is a formalization for deprecating messages.\n  optional bool deprecated = 3 [default = false];\n\n  reserved 4, 5, 6;\n\n  // Whether the message is an automatically generated map entry type for the\n  // maps field.\n  //\n  // For maps fields:\n  //     map<KeyType, ValueType> map_field = 1;\n  // The parsed descriptor looks like:\n  //     message MapFieldEntry {\n  //         option map_entry = true;\n  //         optional KeyType key = 1;\n  //         optional ValueType value = 2;\n  //     }\n  //     repeated MapFieldEntry map_field = 1;\n  //\n  // Implementations may choose not to generate the map_entry=true message, but\n  // use a native map in the target language to hold the keys and values.\n  // The reflection APIs in such implementations still need to work 
as\n  // if the field is a repeated message field.\n  //\n  // NOTE: Do not set the option in .proto files. Always use the maps syntax\n  // instead. The option should only be implicitly set by the proto compiler\n  // parser.\n  optional bool map_entry = 7;\n\n  reserved 8;  // javalite_serializable\n  reserved 9;  // javanano_as_lite\n\n\n  // The parser stores options it doesn't recognize here. See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message. See above.\n  extensions 1000 to max;\n}\n\nmessage FieldOptions {\n  // The ctype option instructs the C++ code generator to use a different\n  // representation of the field than it normally would.  See the specific\n  // options below.  This option is not yet implemented in the open source\n  // release -- sorry, we'll try to include it in a future version!\n  optional CType ctype = 1 [default = STRING];\n  enum CType {\n    // Default mode.\n    STRING = 0;\n\n    CORD = 1;\n\n    STRING_PIECE = 2;\n  }\n  // The packed option can be enabled for repeated primitive fields to enable\n  // a more efficient representation on the wire. Rather than repeatedly\n  // writing the tag and type for each element, the entire array is encoded as\n  // a single length-delimited blob. In proto3, only explicit setting it to\n  // false will avoid using packed encoding.\n  optional bool packed = 2;\n\n  // The jstype option determines the JavaScript type used for values of the\n  // field.  The option is permitted only for 64 bit integral and fixed types\n  // (int64, uint64, sint64, fixed64, sfixed64).  A field with jstype JS_STRING\n  // is represented as JavaScript string, which avoids loss of precision that\n  // can happen when a large value is converted to a floating point JavaScript.\n  // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to\n  // use the JavaScript \"number\" type.  
The behavior of the default option\n  // JS_NORMAL is implementation dependent.\n  //\n  // This option is an enum to permit additional types to be added, e.g.\n  // goog.math.Integer.\n  optional JSType jstype = 6 [default = JS_NORMAL];\n  enum JSType {\n    // Use the default type.\n    JS_NORMAL = 0;\n\n    // Use JavaScript strings.\n    JS_STRING = 1;\n\n    // Use JavaScript numbers.\n    JS_NUMBER = 2;\n  }\n\n  // Should this field be parsed lazily?  Lazy applies only to message-type\n  // fields.  It means that when the outer message is initially parsed, the\n  // inner message's contents will not be parsed but instead stored in encoded\n  // form.  The inner message will actually be parsed when it is first accessed.\n  //\n  // This is only a hint.  Implementations are free to choose whether to use\n  // eager or lazy parsing regardless of the value of this option.  However,\n  // setting this option true suggests that the protocol author believes that\n  // using lazy parsing on this field is worth the additional bookkeeping\n  // overhead typically needed to implement it.\n  //\n  // This option does not affect the public interface of any generated code;\n  // all method signatures remain the same.  Furthermore, thread-safety of the\n  // interface is not affected by this option; const methods remain safe to\n  // call from multiple threads concurrently, while non-const methods continue\n  // to require exclusive access.\n  //\n  //\n  // Note that implementations may choose not to check required fields within\n  // a lazy sub-message.  That is, calling IsInitialized() on the outer message\n  // may return true even if the inner message has missing required fields.\n  // This is necessary because otherwise the inner message would have to be\n  // parsed in order to perform the check, defeating the purpose of lazy\n  // parsing.  An implementation which chooses not to check required fields\n  // must be consistent about it.  
That is, for any particular sub-message, the\n  // implementation must either *always* check its required fields, or *never*\n  // check its required fields, regardless of whether or not the message has\n  // been parsed.\n  optional bool lazy = 5 [default = false];\n\n  // Is this field deprecated?\n  // Depending on the target platform, this can emit Deprecated annotations\n  // for accessors, or it will be completely ignored; in the very least, this\n  // is a formalization for deprecating fields.\n  optional bool deprecated = 3 [default = false];\n\n  // For Google-internal migration only. Do not use.\n  optional bool weak = 10 [default = false];\n\n\n  // The parser stores options it doesn't recognize here. See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message. See above.\n  extensions 1000 to max;\n\n  reserved 4;  // removed jtype\n}\n\nmessage OneofOptions {\n  // The parser stores options it doesn't recognize here. See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message. See above.\n  extensions 1000 to max;\n}\n\nmessage EnumOptions {\n\n  // Set this option to true to allow mapping different tag names to the same\n  // value.\n  optional bool allow_alias = 2;\n\n  // Is this enum deprecated?\n  // Depending on the target platform, this can emit Deprecated annotations\n  // for the enum, or it will be completely ignored; in the very least, this\n  // is a formalization for deprecating enums.\n  optional bool deprecated = 3 [default = false];\n\n  reserved 5;  // javanano_as_lite\n\n  // The parser stores options it doesn't recognize here. See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message. 
See above.\n  extensions 1000 to max;\n}\n\nmessage EnumValueOptions {\n  // Is this enum value deprecated?\n  // Depending on the target platform, this can emit Deprecated annotations\n  // for the enum value, or it will be completely ignored; in the very least,\n  // this is a formalization for deprecating enum values.\n  optional bool deprecated = 1 [default = false];\n\n  // The parser stores options it doesn't recognize here. See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message. See above.\n  extensions 1000 to max;\n}\n\nmessage ServiceOptions {\n\n  // Note:  Field numbers 1 through 32 are reserved for Google's internal RPC\n  //   framework.  We apologize for hoarding these numbers to ourselves, but\n  //   we were already using them long before we decided to release Protocol\n  //   Buffers.\n\n  // Is this service deprecated?\n  // Depending on the target platform, this can emit Deprecated annotations\n  // for the service, or it will be completely ignored; in the very least,\n  // this is a formalization for deprecating services.\n  optional bool deprecated = 33 [default = false];\n\n  // The parser stores options it doesn't recognize here. See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message. See above.\n  extensions 1000 to max;\n}\n\nmessage MethodOptions {\n\n  // Note:  Field numbers 1 through 32 are reserved for Google's internal RPC\n  //   framework.  
We apologize for hoarding these numbers to ourselves, but\n  //   we were already using them long before we decided to release Protocol\n  //   Buffers.\n\n  // Is this method deprecated?\n  // Depending on the target platform, this can emit Deprecated annotations\n  // for the method, or it will be completely ignored; in the very least,\n  // this is a formalization for deprecating methods.\n  optional bool deprecated = 33 [default = false];\n\n  // Is this method side-effect-free (or safe in HTTP parlance), or idempotent,\n  // or neither? HTTP based RPC implementation may choose GET verb for safe\n  // methods, and PUT verb for idempotent methods instead of the default POST.\n  enum IdempotencyLevel {\n    IDEMPOTENCY_UNKNOWN = 0;\n    NO_SIDE_EFFECTS = 1;  // implies idempotent\n    IDEMPOTENT = 2;       // idempotent, but may have side effects\n  }\n  optional IdempotencyLevel idempotency_level = 34\n      [default = IDEMPOTENCY_UNKNOWN];\n\n  // The parser stores options it doesn't recognize here. See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message. See above.\n  extensions 1000 to max;\n}\n\n\n// A message representing a option the parser does not recognize. This only\n// appears in options protos created by the compiler::Parser class.\n// DescriptorPool resolves these when building Descriptor objects. Therefore,\n// options protos in descriptor objects (e.g. returned by Descriptor::options(),\n// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions\n// in them.\nmessage UninterpretedOption {\n  // The name of the uninterpreted option.  Each string represents a segment in\n  // a dot-separated name.  
is_extension is true iff a segment represents an\n  // extension (denoted with parentheses in options specs in .proto files).\n  // E.g.,{ [\"foo\", false], [\"bar.baz\", true], [\"qux\", false] } represents\n  // \"foo.(bar.baz).qux\".\n  message NamePart {\n    required string name_part = 1;\n    required bool is_extension = 2;\n  }\n  repeated NamePart name = 2;\n\n  // The value of the uninterpreted option, in whatever type the tokenizer\n  // identified it as during parsing. Exactly one of these should be set.\n  optional string identifier_value = 3;\n  optional uint64 positive_int_value = 4;\n  optional int64 negative_int_value = 5;\n  optional double double_value = 6;\n  optional bytes string_value = 7;\n  optional string aggregate_value = 8;\n}\n\n// ===================================================================\n// Optional source code info\n\n// Encapsulates information about the original source file from which a\n// FileDescriptorProto was generated.\nmessage SourceCodeInfo {\n  // A Location identifies a piece of source code in a .proto file which\n  // corresponds to a particular definition.  This information is intended\n  // to be useful to IDEs, code indexers, documentation generators, and similar\n  // tools.\n  //\n  // For example, say we have a file like:\n  //   message Foo {\n  //     optional string foo = 1;\n  //   }\n  // Let's look at just the field definition:\n  //   optional string foo = 1;\n  //   ^       ^^     ^^  ^  ^^^\n  //   a       bc     de  f  ghi\n  // We have the following locations:\n  //   span   path               represents\n  //   [a,i)  [ 4, 0, 2, 0 ]     The whole field definition.\n  //   [a,b)  [ 4, 0, 2, 0, 4 ]  The label (optional).\n  //   [c,d)  [ 4, 0, 2, 0, 5 ]  The type (string).\n  //   [e,f)  [ 4, 0, 2, 0, 1 ]  The name (foo).\n  //   [g,h)  [ 4, 0, 2, 0, 3 ]  The number (1).\n  //\n  // Notes:\n  // - A location may refer to a repeated field itself (i.e. not to any\n  //   particular index within it). 
 This is used whenever a set of elements are\n  //   logically enclosed in a single code segment.  For example, an entire\n  //   extend block (possibly containing multiple extension definitions) will\n  //   have an outer location whose path refers to the \"extensions\" repeated\n  //   field without an index.\n  // - Multiple locations may have the same path.  This happens when a single\n  //   logical declaration is spread out across multiple places.  The most\n  //   obvious example is the \"extend\" block again -- there may be multiple\n  //   extend blocks in the same scope, each of which will have the same path.\n  // - A location's span is not always a subset of its parent's span.  For\n  //   example, the \"extendee\" of an extension declaration appears at the\n  //   beginning of the \"extend\" block and is shared by all extensions within\n  //   the block.\n  // - Just because a location's span is a subset of some other location's span\n  //   does not mean that it is a descendant.  For example, a \"group\" defines\n  //   both a type and a field in a single declaration.  Thus, the locations\n  //   corresponding to the type and field and their components will overlap.\n  // - Code which tries to interpret locations should probably be designed to\n  //   ignore those that it doesn't understand, as more types of locations could\n  //   be recorded in the future.\n  repeated Location location = 1;\n  message Location {\n    // Identifies which part of the FileDescriptorProto was defined at this\n    // location.\n    //\n    // Each element is a field number or an index.  They form a path from\n    // the root FileDescriptorProto to the place where the definition.  
For\n    // example, this path:\n    //   [ 4, 3, 2, 7, 1 ]\n    // refers to:\n    //   file.message_type(3)  // 4, 3\n    //       .field(7)         // 2, 7\n    //       .name()           // 1\n    // This is because FileDescriptorProto.message_type has field number 4:\n    //   repeated DescriptorProto message_type = 4;\n    // and DescriptorProto.field has field number 2:\n    //   repeated FieldDescriptorProto field = 2;\n    // and FieldDescriptorProto.name has field number 1:\n    //   optional string name = 1;\n    //\n    // Thus, the above path gives the location of a field name.  If we removed\n    // the last element:\n    //   [ 4, 3, 2, 7 ]\n    // this path refers to the whole field declaration (from the beginning\n    // of the label to the terminating semicolon).\n    repeated int32 path = 1 [packed = true];\n\n    // Always has exactly three or four elements: start line, start column,\n    // end line (optional, otherwise assumed same as start line), end column.\n    // These are packed into a single field for efficiency.  Note that line\n    // and column numbers are zero-based -- typically you will want to add\n    // 1 to each before displaying to a user.\n    repeated int32 span = 2 [packed = true];\n\n    // If this SourceCodeInfo represents a complete declaration, these are any\n    // comments appearing before and after the declaration which appear to be\n    // attached to the declaration.\n    //\n    // A series of line comments appearing on consecutive lines, with no other\n    // tokens appearing on those lines, will be treated as a single comment.\n    //\n    // leading_detached_comments will keep paragraphs of comments that appear\n    // before (but not connected to) the current element. Each paragraph,\n    // separated by empty lines, will be one comment element in the repeated\n    // field.\n    //\n    // Only the comment content is provided; comment markers (e.g. //) are\n    // stripped out.  
For block comments, leading whitespace and an asterisk\n    // will be stripped from the beginning of each line other than the first.\n    // Newlines are included in the output.\n    //\n    // Examples:\n    //\n    //   optional int32 foo = 1;  // Comment attached to foo.\n    //   // Comment attached to bar.\n    //   optional int32 bar = 2;\n    //\n    //   optional string baz = 3;\n    //   // Comment attached to baz.\n    //   // Another line attached to baz.\n    //\n    //   // Comment attached to qux.\n    //   //\n    //   // Another line attached to qux.\n    //   optional double qux = 4;\n    //\n    //   // Detached comment for corge. This is not leading or trailing comments\n    //   // to qux or corge because there are blank lines separating it from\n    //   // both.\n    //\n    //   // Detached comment for corge paragraph 2.\n    //\n    //   optional string corge = 5;\n    //   /* Block comment attached\n    //    * to corge.  Leading asterisks\n    //    * will be removed. */\n    //   /* Block comment attached to\n    //    * grault. */\n    //   optional int32 grault = 6;\n    //\n    //   // ignored detached comments.\n    optional string leading_comments = 3;\n    optional string trailing_comments = 4;\n    repeated string leading_detached_comments = 6;\n  }\n}\n\n// Describes the relationship between generated code and its original source\n// file. A GeneratedCodeInfo message is associated with only one generated\n// source file, but may contain references to different source .proto files.\nmessage GeneratedCodeInfo {\n  // An Annotation connects some span of text in generated code to an element\n  // of its generating .proto file.\n  repeated Annotation annotation = 1;\n  message Annotation {\n    // Identifies the element in the original source .proto file. 
This field\n    // is formatted the same as SourceCodeInfo.Location.path.\n    repeated int32 path = 1 [packed = true];\n\n    // Identifies the filesystem path to the original source .proto.\n    optional string source_file = 2;\n\n    // Identifies the starting offset in bytes in the generated code\n    // that relates to the identified object.\n    optional int32 begin = 3;\n\n    // Identifies the ending offset in bytes in the generated code that\n    // relates to the identified offset. The end offset should be one past\n    // the last relevant byte (so the length of the text = end - begin).\n    optional int32 end = 4;\n  }\n}\n"
  },
  {
    "path": "crates/client/vendor/google/protobuf/empty.proto",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nsyntax = \"proto3\";\n\npackage google.protobuf;\n\noption csharp_namespace = \"Google.Protobuf.WellKnownTypes\";\noption go_package = \"google.golang.org/protobuf/types/known/emptypb\";\noption java_package = \"com.google.protobuf\";\noption java_outer_classname = \"EmptyProto\";\noption java_multiple_files = true;\noption objc_class_prefix = \"GPB\";\noption cc_enable_arenas = true;\n\n// A generic empty message that you can re-use to avoid defining duplicated\n// empty messages in your APIs. A typical example is to use it as the request\n// or the response type of an API method. For instance:\n//\n//     service Foo {\n//       rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n//     }\n//\n// The JSON representation for `Empty` is empty JSON object `{}`.\nmessage Empty {}\n"
  },
  {
    "path": "crates/client/vendor/google/protobuf/field_mask.proto",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nsyntax = \"proto3\";\n\npackage google.protobuf;\n\noption csharp_namespace = \"Google.Protobuf.WellKnownTypes\";\noption java_package = \"com.google.protobuf\";\noption java_outer_classname = \"FieldMaskProto\";\noption java_multiple_files = true;\noption objc_class_prefix = \"GPB\";\noption go_package = \"google.golang.org/protobuf/types/known/fieldmaskpb\";\noption cc_enable_arenas = true;\n\n// `FieldMask` represents a set of symbolic field paths, for example:\n//\n//     paths: \"f.a\"\n//     paths: \"f.b.d\"\n//\n// Here `f` represents a field in some root message, `a` and `b`\n// fields in the message found in `f`, and `d` a field found in the\n// message in `f.b`.\n//\n// Field masks are used to specify a subset of fields that should be\n// returned by a get operation or modified by an update operation.\n// Field masks also have a custom JSON encoding (see below).\n//\n// # Field Masks in Projections\n//\n// When used in the context of a projection, a response message or\n// sub-message is filtered by the API to only contain those fields as\n// specified in the mask. 
For example, if the mask in the previous\n// example is applied to a response message as follows:\n//\n//     f {\n//       a : 22\n//       b {\n//         d : 1\n//         x : 2\n//       }\n//       y : 13\n//     }\n//     z: 8\n//\n// The result will not contain specific values for fields x,y and z\n// (their value will be set to the default, and omitted in proto text\n// output):\n//\n//\n//     f {\n//       a : 22\n//       b {\n//         d : 1\n//       }\n//     }\n//\n// A repeated field is not allowed except at the last position of a\n// paths string.\n//\n// If a FieldMask object is not present in a get operation, the\n// operation applies to all fields (as if a FieldMask of all fields\n// had been specified).\n//\n// Note that a field mask does not necessarily apply to the\n// top-level response message. In case of a REST get operation, the\n// field mask applies directly to the response, but in case of a REST\n// list operation, the mask instead applies to each individual message\n// in the returned resource list. In case of a REST custom method,\n// other definitions may be used. Where the mask applies will be\n// clearly documented together with its declaration in the API.  In\n// any case, the effect on the returned resource/resources is required\n// behavior for APIs.\n//\n// # Field Masks in Update Operations\n//\n// A field mask in update operations specifies which fields of the\n// targeted resource are going to be updated. The API is required\n// to only change the values of the fields as specified in the mask\n// and leave the others untouched. If a resource is passed in to\n// describe the updated values, the API ignores the values of all\n// fields not covered by the mask.\n//\n// If a repeated field is specified for an update operation, new values will\n// be appended to the existing repeated field in the target resource. 
Note that\n// a repeated field is only allowed in the last position of a `paths` string.\n//\n// If a sub-message is specified in the last position of the field mask for an\n// update operation, then new value will be merged into the existing sub-message\n// in the target resource.\n//\n// For example, given the target message:\n//\n//     f {\n//       b {\n//         d: 1\n//         x: 2\n//       }\n//       c: [1]\n//     }\n//\n// And an update message:\n//\n//     f {\n//       b {\n//         d: 10\n//       }\n//       c: [2]\n//     }\n//\n// then if the field mask is:\n//\n//  paths: [\"f.b\", \"f.c\"]\n//\n// then the result will be:\n//\n//     f {\n//       b {\n//         d: 10\n//         x: 2\n//       }\n//       c: [1, 2]\n//     }\n//\n// An implementation may provide options to override this default behavior for\n// repeated and message fields.\n//\n// In order to reset a field's value to the default, the field must\n// be in the mask and set to the default value in the provided resource.\n// Hence, in order to reset all fields of a resource, provide a default\n// instance of the resource and set all fields in the mask, or do\n// not provide a mask as described below.\n//\n// If a field mask is not present on update, the operation applies to\n// all fields (as if a field mask of all fields has been specified).\n// Note that in the presence of schema evolution, this may mean that\n// fields the client does not know and has therefore not filled into\n// the request will be reset to their default. If this is unwanted\n// behavior, a specific service may require a client to always specify\n// a field mask, producing an error if not.\n//\n// As with get operations, the location of the resource which\n// describes the updated values in the request message depends on the\n// operation kind. 
In any case, the effect of the field mask is\n// required to be honored by the API.\n//\n// ## Considerations for HTTP REST\n//\n// The HTTP kind of an update operation which uses a field mask must\n// be set to PATCH instead of PUT in order to satisfy HTTP semantics\n// (PUT must only be used for full updates).\n//\n// # JSON Encoding of Field Masks\n//\n// In JSON, a field mask is encoded as a single string where paths are\n// separated by a comma. Fields name in each path are converted\n// to/from lower-camel naming conventions.\n//\n// As an example, consider the following message declarations:\n//\n//     message Profile {\n//       User user = 1;\n//       Photo photo = 2;\n//     }\n//     message User {\n//       string display_name = 1;\n//       string address = 2;\n//     }\n//\n// In proto a field mask for `Profile` may look as such:\n//\n//     mask {\n//       paths: \"user.display_name\"\n//       paths: \"photo\"\n//     }\n//\n// In JSON, the same mask is represented as below:\n//\n//     {\n//       mask: \"user.displayName,photo\"\n//     }\n//\n// # Field Masks and Oneof Fields\n//\n// Field masks treat fields in oneofs just as regular fields. Consider the\n// following message:\n//\n//     message SampleMessage {\n//       oneof test_oneof {\n//         string name = 4;\n//         SubMessage sub_message = 9;\n//       }\n//     }\n//\n// The field mask can be:\n//\n//     mask {\n//       paths: \"name\"\n//     }\n//\n// Or:\n//\n//     mask {\n//       paths: \"sub_message\"\n//     }\n//\n// Note that oneof type names (\"test_oneof\" in this case) cannot be used in\n// paths.\n//\n// ## Field Mask Verification\n//\n// The implementation of any API method which has a FieldMask type field in the\n// request should verify the included field paths, and return an\n// `INVALID_ARGUMENT` error if any path is unmappable.\nmessage FieldMask {\n  // The set of field mask paths.\n  repeated string paths = 1;\n}\n"
  },
  {
    "path": "crates/client/vendor/google/protobuf/timestamp.proto",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nsyntax = \"proto3\";\n\npackage google.protobuf;\n\noption csharp_namespace = \"Google.Protobuf.WellKnownTypes\";\noption cc_enable_arenas = true;\noption go_package = \"google.golang.org/protobuf/types/known/timestamppb\";\noption java_package = \"com.google.protobuf\";\noption java_outer_classname = \"TimestampProto\";\noption java_multiple_files = true;\noption objc_class_prefix = \"GPB\";\n\n// A Timestamp represents a point in time independent of any time zone or local\n// calendar, encoded as a count of seconds and fractions of seconds at\n// nanosecond resolution. The count is relative to an epoch at UTC midnight on\n// January 1, 1970, in the proleptic Gregorian calendar which extends the\n// Gregorian calendar backwards to year one.\n//\n// All minutes are 60 seconds long. Leap seconds are \"smeared\" so that no leap\n// second table is needed for interpretation, using a [24-hour linear\n// smear](https://developers.google.com/time/smear).\n//\n// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. 
By\n// restricting to that range, we ensure that we can convert to and from [RFC\n// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.\n//\n// # Examples\n//\n// Example 1: Compute Timestamp from POSIX `time()`.\n//\n//     Timestamp timestamp;\n//     timestamp.set_seconds(time(NULL));\n//     timestamp.set_nanos(0);\n//\n// Example 2: Compute Timestamp from POSIX `gettimeofday()`.\n//\n//     struct timeval tv;\n//     gettimeofday(&tv, NULL);\n//\n//     Timestamp timestamp;\n//     timestamp.set_seconds(tv.tv_sec);\n//     timestamp.set_nanos(tv.tv_usec * 1000);\n//\n// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.\n//\n//     FILETIME ft;\n//     GetSystemTimeAsFileTime(&ft);\n//     UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;\n//\n//     // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z\n//     // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.\n//     Timestamp timestamp;\n//     timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));\n//     timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));\n//\n// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.\n//\n//     long millis = System.currentTimeMillis();\n//\n//     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)\n//         .setNanos((int) ((millis % 1000) * 1000000)).build();\n//\n//\n// Example 5: Compute Timestamp from Java `Instant.now()`.\n//\n//     Instant now = Instant.now();\n//\n//     Timestamp timestamp =\n//         Timestamp.newBuilder().setSeconds(now.getEpochSecond())\n//             .setNanos(now.getNano()).build();\n//\n//\n// Example 6: Compute Timestamp from current time in Python.\n//\n//     timestamp = Timestamp()\n//     timestamp.GetCurrentTime()\n//\n// # JSON Mapping\n//\n// In JSON format, the Timestamp type is encoded as a string in the\n// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. 
That is, the\n// format is \"{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z\"\n// where {year} is always expressed using four digits while {month}, {day},\n// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional\n// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),\n// are optional. The \"Z\" suffix indicates the timezone (\"UTC\"); the timezone\n// is required. A proto3 JSON serializer should always use UTC (as indicated by\n// \"Z\") when printing the Timestamp type and a proto3 JSON parser should be\n// able to accept both UTC and other timezones (as indicated by an offset).\n//\n// For example, \"2017-01-15T01:30:15.01Z\" encodes 15.01 seconds past\n// 01:30 UTC on January 15, 2017.\n//\n// In JavaScript, one can convert a Date object to this format using the\n// standard\n// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)\n// method. In Python, a standard `datetime.datetime` object can be converted\n// to this format using\n// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with\n// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use\n// the Joda Time's [`ISODateTimeFormat.dateTime()`](\n// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D\n// ) to obtain a formatter capable of generating timestamps in this format.\n//\n//\nmessage Timestamp {\n  // Represents seconds of UTC time since Unix epoch\n  // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to\n  // 9999-12-31T23:59:59Z inclusive.\n  int64 seconds = 1;\n\n  // Non-negative fractions of a second at nanosecond resolution. Negative\n  // second values with fractions must still have non-negative nanos values\n  // that count forward in time. Must be from 0 to 999,999,999\n  // inclusive.\n  int32 nanos = 2;\n}\n"
  },
  {
    "path": "crates/client/vendor/google/rpc/status.proto",
    "content": "// Copyright 2020 Google LLC\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\nsyntax = \"proto3\";\n\npackage google.rpc;\n\nimport \"google/protobuf/any.proto\";\n\noption cc_enable_arenas = true;\noption go_package = \"google.golang.org/genproto/googleapis/rpc/status;status\";\noption java_multiple_files = true;\noption java_outer_classname = \"StatusProto\";\noption java_package = \"com.google.rpc\";\noption objc_class_prefix = \"RPC\";\n\n// The `Status` type defines a logical error model that is suitable for\n// different programming environments, including REST APIs and RPC APIs. It is\n// used by [gRPC](https://github.com/grpc). Each `Status` message contains\n// three pieces of data: error code, error message, and error details.\n//\n// You can find out more about this error model and how to work with it in the\n// [API Design Guide](https://cloud.google.com/apis/design/errors).\nmessage Status {\n  // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].\n  int32 code = 1;\n\n  // A developer-facing error message, which should be in English. Any\n  // user-facing error message should be localized and sent in the\n  // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.\n  string message = 2;\n\n  // A list of messages that carry the error details.  
There is a common set of\n  // message types for APIs to use.\n  repeated google.protobuf.Any details = 3;\n}\n"
  },
  {
    "path": "crates/logging/Cargo.toml",
    "content": "[package]\nname = \"containerd-shim-logging\"\nversion = \"0.1.1\"\nauthors = [\n  \"Maksym Pavlenko <pavlenko.maksym@gmail.com>\",\n  \"The containerd Authors\",\n]\ndescription = \"Logger extension for containerd v2 runtime\"\nkeywords = [\"containerd\", \"shim\", \"containers\"]\ncategories = [\"api-bindings\", \"asynchronous\"]\n\nedition.workspace = true\nlicense.workspace = true\nrepository.workspace = true\nhomepage.workspace = true\n\n[features]\ndocs = []\n\n[package.metadata.docs.rs]\nfeatures = [\"docs\"]\n"
  },
  {
    "path": "crates/logging/README.md",
    "content": "# Shim logging binaries for containerd\n\n[![Crates.io](https://img.shields.io/crates/v/containerd-shim-logging)](https://crates.io/crates/containerd-shim-logging)\n[![docs.rs](https://img.shields.io/docsrs/containerd-shim-logging)](https://docs.rs/containerd-shim-logging/latest/containerd_shim_logging/)\n[![Crates.io](https://img.shields.io/crates/l/containerd-shim-logging)](https://github.com/containerd/rust-extensions/blob/main/LICENSE)\n[![CI](https://github.com/containerd/rust-extensions/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/containerd/rust-extensions/actions/workflows/ci.yml)\n\nShim v2 runtime supports pluggable logging binaries via stdio URIs.\nThis crate implement `logging::run` to easy custom logger implementations in Rust.\n\n[containerd Documentation](https://github.com/containerd/containerd/tree/master/core/runtime/v2#logging)\n\n## Example\n\nThere is a journal example available as reference (originally written in Go [here](https://github.com/containerd/containerd/tree/dbef1d56d7ebc05bc4553d72c419ed5ce025b05d/runtime/v2#logging)):\n\n```bash\n# Build\n$ sudo yum install systemd-devel\n$ cargo build --example journal\n\n# Run\n$ ctr i pull docker.io/library/hello-world:latest\n$ ctr run --rm --log-uri=binary:////path/to/journal_binary docker.io/library/hello-world:latest hello\n$ journalctl -f _COMM=journal\n-- Logs begin at Thu 2021-05-20 15:47:51 PDT. 
--\nJul 22 11:53:35 dev journal[3233968]:\nJul 22 11:53:35 dev journal[3233968]: To try something more ambitious, you can run an Ubuntu container with:\nJul 22 11:53:35 dev journal[3233968]:  $ docker run -it ubuntu bash\nJul 22 11:53:35 dev journal[3233968]:\nJul 22 11:53:35 dev journal[3233968]: Share images, automate workflows, and more with a free Docker ID:\nJul 22 11:53:35 dev journal[3233968]:  https://hub.docker.com/\nJul 22 11:53:35 dev journal[3233968]:\nJul 22 11:53:35 dev journal[3233968]: For more examples and ideas, visit:\nJul 22 11:53:35 dev journal[3233968]:  https://docs.docker.com/get-started/\nJul 22 11:53:35 dev journal[3233968]:\n```\n"
  },
  {
    "path": "crates/logging/examples/journal.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::{fs, io, io::BufRead, thread};\n\nuse containerd_shim_logging as logging;\nuse logging::{Config, Driver};\n\nfn pump(reader: fs::File) {\n    io::BufReader::new(reader)\n        .lines()\n        .map_while(Result::ok)\n        .for_each(|_str| {\n            // Write log string to destination here.\n            // For instance with journald:\n            //  systemd::journal::print(0, &str);\n        });\n}\n\nstruct Journal {\n    stdout_handle: thread::JoinHandle<()>,\n    stderr_handle: thread::JoinHandle<()>,\n}\n\nimpl Driver for Journal {\n    type Error = String;\n\n    fn new(config: Config) -> Result<Self, Self::Error> {\n        let stdout = config.stdout;\n        let stderr = config.stderr;\n\n        Ok(Journal {\n            stdout_handle: thread::spawn(|| pump(stdout)),\n            stderr_handle: thread::spawn(|| pump(stderr)),\n        })\n    }\n\n    fn wait(self) -> Result<(), Self::Error> {\n        self.stdout_handle\n            .join()\n            .map_err(|err| format!(\"{:?}\", err))?;\n        self.stderr_handle\n            .join()\n            .map_err(|err| format!(\"{:?}\", err))?;\n        Ok(())\n    }\n}\n\nfn main() {\n    logging::run::<Journal>()\n}\n"
  },
  {
    "path": "crates/logging/src/lib.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\n#![cfg_attr(feature = \"docs\", doc = include_str!(\"../README.md\"))]\n\nuse std::{env, fmt, fs, os::unix::io::FromRawFd, process};\n\n/// Logging binary configuration received from containerd.\n#[derive(Debug)]\npub struct Config {\n    /// Container id.\n    pub id: String,\n    /// Container namespace.\n    pub namespace: String,\n    /// Stdout to forward logs from.\n    pub stdout: fs::File,\n    /// Stderr to forward logs from.\n    pub stderr: fs::File,\n}\n\nimpl Config {\n    /// Creates a new configuration object.\n    ///\n    /// It'll query environment provided by containerd to fill up [Config] structure fields.\n    ///\n    /// # Panics\n    /// Function call will panic if the environment is incorrect (note that this should be happen\n    /// if launched from containerd).\n    ///\n    fn new() -> Config {\n        let id = match env::var(\"CONTAINER_ID\") {\n            Ok(id) => id,\n            Err(_) => handle_err(\"CONTAINER_ID env not found\"),\n        };\n\n        let namespace = match env::var(\"CONTAINER_NAMESPACE\") {\n            Ok(ns) => ns,\n            Err(_) => handle_err(\"CONTAINER_NAMESPACE env not found\"),\n        };\n\n        let stdout = unsafe { fs::File::from_raw_fd(3) };\n        let stderr = unsafe { fs::File::from_raw_fd(4) };\n\n        Config {\n            id,\n            namespace,\n           
 stdout,\n            stderr,\n        }\n    }\n}\n\n/// Signal file wrapper.\n/// containerd uses a file with fd 5 as a signaling mechanism between the daemon and logger process.\n/// This is a wrapper for convenience.\n///\n/// See [logging_unix.go] for details.\n///\n/// [logging_unix.go]: https://github.com/containerd/containerd/blob/dbef1d56d7ebc05bc4553d72c419ed5ce025b05d/runtime/v2/logging/logging_unix.go#L44\nstruct Ready(fs::File);\n\nimpl Ready {\n    fn new() -> Ready {\n        Ready(unsafe { fs::File::from_raw_fd(5) })\n    }\n\n    /// Signal that we are ready and setup for the container to be started.\n    fn signal(self) {\n        drop(self.0)\n    }\n}\n\n/// Driver is a trait to be implemented by v2 logging binaries.\n///\n/// This trait is Rusty alternative to Go's `LoggerFunc`.\n///\n/// # Example\n///\n/// ```rust\n/// use containerd_shim_logging::{Config, Driver};\n///\n/// struct Logger;\n///\n/// impl Driver for Logger {\n///     type Error = ();\n///\n///     // Launch logger threads here.\n///     fn new(config: Config) -> Result<Self, Self::Error> {\n///         Ok(Logger {})\n///     }\n///\n///     // Wait for threads to finish.\n///     // In this example `Logger` will finish immediately.\n///     fn wait(self) -> Result<(), Self::Error> {\n///         Ok(())\n///     }\n/// }\n/// ```\npub trait Driver: Sized {\n    /// The error type to be returned from driver routines if something goes wrong.\n    type Error: fmt::Debug;\n\n    /// Create and run a new binary logger from the provided [Config].\n    ///\n    /// Implementations are expected to start the logger driver (typically by spawning threads).\n    /// Once returned, the crate will signal containerd that we're ready to log.\n    fn new(config: Config) -> Result<Self, Self::Error>;\n\n    /// Wait for the driver to finish.\n    ///\n    /// Once returned from this function, the binary logger process will shutdown.\n    fn wait(self) -> Result<(), Self::Error>;\n}\n\n/// Entry 
point to run the logging driver.\n///\n/// Typically `run` must be called from the `main` function to launch the driver.\npub fn run<D: Driver>() {\n    let config = Config::new();\n    let ready = Ready::new();\n\n    // Initialize log driver\n    let logger = match D::new(config) {\n        Ok(driver) => driver,\n        Err(err) => handle_err(err),\n    };\n\n    // Signal ready to pump log data\n    ready.signal();\n\n    // Run and block until exit\n    if let Err(err) = logger.wait() {\n        handle_err(err)\n    } else {\n        process::exit(0);\n    }\n}\n\n#[inline]\nfn handle_err(err: impl fmt::Debug) -> ! {\n    eprintln!(\"{:?}\", err);\n    process::exit(1);\n}\n"
  },
  {
    "path": "crates/runc/Cargo.toml",
    "content": "[package]\nname = \"runc\"\nversion = \"0.3.0\"\nauthors = [\"Yuna Tomida <ytomida.mmm@gmail.com>\", \"The containerd Authors\"]\ndescription = \"A crate for consuming the runc binary in your Rust applications\"\nkeywords = [\"containerd\", \"containers\", \"runc\"]\ncategories = [\"api-bindings\", \"asynchronous\"]\n\nedition.workspace = true\nlicense.workspace = true\nrepository.workspace = true\nhomepage.workspace = true\n\n[features]\nasync = [\"tokio\", \"async-trait\", \"tokio-pipe\"]\ndocs = []\n\n[dependencies]\nlibc.workspace = true\nlog.workspace = true\nnix = { workspace = true, features = [\"user\", \"fs\"] }\noci-spec = { workspace = true, features = [\"runtime\"] }\nserde = { workspace = true, features = [\"derive\", \"std\"] }\nserde_json = { workspace = true, features = [\"std\"] }\ntempfile.workspace = true\nthiserror.workspace = true\ntime = { workspace = true, features = [\"serde\", \"std\"] }\nuuid = { workspace = true, features = [\"v4\"] }\n\n# Async dependencies\nasync-trait = { workspace = true, optional = true }\ntokio = { workspace = true, features = [\"macros\", \"rt-multi-thread\", \"process\", \"sync\", \"fs\", \"io-util\", \"net\", \"time\"], optional = true }\ntokio-pipe = { version = \"0.2.12\", default-features = false, optional = true }\n\n[package.metadata.docs.rs]\nfeatures = [\"docs\"]\n"
  },
  {
    "path": "crates/runc/README.md",
    "content": "# Rust bindings for runc CLI\n\n[![Crates.io](https://img.shields.io/crates/v/runc)](https://crates.io/crates/runc)\n[![docs.rs](https://img.shields.io/docsrs/runc)](https://docs.rs/runc/latest/runc/)\n[![Crates.io](https://img.shields.io/crates/l/containerd-shim)](https://github.com/containerd/rust-extensions/blob/main/LICENSE)\n[![CI](https://github.com/containerd/rust-extensions/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/containerd/rust-extensions/actions/workflows/ci.yml)\n\nA crate for consuming the runc binary in your Rust applications, similar to [go-runc](https://github.com/containerd/go-runc) for Go.\nThis crate is based on archived [rust-runc](https://github.com/pwFoo/rust-runc).\n\n## Usage\nBoth sync/async version is available.\nYou can build runc client with `RuncConfig` in method chaining style.\nCall `build()` or `build_async()` to get client.\nNote that async client depends on [tokio](https://github.com/tokio-rs/tokio), then please use it on tokio runtime.\n\n```rust,ignore\n#[tokio::main]\nasync fn main() {\n    let config = runc::GlobalOpts::new()\n        .root(\"./new_root\")\n        .debug(false)\n        .log(\"/path/to/logfile.json\")\n        .log_format(runc::LogFormat::Json)\n        .rootless(true);\n\n    let client = config.build_async().unwrap();\n\n    let opts = runc::options::CreateOpts::new()\n        .pid_file(\"/path/to/pid/file\")\n        .no_pivot(true);\n\n    client.create(\"container-id\", \"path/to/bundle\", Some(&opts)).unwrap();\n}\n```\n\n## Limitations\n- Supported commands are only:\n    - create\n    - start\n    - state\n    - kill\n    - delete\n- Exec is **not** available in `RuncAsyncClient` now.\n- Console utilites are **not** available\n    - see [Go version](https://github.com/containerd/go-runc/blob/main/console.go)\n"
  },
  {
    "path": "crates/runc/src/asynchronous/io.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::{fmt::Debug, io::Result, process::Stdio};\n\nuse async_trait::async_trait;\nuse nix::unistd::{Gid, Uid};\nuse tokio::fs::OpenOptions;\n\npub use crate::Io;\nuse crate::{Command, Pipe, PipedIo};\n\n#[derive(Debug, Clone)]\npub struct IOOption {\n    pub open_stdin: bool,\n    pub open_stdout: bool,\n    pub open_stderr: bool,\n}\n\nimpl Default for IOOption {\n    fn default() -> Self {\n        Self {\n            open_stdin: true,\n            open_stdout: true,\n            open_stderr: true,\n        }\n    }\n}\n\nimpl PipedIo {\n    pub fn new(uid: u32, gid: u32, opts: &IOOption) -> std::io::Result<Self> {\n        Ok(Self {\n            stdin: if opts.open_stdin {\n                Self::create_pipe(uid, gid, true)?\n            } else {\n                None\n            },\n            stdout: if opts.open_stdout {\n                Self::create_pipe(uid, gid, true)?\n            } else {\n                None\n            },\n            stderr: if opts.open_stderr {\n                Self::create_pipe(uid, gid, true)?\n            } else {\n                None\n            },\n        })\n    }\n\n    fn create_pipe(uid: u32, gid: u32, stdin: bool) -> std::io::Result<Option<Pipe>> {\n        let pipe = Pipe::new()?;\n        let uid = Some(Uid::from_raw(uid));\n        let gid = Some(Gid::from_raw(gid));\n        if stdin {\n    
        let rd = pipe.rd.try_clone()?;\n            nix::unistd::fchown(rd, uid, gid)?;\n        } else {\n            let wr = pipe\n                .try_clone_wr()\n                .ok_or_else(|| std::io::Error::other(\"write end closed\"))?;\n            nix::unistd::fchown(wr, uid, gid)?;\n        }\n        Ok(Some(pipe))\n    }\n}\n\n/// IO driver to direct output/error messages to /dev/null.\n///\n/// With this Io driver, all methods of [crate::Runc] can't capture the output/error messages.\n#[derive(Debug)]\npub struct NullIo {\n    dev_null: std::sync::Mutex<Option<std::fs::File>>,\n}\n\nimpl NullIo {\n    pub fn new() -> std::io::Result<Self> {\n        let f = std::fs::OpenOptions::new().read(true).open(\"/dev/null\")?;\n        let dev_null = std::sync::Mutex::new(Some(f));\n        Ok(Self { dev_null })\n    }\n}\n\n#[async_trait]\nimpl Io for NullIo {\n    async fn set(&self, cmd: &mut Command) -> std::io::Result<()> {\n        if let Some(null) = self.dev_null.lock().unwrap().as_ref() {\n            cmd.stdout(null.try_clone()?);\n            cmd.stderr(null.try_clone()?);\n        }\n        Ok(())\n    }\n\n    async fn close_after_start(&self) {\n        let mut m = self.dev_null.lock().unwrap();\n        let _ = m.take();\n    }\n}\n\n/// Io driver based on Stdio::inherited(), to direct outputs/errors to stdio.\n///\n/// With this Io driver, all methods of [crate::Runc] can't capture the output/error messages.\n#[derive(Debug)]\npub struct InheritedStdIo {}\n\nimpl InheritedStdIo {\n    pub fn new() -> std::io::Result<Self> {\n        Ok(InheritedStdIo {})\n    }\n}\n\n#[async_trait]\nimpl Io for InheritedStdIo {\n    async fn set(&self, cmd: &mut Command) -> std::io::Result<()> {\n        cmd.stdin(Stdio::null())\n            .stdout(Stdio::inherit())\n            .stderr(Stdio::inherit());\n        Ok(())\n    }\n\n    async fn close_after_start(&self) {}\n}\n\n/// Io driver based on Stdio::piped(), to capture outputs/errors from 
runC.\n///\n/// With this Io driver, methods of [crate::Runc] may capture the output/error messages.\n#[derive(Debug)]\npub struct PipedStdIo {}\n\nimpl PipedStdIo {\n    pub fn new() -> std::io::Result<Self> {\n        Ok(PipedStdIo {})\n    }\n}\n#[async_trait]\nimpl Io for PipedStdIo {\n    async fn set(&self, cmd: &mut Command) -> std::io::Result<()> {\n        cmd.stdin(Stdio::null())\n            .stdout(Stdio::piped())\n            .stderr(Stdio::piped());\n        Ok(())\n    }\n\n    async fn close_after_start(&self) {}\n}\n\n/// FIFO for the scenario that set FIFO for command Io.\n#[derive(Debug)]\npub struct FIFO {\n    pub stdin: Option<String>,\n    pub stdout: Option<String>,\n    pub stderr: Option<String>,\n}\n#[async_trait]\nimpl Io for FIFO {\n    async fn set(&self, cmd: &mut Command) -> Result<()> {\n        if let Some(path) = self.stdin.as_ref() {\n            let stdin = OpenOptions::new()\n                .read(true)\n                .custom_flags(libc::O_NONBLOCK)\n                .open(path)\n                .await?;\n            cmd.stdin(stdin.into_std().await);\n        }\n\n        if let Some(path) = self.stdout.as_ref() {\n            let stdout = OpenOptions::new().write(true).open(path).await?;\n            cmd.stdout(stdout.into_std().await);\n        }\n\n        if let Some(path) = self.stderr.as_ref() {\n            let stderr = OpenOptions::new().write(true).open(path).await?;\n            cmd.stderr(stderr.into_std().await);\n        }\n\n        Ok(())\n    }\n\n    async fn close_after_start(&self) {}\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[cfg(not(target_os = \"macos\"))]\n    #[test]\n    fn test_io_option() {\n        let opts = IOOption {\n            open_stdin: false,\n            open_stdout: false,\n            open_stderr: false,\n        };\n        let io = PipedIo::new(1000, 1000, &opts).unwrap();\n\n        assert!(io.stdin().is_none());\n        assert!(io.stdout().is_none());\n        
assert!(io.stderr().is_none());\n    }\n\n    #[tokio::test]\n    async fn test_null_io() {\n        let io = NullIo::new().unwrap();\n        assert!(io.stdin().is_none());\n        assert!(io.stdout().is_none());\n        assert!(io.stderr().is_none());\n    }\n}\n"
  },
  {
    "path": "crates/runc/src/asynchronous/mod.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\npub mod io;\nmod pipe;\nmod runc;\nuse std::{fmt::Debug, io::Result, os::fd::AsRawFd};\n\nuse async_trait::async_trait;\npub use pipe::Pipe;\npub use runc::{DefaultExecutor, Spawner};\nuse tokio::io::{AsyncRead, AsyncWrite};\n\nuse crate::Command;\n#[async_trait]\npub trait Io: Debug + Send + Sync {\n    fn stdin(&self) -> Option<Box<dyn AsyncWrite + Send + Sync + Unpin>> {\n        None\n    }\n\n    fn stdout(&self) -> Option<Box<dyn AsyncRead + Send + Sync + Unpin>> {\n        None\n    }\n\n    fn stderr(&self) -> Option<Box<dyn AsyncRead + Send + Sync + Unpin>> {\n        None\n    }\n\n    /// Set IO for passed command.\n    /// Read side of stdin, write side of stdout and write side of stderr should be provided to command.\n    async fn set(&self, cmd: &mut Command) -> Result<()>;\n\n    /// Only close write side (should be stdout/err \"from\" runc process)\n    async fn close_after_start(&self);\n}\n\n#[derive(Debug)]\npub struct PipedIo {\n    pub stdin: Option<Pipe>,\n    pub stdout: Option<Pipe>,\n    pub stderr: Option<Pipe>,\n}\n#[async_trait]\nimpl Io for PipedIo {\n    fn stdin(&self) -> Option<Box<dyn AsyncWrite + Send + Sync + Unpin>> {\n        self.stdin.as_ref().and_then(|pipe| {\n            pipe.wr_as_raw_fd().and_then(|fd| {\n                tokio_pipe::PipeWrite::from_raw_fd_checked(fd)\n                    .map(|x| 
Box::new(x) as Box<dyn AsyncWrite + Send + Sync + Unpin>)\n                    .ok()\n            })\n        })\n    }\n\n    fn stdout(&self) -> Option<Box<dyn AsyncRead + Send + Sync + Unpin>> {\n        self.stdout.as_ref().and_then(|pipe| {\n            let fd = pipe.rd.as_raw_fd();\n            tokio_pipe::PipeRead::from_raw_fd_checked(fd)\n                .map(|x| Box::new(x) as Box<dyn AsyncRead + Send + Sync + Unpin>)\n                .ok()\n        })\n    }\n\n    fn stderr(&self) -> Option<Box<dyn AsyncRead + Send + Sync + Unpin>> {\n        self.stderr.as_ref().and_then(|pipe| {\n            let fd = pipe.rd.as_raw_fd();\n            tokio_pipe::PipeRead::from_raw_fd_checked(fd)\n                .map(|x| Box::new(x) as Box<dyn AsyncRead + Send + Sync + Unpin>)\n                .ok()\n        })\n    }\n\n    // Note that this internally use [`std::fs::File`]'s `try_clone()`.\n    // Thus, the files passed to commands will be not closed after command exit.\n    async fn set(&self, cmd: &mut Command) -> std::io::Result<()> {\n        if let Some(p) = self.stdin.as_ref() {\n            let pr = p.rd.try_clone()?;\n            cmd.stdin(pr);\n        }\n\n        if let Some(p) = self.stdout.as_ref() {\n            let pw = p\n                .try_clone_wr()\n                .ok_or_else(|| std::io::Error::other(\"write end closed\"))?;\n            cmd.stdout(pw);\n        }\n\n        if let Some(p) = self.stderr.as_ref() {\n            let pw = p\n                .try_clone_wr()\n                .ok_or_else(|| std::io::Error::other(\"write end closed\"))?;\n            cmd.stderr(pw);\n        }\n\n        Ok(())\n    }\n\n    async fn close_after_start(&self) {\n        if let Some(p) = self.stdout.as_ref() {\n            p.close_wr();\n        }\n\n        if let Some(p) = self.stderr.as_ref() {\n            p.close_wr();\n        }\n    }\n}\n"
  },
  {
    "path": "crates/runc/src/asynchronous/pipe.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::{\n    os::unix::io::{AsRawFd, OwnedFd, RawFd},\n    sync::Mutex,\n};\n\n/// Struct to represent a pipe that can be used to transfer stdio inputs and outputs.\n///\n/// With this Io driver, methods of [crate::Runc] may capture the output/error messages.\n/// When one side of the pipe is closed, the state will be represented with [`None`].\n#[derive(Debug)]\npub struct Pipe {\n    pub rd: OwnedFd,\n    wr: Mutex<Option<OwnedFd>>,\n}\n\nimpl Pipe {\n    pub fn new() -> std::io::Result<Self> {\n        let (rd, wr) = std::io::pipe()?;\n        Ok(Self {\n            rd: OwnedFd::from(rd),\n            wr: Mutex::new(Some(OwnedFd::from(wr))),\n        })\n    }\n\n    /// Return the raw fd of the write end. Returns `None` if closed.\n    pub fn wr_as_raw_fd(&self) -> Option<RawFd> {\n        self.wr.lock().unwrap().as_ref().map(|w| w.as_raw_fd())\n    }\n\n    /// Clone the write end. Returns `None` if closed.\n    pub fn try_clone_wr(&self) -> Option<OwnedFd> {\n        self.wr\n            .lock()\n            .unwrap()\n            .as_ref()\n            .and_then(|w| w.try_clone().ok())\n    }\n\n    /// Close the write end by dropping it. No-op if already closed.\n    pub fn close_wr(&self) {\n        let _ = self.wr.lock().unwrap().take();\n    }\n\n    /// Take ownership of the write end. 
Returns `None` if already closed.\n    pub fn take_wr(&self) -> Option<OwnedFd> {\n        self.wr.lock().unwrap().take()\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::os::fd::IntoRawFd;\n\n    use tokio::{\n        io::{AsyncReadExt, AsyncWriteExt},\n        net::unix::pipe,\n    };\n\n    use super::*;\n\n    #[tokio::test]\n    async fn test_pipe_creation() {\n        let pipe = Pipe::new().expect(\"Failed to create pipe\");\n        let wr = pipe.take_wr().unwrap();\n        assert!(\n            pipe.rd.into_raw_fd() >= 0,\n            \"Read file descriptor is invalid\"\n        );\n        assert!(wr.into_raw_fd() >= 0, \"Write file descriptor is invalid\");\n    }\n\n    #[tokio::test]\n    async fn test_pipe_write_read() {\n        let pipe = Pipe::new().expect(\"Failed to create pipe\");\n        let mut write_end = pipe::Sender::from_owned_fd(pipe.take_wr().unwrap()).unwrap();\n        let mut read_end = pipe::Receiver::from_owned_fd(pipe.rd).unwrap();\n        let write_data = b\"hello\";\n\n        write_end\n            .write_all(write_data)\n            .await\n            .expect(\"Failed to write to pipe\");\n\n        let mut read_data = vec![0; write_data.len()];\n        read_end\n            .read_exact(&mut read_data)\n            .await\n            .expect(\"Failed to read from pipe\");\n\n        assert_eq!(\n            read_data, write_data,\n            \"Data read from pipe does not match data written\"\n        );\n    }\n\n    #[tokio::test]\n    async fn test_pipe_async_write_read() {\n        let pipe = Pipe::new().expect(\"Failed to create pipe\");\n        let mut write_end = pipe::Sender::from_owned_fd(pipe.take_wr().unwrap()).unwrap();\n        let mut read_end = pipe::Receiver::from_owned_fd(pipe.rd).unwrap();\n\n        let write_data = b\"hello\";\n        tokio::spawn(async move {\n            write_end\n                .write_all(write_data)\n                .await\n                .expect(\"Failed to write to 
pipe\");\n        });\n\n        let mut read_data = vec![0; write_data.len()];\n        read_end\n            .read_exact(&mut read_data)\n            .await\n            .expect(\"Failed to read from pipe\");\n\n        assert_eq!(\n            &read_data, write_data,\n            \"Data read from pipe does not match data written\"\n        );\n    }\n}\n"
  },
  {
    "path": "crates/runc/src/asynchronous/runc.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::{fmt::Debug, path::Path, process::ExitStatus};\n\nuse async_trait::async_trait;\nuse log::debug;\nuse oci_spec::runtime::{LinuxResources, Process};\n\nuse crate::{\n    container::Container,\n    error::Error,\n    events,\n    options::*,\n    utils::{self, write_value_to_temp_file},\n    Command, Response, Result, Runc,\n};\n\n// a macro tool to cleanup the file with name $filename,\n// there is no async drop in async rust, so we have to call remove_file everytime\n// after a temp file created, before return of a function.\n// with this macro we don't have to write the match case codes everytime.\nmacro_rules! 
tc {\n    ($b:expr, $filename: expr) => {\n        match $b {\n            Ok(r) => r,\n            Err(e) => {\n                let _ = tokio::fs::remove_file($filename).await;\n                return Err(e);\n            }\n        }\n    };\n}\n\n/// Async implementation for [Runc].\n///\n/// Note that you MUST use this client on tokio runtime, as this client internally use [`tokio::process::Command`]\n/// and some other utilities.\nimpl Runc {\n    pub(crate) async fn launch(&self, mut cmd: Command, combined_output: bool) -> Result<Response> {\n        debug!(\"Execute command {:?}\", cmd);\n        unsafe {\n            cmd.pre_exec(move || {\n                #[cfg(target_os = \"linux\")]\n                if let Ok(thp) = std::env::var(\"THP_DISABLED\") {\n                    if let Ok(thp_disabled) = thp.parse::<bool>() {\n                        let ret = libc::prctl(\n                            libc::PR_SET_THP_DISABLE,\n                            if thp_disabled { 1u64 } else { 0u64 },\n                            0,\n                            0,\n                            0,\n                        );\n                        if ret < 0 {\n                            debug!(\"set_thp_disable err: {}\", std::io::Error::last_os_error());\n                        }\n                    }\n                }\n                Ok(())\n            });\n        }\n\n        let (status, pid, stdout, stderr) = self.spawner.execute(cmd).await?;\n        if status.success() {\n            let output = if combined_output {\n                stdout + stderr.as_str()\n            } else {\n                stdout\n            };\n            Ok(Response {\n                pid,\n                status,\n                output,\n            })\n        } else {\n            Err(Error::CommandFailed {\n                status,\n                stdout,\n                stderr,\n            })\n        }\n    }\n\n    /// Create a new container\n    pub async fn 
create<P>(\n        &self,\n        id: &str,\n        bundle: P,\n        opts: Option<&CreateOpts>,\n    ) -> Result<Response>\n    where\n        P: AsRef<Path>,\n    {\n        let mut args = vec![\n            \"create\".to_string(),\n            \"--bundle\".to_string(),\n            utils::abs_string(bundle)?,\n        ];\n        if let Some(opts) = opts {\n            args.append(&mut opts.args()?);\n        }\n        args.push(id.to_string());\n        let mut cmd = self.command(&args)?;\n        match opts {\n            Some(CreateOpts { io: Some(io), .. }) => {\n                io.set(&mut cmd).await.map_err(Error::UnavailableIO)?;\n                let res = self.launch(cmd, true).await?;\n                io.close_after_start().await;\n                Ok(res)\n            }\n            _ => self.launch(cmd, true).await,\n        }\n    }\n\n    /// Delete a container\n    pub async fn delete(&self, id: &str, opts: Option<&DeleteOpts>) -> Result<()> {\n        let mut args = vec![\"delete\".to_string()];\n        if let Some(opts) = opts {\n            args.append(&mut opts.args());\n        }\n        args.push(id.to_string());\n        let _ = self.launch(self.command(&args)?, true).await?;\n        Ok(())\n    }\n\n    /// Return an event stream of container notifications\n    pub async fn events(&self, _id: &str, _interval: &std::time::Duration) -> Result<()> {\n        Err(Error::Unimplemented(\"events\".to_string()))\n    }\n\n    /// Execute an additional process inside the container\n    pub async fn exec(&self, id: &str, spec: &Process, opts: Option<&ExecOpts>) -> Result<()> {\n        let f = write_value_to_temp_file(spec).await?;\n        let mut args = vec![\"exec\".to_string(), \"--process\".to_string(), f.clone()];\n        if let Some(opts) = opts {\n            args.append(&mut tc!(opts.args(), &f));\n        }\n        args.push(id.to_string());\n        let mut cmd = self.command(&args)?;\n        match opts {\n            
Some(ExecOpts { io: Some(io), .. }) => {\n                tc!(\n                    io.set(&mut cmd)\n                        .await\n                        .map_err(|e| Error::IoSet(e.to_string())),\n                    &f\n                );\n                tc!(self.launch(cmd, true).await, &f);\n                io.close_after_start().await;\n            }\n            _ => {\n                tc!(self.launch(cmd, true).await, &f);\n            }\n        }\n        let _ = tokio::fs::remove_file(&f).await;\n        Ok(())\n    }\n\n    /// Send the specified signal to processes inside the container\n    pub async fn kill(&self, id: &str, sig: u32, opts: Option<&KillOpts>) -> Result<()> {\n        let mut args = vec![\"kill\".to_string()];\n        if let Some(opts) = opts {\n            args.append(&mut opts.args());\n        }\n        args.push(id.to_string());\n        args.push(sig.to_string());\n        let _ = self.launch(self.command(&args)?, true).await?;\n        Ok(())\n    }\n\n    /// List all containers associated with this runc instance\n    pub async fn list(&self) -> Result<Vec<Container>> {\n        let args = [\"list\".to_string(), \"--format=json\".to_string()];\n        let res = self.launch(self.command(&args)?, true).await?;\n        let output = res.output.trim();\n\n        // Ugly hack to work around golang\n        Ok(if output == \"null\" {\n            Vec::new()\n        } else {\n            serde_json::from_str(output).map_err(Error::JsonDeserializationFailed)?\n        })\n    }\n\n    /// Pause a container\n    pub async fn pause(&self, id: &str) -> Result<()> {\n        let args = [\"pause\".to_string(), id.to_string()];\n        let _ = self.launch(self.command(&args)?, true).await?;\n        Ok(())\n    }\n\n    /// Resume a container\n    pub async fn resume(&self, id: &str) -> Result<()> {\n        let args = [\"resume\".to_string(), id.to_string()];\n        let _ = self.launch(self.command(&args)?, true).await?;\n        
Ok(())\n    }\n\n    pub async fn checkpoint(&self) -> Result<()> {\n        Err(Error::Unimplemented(\"checkpoint\".to_string()))\n    }\n\n    pub async fn restore(&self) -> Result<()> {\n        Err(Error::Unimplemented(\"restore\".to_string()))\n    }\n\n    /// List all the processes inside the container, returning their pids\n    pub async fn ps(&self, id: &str) -> Result<Vec<usize>> {\n        let args = [\n            \"ps\".to_string(),\n            \"--format=json\".to_string(),\n            id.to_string(),\n        ];\n        let res = self.launch(self.command(&args)?, true).await?;\n        let output = res.output.trim();\n\n        // Ugly hack to work around golang\n        Ok(if output == \"null\" {\n            Vec::new()\n        } else {\n            serde_json::from_str(output).map_err(Error::JsonDeserializationFailed)?\n        })\n    }\n\n    /// Run the create, start, delete lifecycle of the container and return its exit status\n    pub async fn run<P>(&self, id: &str, bundle: P, opts: Option<&CreateOpts>) -> Result<()>\n    where\n        P: AsRef<Path>,\n    {\n        let mut args = vec![\n            \"run\".to_string(),\n            \"--bundle\".to_string(),\n            utils::abs_string(bundle)?,\n        ];\n        if let Some(opts) = opts {\n            args.append(&mut opts.args()?);\n        }\n        args.push(id.to_string());\n        let mut cmd = self.command(&args)?;\n        if let Some(CreateOpts { io: Some(io), .. 
}) = opts {\n            io.set(&mut cmd)\n                .await\n                .map_err(|e| Error::IoSet(e.to_string()))?;\n        };\n        let _ = self.launch(cmd, true).await?;\n        Ok(())\n    }\n\n    /// Start an already created container\n    pub async fn start(&self, id: &str) -> Result<()> {\n        let args = vec![\"start\".to_string(), id.to_string()];\n        let _ = self.launch(self.command(&args)?, true).await?;\n        Ok(())\n    }\n\n    /// Return the state of a container\n    pub async fn state(&self, id: &str) -> Result<Container> {\n        let args = vec![\"state\".to_string(), id.to_string()];\n        let res = self.launch(self.command(&args)?, true).await?;\n        serde_json::from_str(&res.output).map_err(Error::JsonDeserializationFailed)\n    }\n\n    /// Return the latest statistics for a container\n    pub async fn stats(&self, id: &str) -> Result<events::Stats> {\n        let args = vec![\"events\".to_string(), \"--stats\".to_string(), id.to_string()];\n        let res = self.launch(self.command(&args)?, true).await?;\n        let event: events::Event =\n            serde_json::from_str(&res.output).map_err(Error::JsonDeserializationFailed)?;\n        if let Some(stats) = event.stats {\n            Ok(stats)\n        } else {\n            Err(Error::MissingContainerStats)\n        }\n    }\n\n    /// Update a container with the provided resource spec\n    pub async fn update(&self, id: &str, resources: &LinuxResources) -> Result<()> {\n        let f = write_value_to_temp_file(resources).await?;\n        let args = [\n            \"update\".to_string(),\n            \"--resources\".to_string(),\n            f.to_string(),\n            id.to_string(),\n        ];\n        let _ = tc!(self.launch(self.command(&args)?, true).await, &f);\n        let _ = tokio::fs::remove_file(&f).await;\n        Ok(())\n    }\n}\n\n#[async_trait]\npub trait Spawner: Debug {\n    async fn execute(&self, cmd: Command) -> Result<(ExitStatus, 
u32, String, String)>;\n}\n\n#[derive(Debug)]\npub struct DefaultExecutor {}\n\n#[async_trait]\nimpl Spawner for DefaultExecutor {\n    async fn execute(&self, cmd: Command) -> Result<(ExitStatus, u32, String, String)> {\n        let mut cmd = cmd;\n        let child = cmd.spawn().map_err(Error::ProcessSpawnFailed)?;\n        let pid = child.id().unwrap();\n        let result = child\n            .wait_with_output()\n            .await\n            .map_err(Error::InvalidCommand)?;\n        let status = result.status;\n        let stdout = String::from_utf8_lossy(&result.stdout).to_string();\n        let stderr = String::from_utf8_lossy(&result.stderr).to_string();\n        Ok((status, pid, stdout, stderr))\n    }\n}\n\n#[cfg(test)]\n#[cfg(target_os = \"linux\")]\nmod tests {\n    use std::sync::Arc;\n\n    use crate::{\n        error::Error,\n        io::{InheritedStdIo, PipedStdIo},\n        options::{CreateOpts, DeleteOpts, GlobalOpts},\n        Runc,\n    };\n\n    fn ok_client() -> Runc {\n        GlobalOpts::new()\n            .command(\"/bin/true\")\n            .build()\n            .expect(\"unable to create runc instance\")\n    }\n\n    fn fail_client() -> Runc {\n        GlobalOpts::new()\n            .command(\"/bin/false\")\n            .build()\n            .expect(\"unable to create runc instance\")\n    }\n\n    fn echo_client() -> Runc {\n        GlobalOpts::new()\n            .command(\"/bin/echo\")\n            .build()\n            .expect(\"unable to create runc instance\")\n    }\n\n    #[tokio::test]\n    async fn test_async_create() {\n        let opts = CreateOpts::new();\n        let ok_runc = ok_client();\n        let ok_task = tokio::spawn(async move {\n            let response = ok_runc\n                .create(\"fake-id\", \"fake-bundle\", Some(&opts))\n                .await\n                .expect(\"true failed.\");\n            assert_ne!(response.pid, 0);\n            assert!(response.status.success());\n            
assert!(response.output.is_empty());\n        });\n\n        let opts = CreateOpts::new();\n        let fail_runc = fail_client();\n        let fail_task = tokio::spawn(async move {\n            match fail_runc\n                .create(\"fake-id\", \"fake-bundle\", Some(&opts))\n                .await\n            {\n                Ok(_) => panic!(\"fail_runc returned exit status 0.\"),\n                Err(Error::CommandFailed {\n                    status,\n                    stdout,\n                    stderr,\n                }) => {\n                    if status.code().unwrap() == 1 && stdout.is_empty() && stderr.is_empty() {\n                        eprintln!(\"fail_runc succeeded.\");\n                    } else {\n                        panic!(\"unexpected outputs from fail_runc.\")\n                    }\n                }\n                Err(e) => panic!(\"unexpected error from fail_runc: {:?}\", e),\n            }\n        });\n\n        ok_task.await.expect(\"ok_task failed.\");\n        fail_task.await.expect(\"fail_task unexpectedly succeeded.\");\n    }\n\n    #[tokio::test]\n    async fn test_async_start() {\n        let ok_runc = ok_client();\n        let ok_task = tokio::spawn(async move {\n            ok_runc.start(\"fake-id\").await.expect(\"true failed.\");\n            eprintln!(\"ok_runc succeeded.\");\n        });\n\n        let fail_runc = fail_client();\n        let fail_task = tokio::spawn(async move {\n            match fail_runc.start(\"fake-id\").await {\n                Ok(_) => panic!(\"fail_runc returned exit status 0.\"),\n                Err(Error::CommandFailed {\n                    status,\n                    stdout,\n                    stderr,\n                }) => {\n                    if status.code().unwrap() == 1 && stdout.is_empty() && stderr.is_empty() {\n                        eprintln!(\"fail_runc succeeded.\");\n                    } else {\n                        panic!(\"unexpected outputs from 
fail_runc.\")\n                    }\n                }\n                Err(e) => panic!(\"unexpected error from fail_runc: {:?}\", e),\n            }\n        });\n\n        ok_task.await.expect(\"ok_task failed.\");\n        fail_task.await.expect(\"fail_task unexpectedly succeeded.\");\n    }\n\n    #[tokio::test]\n    async fn test_async_run() {\n        let opts = CreateOpts::new();\n        let ok_runc = ok_client();\n        tokio::spawn(async move {\n            ok_runc\n                .create(\"fake-id\", \"fake-bundle\", Some(&opts))\n                .await\n                .expect(\"true failed.\");\n            eprintln!(\"ok_runc succeeded.\");\n        });\n\n        let opts = CreateOpts::new();\n        let fail_runc = fail_client();\n        tokio::spawn(async move {\n            match fail_runc\n                .create(\"fake-id\", \"fake-bundle\", Some(&opts))\n                .await\n            {\n                Ok(_) => panic!(\"fail_runc returned exit status 0.\"),\n                Err(Error::CommandFailed {\n                    status,\n                    stdout,\n                    stderr,\n                }) => {\n                    if status.code().unwrap() == 1 && stdout.is_empty() && stderr.is_empty() {\n                        eprintln!(\"fail_runc succeeded.\");\n                    } else {\n                        panic!(\"unexpected outputs from fail_runc.\")\n                    }\n                }\n                Err(e) => panic!(\"unexpected error from fail_runc: {:?}\", e),\n            }\n        })\n        .await\n        .expect(\"tokio spawn falied.\");\n    }\n\n    #[tokio::test]\n    async fn test_async_delete() {\n        let opts = DeleteOpts::new();\n        let ok_runc = ok_client();\n        tokio::spawn(async move {\n            ok_runc\n                .delete(\"fake-id\", Some(&opts))\n                .await\n                .expect(\"true failed.\");\n            eprintln!(\"ok_runc succeeded.\");\n     
   });\n\n        let opts = DeleteOpts::new();\n        let fail_runc = fail_client();\n        tokio::spawn(async move {\n            match fail_runc.delete(\"fake-id\", Some(&opts)).await {\n                Ok(_) => panic!(\"fail_runc returned exit status 0.\"),\n                Err(Error::CommandFailed {\n                    status,\n                    stdout,\n                    stderr,\n                }) => {\n                    if status.code().unwrap() == 1 && stdout.is_empty() && stderr.is_empty() {\n                        eprintln!(\"fail_runc succeeded.\");\n                    } else {\n                        panic!(\"unexpected outputs from fail_runc.\")\n                    }\n                }\n                Err(e) => panic!(\"unexpected error from fail_runc: {:?}\", e),\n            }\n        })\n        .await\n        .expect(\"tokio spawn falied.\");\n    }\n\n    #[tokio::test]\n    async fn test_async_output() {\n        // test create cmd with inherit Io, expect empty cmd output\n        let mut opts = CreateOpts::new();\n        opts.io = Some(Arc::new(InheritedStdIo::new().unwrap()));\n        let echo_runc = echo_client();\n        let response = echo_runc\n            .create(\"fake-id\", \"fake-bundle\", Some(&opts))\n            .await\n            .expect(\"echo failed:\");\n        assert_ne!(response.pid, 0);\n        assert!(response.status.success());\n        assert!(response.output.is_empty());\n\n        // test create cmd with pipe Io, expect nonempty cmd output\n        let mut opts = CreateOpts::new();\n        opts.io = Some(Arc::new(PipedStdIo::new().unwrap()));\n        let response = echo_runc\n            .create(\"fake-id\", \"fake-bundle\", Some(&opts))\n            .await\n            .expect(\"echo failed:\");\n        assert_ne!(response.pid, 0);\n        assert!(response.status.success());\n        assert!(!response.output.is_empty());\n    }\n}\n"
  },
  {
    "path": "crates/runc/src/container.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\n// Forked from https://github.com/pwFoo/rust-runc/blob/313e6ae5a79b54455b0a242a795c69adf035141a/src/lib.rs\n\n/*\n * Copyright 2020 fsyncd, Berlin, Germany.\n * Additional material, copyright of the containerd authors.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nuse std::collections::HashMap;\n\nuse serde::{Deserialize, Serialize};\nuse time::{serde::timestamp, OffsetDateTime};\n\n/// Information for runc container\n#[derive(Debug, Serialize, Deserialize)]\npub struct Container {\n    pub id: String,\n    pub pid: usize,\n    pub status: String,\n    pub bundle: String,\n    pub rootfs: String,\n    #[serde(with = \"timestamp\")]\n    pub created: OffsetDateTime,\n    pub annotations: HashMap<String, String>,\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn serde_test() {\n        let j = 
r#\"\n            {\n                \"id\": \"fake\",\n                \"pid\": 1000,\n                \"status\": \"RUNNING\",\n                \"bundle\": \"/path/to/bundle\",\n                \"rootfs\": \"/path/to/rootfs\",\n                \"created\": 1431684000,\n                \"annotations\": {\n                    \"foo\": \"bar\"\n                }\n            }\"#;\n\n        let c: Container = serde_json::from_str(j).unwrap();\n        assert_eq!(c.id, \"fake\");\n        assert_eq!(c.pid, 1000);\n        assert_eq!(c.status, \"RUNNING\");\n        assert_eq!(c.bundle, \"/path/to/bundle\");\n        assert_eq!(c.rootfs, \"/path/to/rootfs\");\n        assert_eq!(\n            c.created,\n            OffsetDateTime::from_unix_timestamp(1431684000).unwrap()\n        );\n        assert_eq!(c.annotations.get(\"foo\"), Some(&\"bar\".to_string()));\n        assert_eq!(c.annotations.get(\"bar\"), None);\n    }\n}\n"
  },
  {
    "path": "crates/runc/src/error.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\n// Forked from https://github.com/pwFoo/rust-runc/blob/313e6ae5a79b54455b0a242a795c69adf035141a/src/lib.rs\n\n/*\n * Copyright 2020 fsyncd, Berlin, Germany.\n * Additional material, copyright of the containerd authors.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nuse std::{env, io, process::ExitStatus};\n\nuse thiserror::Error;\n\n#[derive(Error, Debug)]\npub enum Error {\n    #[error(\"Unable to extract test files: {0}\")]\n    BundleExtractFailed(io::Error),\n\n    #[error(\"Invalid path: {0}\")]\n    InvalidPath(io::Error),\n\n    #[error(transparent)]\n    JsonDeserializationFailed(#[from] serde_json::error::Error),\n\n    #[error(\"Missing container statistics\")]\n    MissingContainerStats,\n\n    #[error(transparent)]\n    ProcessSpawnFailed(io::Error),\n\n    #[error(\"Error occured in runc: {0}\")]\n    
InvalidCommand(io::Error),\n\n    #[error(\"Runc command failed: status={status}, stdout=\\\"{stdout}\\\", stderr=\\\"{stderr}\\\"\")]\n    CommandFailed {\n        status: ExitStatus,\n        stdout: String,\n        stderr: String,\n    },\n\n    #[error(\"Runc IO unavailable: {0}\")]\n    UnavailableIO(io::Error),\n\n    #[cfg(feature = \"async\")]\n    #[error(\"Runc command timed out: {0}\")]\n    CommandTimeout(tokio::time::error::Elapsed),\n\n    #[error(\"Unable to parse runc version\")]\n    InvalidVersion,\n\n    #[error(\"Unable to locate the runc\")]\n    NotFound,\n\n    #[error(\"Error occurs with fs: {0}\")]\n    FileSystemError(io::Error),\n\n    #[error(\"Failed to spec file: {0}\")]\n    SpecFileCreationFailed(io::Error),\n\n    #[error(transparent)]\n    SpecFileCleanupFailed(io::Error),\n\n    #[error(\"Failed to find valid path for spec file\")]\n    SpecFileNotFound,\n\n    #[error(\"Top command is missing a pid header\")]\n    TopMissingPidHeader,\n\n    #[error(\"Top command returned an empty response\")]\n    TopShortResponseError,\n\n    #[error(\"Unix socket connection error: {0}\")]\n    UnixSocketConnectionFailed(io::Error),\n\n    #[error(\"Unable to bind to unix socket: {0}\")]\n    UnixSocketBindFailed(io::Error),\n\n    #[error(\"Unix socket failed to receive pty\")]\n    UnixSocketReceiveMessageFailed,\n\n    #[error(\"Unix socket unexpectedly closed\")]\n    UnixSocketClosed,\n\n    #[error(\"Failed to handle environment variable: {0}\")]\n    EnvError(env::VarError),\n\n    #[error(\"Sorry, this part of api is not implemented: {0}\")]\n    Unimplemented(String),\n\n    #[error(\"Error occured in runc client: {0}\")]\n    Other(Box<dyn std::error::Error + Send>),\n\n    #[error(\"Failed to set cmd io: {0}\")]\n    IoSet(String),\n\n    #[error(\"Failed to create dir: {0}\")]\n    CreateDir(nix::Error),\n}\n"
  },
  {
    "path": "crates/runc/src/events.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\n// Forked from https://github.com/pwFoo/rust-runc/blob/313e6ae5a79b54455b0a242a795c69adf035141a/src/events.rs\n\n/*\n * Copyright 2020 fsyncd, Berlin, Germany.\n * Additional material, copyright of the containerd authors.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nuse std::collections::HashMap;\n\nuse serde::{Deserialize, Serialize};\n\n/// Event type generated by runc\n#[derive(Debug, Clone, Serialize, Deserialize)]\n#[serde(rename_all(serialize = \"lowercase\", deserialize = \"lowercase\"))]\npub enum EventType {\n    /// Statistics\n    Stats,\n    /// Out of memory\n    Oom,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct Event {\n    #[serde(rename = \"type\")]\n    pub event_type: EventType,\n    pub id: String,\n    #[serde(rename = \"data\")]\n    pub stats: 
Option<Stats>,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct Stats {\n    pub cpu: Cpu,\n    pub memory: Memory,\n    pub pids: Pids,\n    #[serde(rename = \"blkio\")]\n    pub block_io: BlkIO,\n    #[serde(rename = \"hugetlb\")]\n    pub huge_tlb: HugeTLB,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct HugeTLB {\n    pub usage: Option<u64>,\n    pub max: Option<u64>,\n    #[serde(rename = \"failcnt\")]\n    pub fail_count: u64,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct BlkIOEntry {\n    pub major: Option<u64>,\n    pub minor: Option<u64>,\n    pub op: Option<String>,\n    pub value: Option<u64>,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct BlkIO {\n    /// Number of bytes transferred to and from the disk\n    #[serde(rename = \"ioServiceBytesRecursive\")]\n    pub io_service_bytes_recursive: Option<Vec<BlkIOEntry>>,\n    /// Number of io requests issued to the disk\n    #[serde(rename = \"ioServicedRecursive\")]\n    pub io_serviced_recursive: Option<Vec<BlkIOEntry>>,\n    /// Number of queued disk io requests\n    #[serde(rename = \"ioQueueRecursive\")]\n    pub io_queued_recursive: Option<Vec<BlkIOEntry>>,\n    /// Amount of time io requests took to service\n    #[serde(rename = \"ioServiceTimeRecursive\")]\n    pub io_service_time_recursive: Option<Vec<BlkIOEntry>>,\n    /// Amount of time io requests spent waiting in the queue\n    #[serde(rename = \"ioWaitTimeRecursive\")]\n    pub io_wait_time_recursive: Option<Vec<BlkIOEntry>>,\n    /// Number of merged io requests\n    #[serde(rename = \"ioMergedRecursive\")]\n    pub io_merged_recursive: Option<Vec<BlkIOEntry>>,\n    /// Disk time allocated the device\n    #[serde(rename = \"ioTimeRecursive\")]\n    pub io_time_recursive: Option<Vec<BlkIOEntry>>,\n    /// Number of sectors transferred to and from the io device\n    #[serde(rename = \"sectorsRecursive\")]\n    pub sectors_recursive: 
Option<Vec<BlkIOEntry>>,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct Pids {\n    /// Number of pids in the cgroup\n    pub current: Option<u64>,\n    /// Active pids hard limit\n    pub limit: Option<u64>,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct Throttling {\n    /// Number of periods with throttling active\n    pub periods: Option<u64>,\n    #[serde(rename = \"throttledPeriods\")]\n    /// Number of periods when the container hit its throttling limit\n    pub throtted_periods: Option<u64>,\n    /// Aggregate time the container was throttled for in nanoseconds\n    #[serde(rename = \"throttledTime\")]\n    pub throtted_time: Option<u64>,\n}\n\n/// Each members represents time in nanoseconds\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct CpuUsage {\n    /// Total CPU time consumed\n    pub total: Option<u64>,\n    /// Total CPU time consumed per core\n    pub per_cpu: Option<Vec<u64>>,\n    /// Total CPU time consumed in kernel mode\n    pub kernel: u64,\n    /// Total CPU time consumed in user mode\n    pub user: u64,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct Cpu {\n    pub usage: Option<u64>,\n    pub throttling: Option<Throttling>,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct MemoryEntry {\n    /// Memory limit in bytes\n    pub limit: u64,\n    /// Usage in bytes\n    pub usage: Option<u64>,\n    /// Maximum usage in bytes\n    pub max: Option<u64>,\n    /// Count of memory allocation failures\n    #[serde(rename = \"failcnt\")]\n    pub fail_count: u64,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct Memory {\n    /// Memory usage for cache\n    pub cache: Option<u64>,\n    /// Overall memory usage, excluding swap\n    pub usage: Option<MemoryEntry>,\n    /// Overall memory usage, including swap\n    pub swap: Option<MemoryEntry>,\n    /// Kernel usage of memory\n    pub kernel: Option<MemoryEntry>,\n    /// Kernel TCP of memory\n    
#[serde(rename = \"kernelTCP\")]\n    pub kernel_tcp: Option<MemoryEntry>,\n    /// Raw stats of memory\n    pub raw: Option<HashMap<String, u64>>,\n}\n"
  },
  {
    "path": "crates/runc/src/lib.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\n// Forked from https://github.com/pwFoo/rust-runc/blob/313e6ae5a79b54455b0a242a795c69adf035141a/src/lib.rs\n\n/*\n * Copyright 2020 fsyncd, Berlin, Germany.\n * Additional material, copyright of the containerd authors.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#![cfg_attr(feature = \"docs\", doc = include_str!(\"../README.md\"))]\n\n//! A crate for consuming the runc binary in your Rust applications, similar to\n//! 
[go-runc](https://github.com/containerd/go-runc) for Go.\nuse std::{\n    fmt::{self, Debug, Display},\n    path::PathBuf,\n    process::{ExitStatus, Stdio},\n    sync::Arc,\n};\n\n#[cfg(feature = \"async\")]\npub use crate::asynchronous::*;\n#[cfg(not(feature = \"async\"))]\npub use crate::synchronous::*;\n\n#[cfg(feature = \"async\")]\npub mod asynchronous;\npub mod container;\npub mod error;\npub mod events;\n#[cfg(not(feature = \"async\"))]\npub mod synchronous;\n\n#[cfg(feature = \"async\")]\npub mod monitor;\npub mod options;\npub mod utils;\n\nconst JSON: &str = \"json\";\nconst TEXT: &str = \"text\";\n\npub type Result<T> = std::result::Result<T, crate::error::Error>;\n\n/// Response is for (pid, exit status, outputs).\n#[derive(Debug, Clone)]\npub struct Response {\n    pub pid: u32,\n    pub status: ExitStatus,\n    pub output: String,\n}\n\n#[derive(Debug, Clone)]\npub struct Version {\n    pub runc_version: Option<String>,\n    pub spec_version: Option<String>,\n    pub commit: Option<String>,\n}\n\n#[derive(Debug, Clone, Default)]\npub enum LogFormat {\n    Json,\n    #[default]\n    Text,\n}\n\nimpl Display for LogFormat {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        match self {\n            LogFormat::Json => write!(f, \"{}\", JSON),\n            LogFormat::Text => write!(f, \"{}\", TEXT),\n        }\n    }\n}\n\n#[cfg(not(feature = \"async\"))]\npub type Command = std::process::Command;\n\n#[cfg(feature = \"async\")]\npub type Command = tokio::process::Command;\n\n#[derive(Debug, Clone)]\npub struct Runc {\n    command: PathBuf,\n    args: Vec<String>,\n    spawner: Arc<dyn Spawner + Send + Sync>,\n}\n\nimpl Runc {\n    fn command(&self, args: &[String]) -> Result<Command> {\n        let args = [&self.args, args].concat();\n        let mut cmd = Command::new(&self.command);\n\n        // Default to piped stdio, and they may be override by command options.\n        cmd.stdin(Stdio::null())\n            
.stdout(Stdio::piped())\n            .stderr(Stdio::piped());\n\n        // NOTIFY_SOCKET introduces a special behavior in runc but should only be set if invoked from systemd\n        cmd.args(&args).env_remove(\"NOTIFY_SOCKET\");\n\n        Ok(cmd)\n    }\n}\n"
  },
  {
    "path": "crates/runc/src/monitor.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::process::{ExitStatus, Output};\n\nuse async_trait::async_trait;\nuse log::error;\nuse time::OffsetDateTime;\nuse tokio::{\n    process::Command,\n    sync::oneshot::{channel, Receiver, Sender},\n};\n\nuse crate::error::Error;\n\n/// A trait for spawning and waiting for a process.\n///\n/// The design is different from Go's, because if you return a `Sender` in [ProcessMonitor::start()]\n/// and want to use it in [ProcessMonitor::wait()], then start and wait cannot be executed\n/// concurrently. 
Alternatively, let the caller prepare the communication channel for\n/// [ProcessMonitor::start()] and [ProcessMonitor::wait()] so they could be executed concurrently.\n#[async_trait]\npub trait ProcessMonitor {\n    /// Spawn a process and return its output.\n    ///\n    /// In order to capture the output/error, it is necessary for the caller to create new pipes\n    /// between parent and child.\n    /// Use [tokio::process::Command::stdout(Stdio::piped())](https://docs.rs/tokio/1.16.1/tokio/process/struct.Command.html#method.stdout)\n    /// and/or [tokio::process::Command::stderr(Stdio::piped())](https://docs.rs/tokio/1.16.1/tokio/process/struct.Command.html#method.stderr)\n    /// respectively, when creating the [Command](https://docs.rs/tokio/1.16.1/tokio/process/struct.Command.html#).\n    async fn start(&self, mut cmd: Command, tx: Sender<Exit>) -> std::io::Result<Output> {\n        let chi = cmd.spawn()?;\n        // Safe to expect() because wait() hasn't been called yet, dependence on tokio internal\n        // implementation details.\n        let pid = chi\n            .id()\n            .expect(\"failed to take pid of the container process.\");\n        let out = chi.wait_with_output().await?;\n        let ts = OffsetDateTime::now_utc();\n        // On Unix, out.status.code() will return None if the process was terminated by a signal.\n        let status = out.status.code().unwrap_or(-1);\n        match tx.send(Exit { ts, pid, status }) {\n            Ok(_) => Ok(out),\n            Err(e) => {\n                error!(\"command {:?} exited but receiver dropped.\", cmd);\n                error!(\"couldn't send messages: {:?}\", e);\n                Err(std::io::ErrorKind::ConnectionRefused.into())\n            }\n        }\n    }\n\n    /// Wait for the spawned process to exit and return the exit status.\n    async fn wait(&self, rx: Receiver<Exit>) -> std::io::Result<Exit> {\n        rx.await.map_err(|_| {\n            error!(\"sender dropped.\");\n  
          std::io::ErrorKind::BrokenPipe.into()\n        })\n    }\n}\n\n/// A default implementation of [ProcessMonitor].\n#[derive(Debug, Clone, Default)]\npub struct DefaultMonitor {}\n\nimpl ProcessMonitor for DefaultMonitor {}\n\nimpl DefaultMonitor {\n    pub const fn new() -> Self {\n        Self {}\n    }\n}\n\n/// Process exit status returned by [ProcessMonitor::wait()].\n#[derive(Debug)]\npub struct Exit {\n    pub ts: OffsetDateTime,\n    pub pid: u32,\n    pub status: i32,\n}\n\n/// Execution result returned by `execute()`.\npub struct ExecuteResult {\n    pub exit: Exit,\n    pub status: ExitStatus,\n    pub stdout: String,\n    pub stderr: String,\n}\n\n/// Execute a `Command` and collect exit status, output and error messages.\n///\n/// To collect output and error messages, pipes must be used for Command's stdout and stderr.\n///\n/// Note: invalid UTF-8 characters in output and error messages will be replaced with the `�` char.\npub async fn execute<T: ProcessMonitor + Send + Sync>(\n    monitor: &T,\n    cmd: Command,\n) -> Result<ExecuteResult, Error> {\n    let (tx, rx) = channel::<Exit>();\n    let start = monitor.start(cmd, tx);\n    let wait = monitor.wait(rx);\n    let (\n        Output {\n            stdout,\n            stderr,\n            status,\n        },\n        exit,\n    ) = tokio::try_join!(start, wait).map_err(Error::InvalidCommand)?;\n    let stdout = String::from_utf8_lossy(&stdout).to_string();\n    let stderr = String::from_utf8_lossy(&stderr).to_string();\n\n    Ok(ExecuteResult {\n        exit,\n        status,\n        stdout,\n        stderr,\n    })\n}\n\n#[cfg(test)]\nmod tests {\n    use std::process::Stdio;\n\n    use tokio::{process::Command, sync::oneshot::channel};\n\n    use super::*;\n\n    #[tokio::test]\n    async fn test_start_wait_without_output() {\n        let monitor = DefaultMonitor::new();\n        let cmd = Command::new(\"/bin/ls\");\n        let (tx, rx) = channel();\n\n        let output = 
monitor.start(cmd, tx).await.unwrap();\n        assert_eq!(output.stdout.len(), 0);\n        assert_eq!(output.stderr.len(), 0);\n        let status = monitor.wait(rx).await.unwrap();\n        assert_eq!(status.status, 0);\n    }\n\n    #[tokio::test]\n    async fn test_start_wait_with_output() {\n        let monitor = DefaultMonitor::new();\n        let mut cmd = Command::new(\"/bin/ls\");\n        cmd.stdout(Stdio::piped());\n        let (tx, rx) = channel();\n\n        let output = monitor.start(cmd, tx).await.unwrap();\n        assert!(!output.stdout.is_empty());\n        assert_eq!(output.stderr.len(), 0);\n        let status = monitor.wait(rx).await.unwrap();\n        assert_eq!(status.status, 0);\n    }\n\n    #[tokio::test]\n    async fn test_execute() {\n        let mut cmd = Command::new(\"/bin/ls\");\n        cmd.stdout(Stdio::piped());\n        let monitor = DefaultMonitor::new();\n        let result = execute(&monitor, cmd).await.unwrap();\n\n        assert_eq!(result.exit.status, 0);\n        assert!(result.status.success());\n        assert!(!result.stdout.is_empty());\n        assert_eq!(result.stderr.len(), 0);\n    }\n}\n"
  },
  {
    "path": "crates/runc/src/options.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\n// Forked from https://github.com/pwFoo/rust-runc/blob/313e6ae5a79b54455b0a242a795c69adf035141a/src/lib.rs\n\n/*\n * Copyright 2020 fsyncd, Berlin, Germany.\n * Additional material, copyright of the containerd authors.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\nuse std::{\n    path::{Path, PathBuf},\n    sync::Arc,\n    time::Duration,\n};\n\nuse crate::{error::Error, utils, DefaultExecutor, Io, LogFormat, Runc, Spawner};\n\n// constants for log format\npub const JSON: &str = \"json\";\npub const TEXT: &str = \"text\";\n\n// constants for runc global flags\nconst DEBUG: &str = \"--debug\";\nconst LOG: &str = \"--log\";\nconst LOG_FORMAT: &str = \"--log-format\";\nconst ROOT: &str = \"--root\";\nconst ROOTLESS: &str = \"--rootless\";\nconst SYSTEMD_CGROUP: &str = \"--systemd-cgroup\";\n\n// constants for 
runc-create/runc-exec flags\nconst CONSOLE_SOCKET: &str = \"--console-socket\";\nconst DETACH: &str = \"--detach\";\nconst NO_NEW_KEYRING: &str = \"--no-new-keyring\";\nconst NO_PIVOT: &str = \"--no-pivot\";\nconst PID_FILE: &str = \"--pid-file\";\n\n// constants for runc-kill flags\nconst ALL: &str = \"--all\";\n\n// constants for runc-delete flags\nconst FORCE: &str = \"--force\";\n\n// constant for command\npub const DEFAULT_COMMAND: &str = \"runc\";\n\npub trait Args {\n    type Output;\n\n    fn args(&self) -> Self::Output;\n}\n\n/// Global options builder for the runc binary.\n///\n/// These options will be passed for all subsequent runc calls.\n/// See <https://github.com/opencontainers/runc/blob/main/man/runc.8.md#global-options>\n#[derive(Debug, Default)]\npub struct GlobalOpts {\n    /// Override the name of the runc binary. If [`None`], `runc` is used.\n    command: Option<PathBuf>,\n    /// Debug logging.\n    ///\n    /// If true, debug level logs are emitted.\n    debug: bool,\n    /// Path to log file.\n    log: Option<PathBuf>,\n    /// Log format to use.\n    log_format: LogFormat,\n    /// Path to root directory of container rootfs.\n    root: Option<PathBuf>,\n    /// Whether to use rootless mode.\n    ///\n    /// If [`None`], `auto` settings is used.\n    /// Note that \"auto\" is different from explicit \"true\" or \"false\".\n    rootless: Option<bool>,\n    /// Set process group ID (gpid).\n    set_pgid: bool,\n    /// Use systemd cgroup.\n    systemd_cgroup: bool,\n    /// Timeout settings for runc command.\n    ///\n    /// Default is 5 seconds.\n    /// This will be used only in AsyncClient.\n    timeout: Duration,\n    /// executor that runs the commands\n    executor: Option<Arc<dyn Spawner + Send + Sync>>,\n}\n\nimpl GlobalOpts {\n    /// Create new config builder with no options.\n    pub fn new() -> Self {\n        Default::default()\n    }\n\n    pub fn command(mut self, command: impl AsRef<Path>) -> Self {\n        self.command = 
Some(command.as_ref().to_path_buf());\n        self\n    }\n\n    /// Set the root directory to store containers' state.\n    ///\n    /// The path should be located on tmpfs.\n    /// Default is `/run/runc`, or `$XDG_RUNTIME_DIR/runc` for rootless containers.\n    pub fn root(mut self, root: impl AsRef<Path>) -> Self {\n        self.root = Some(root.as_ref().to_path_buf());\n        self\n    }\n\n    /// Enable debug logging.\n    pub fn debug(mut self, debug: bool) -> Self {\n        self.debug = debug;\n        self\n    }\n\n    /// Set the log destination to path.\n    ///\n    /// The default is to log to stderr.\n    pub fn log(mut self, log: impl AsRef<Path>) -> Self {\n        self.log = Some(log.as_ref().to_path_buf());\n        self\n    }\n\n    /// Set the log format (default is text).\n    pub fn log_format(mut self, log_format: LogFormat) -> Self {\n        self.log_format = log_format;\n        self\n    }\n\n    /// Set the log format to JSON.\n    pub fn log_json(self) -> Self {\n        self.log_format(LogFormat::Json)\n    }\n\n    /// Set the log format to TEXT.\n    pub fn log_text(self) -> Self {\n        self.log_format(LogFormat::Text)\n    }\n\n    /// Enable systemd cgroup support.\n    ///\n    /// If this is set, the container spec (`config.json`) is expected to have `cgroupsPath` value in\n    // the `slice:prefix:name` form (e.g. 
`system.slice:runc:434234`).\n    pub fn systemd_cgroup(mut self, systemd_cgroup: bool) -> Self {\n        self.systemd_cgroup = systemd_cgroup;\n        self\n    }\n\n    /// Enable or disable rootless mode.\n    ///\n    /// Default is auto, meaning to auto-detect whether rootless should be enabled.\n    pub fn rootless(mut self, rootless: bool) -> Self {\n        self.rootless = Some(rootless);\n        self\n    }\n\n    /// Set rootless mode to auto.\n    pub fn rootless_auto(mut self) -> Self {\n        self.rootless = None;\n        self\n    }\n\n    pub fn set_pgid(mut self, set_pgid: bool) -> Self {\n        self.set_pgid = set_pgid;\n        self\n    }\n\n    pub fn timeout(&mut self, millis: u64) -> &mut Self {\n        self.timeout = Duration::from_millis(millis);\n        self\n    }\n\n    pub fn custom_spawner(&mut self, executor: Arc<dyn Spawner + Send + Sync>) -> &mut Self {\n        self.executor = Some(executor);\n        self\n    }\n\n    pub fn build(self) -> Result<Runc, Error> {\n        self.args()\n    }\n\n    fn output(&self) -> Result<(PathBuf, Vec<String>), Error> {\n        let path = self\n            .command\n            .clone()\n            .unwrap_or_else(|| PathBuf::from(\"runc\"));\n\n        let command = utils::binary_path(path).ok_or(Error::NotFound)?;\n\n        let mut args = Vec::new();\n\n        // --root path : Set the root directory to store containers' state.\n        if let Some(root) = &self.root {\n            args.push(ROOT.into());\n            args.push(utils::abs_string(root)?);\n        }\n\n        // --debug : Enable debug logging.\n        if self.debug {\n            args.push(DEBUG.into());\n        }\n\n        // --log path : Set the log destination to path. 
The default is to log to stderr.\n        if let Some(log_path) = &self.log {\n            args.push(LOG.into());\n            args.push(utils::abs_string(log_path)?);\n        }\n\n        // --log-format text|json : Set the log format (default is text).\n        args.push(LOG_FORMAT.into());\n        args.push(self.log_format.to_string());\n\n        // --systemd-cgroup : Enable systemd cgroup support.\n        if self.systemd_cgroup {\n            args.push(SYSTEMD_CGROUP.into());\n        }\n\n        // --rootless true|false|auto : Enable or disable rootless mode.\n        if let Some(mode) = self.rootless {\n            let arg = format!(\"{}={}\", ROOTLESS, mode);\n            args.push(arg);\n        }\n        Ok((command, args))\n    }\n}\n\nimpl Args for GlobalOpts {\n    type Output = Result<Runc, Error>;\n\n    fn args(&self) -> Self::Output {\n        let (command, args) = self.output()?;\n        let executor = if let Some(exec) = self.executor.clone() {\n            exec\n        } else {\n            Arc::new(DefaultExecutor {})\n        };\n        Ok(Runc {\n            command,\n            args,\n            spawner: executor,\n        })\n    }\n}\n\n#[derive(Clone, Default)]\npub struct CreateOpts {\n    pub io: Option<Arc<dyn Io>>,\n    /// Path to where a pid file should be created.\n    pub pid_file: Option<PathBuf>,\n    /// Path to where a console socket should be created.\n    pub console_socket: Option<PathBuf>,\n    /// Detach from the container's process (only available for run)\n    pub detach: bool,\n    /// Don't use pivot_root to jail process inside rootfs.\n    pub no_pivot: bool,\n    /// A new session keyring for the container will not be created.\n    pub no_new_keyring: bool,\n}\n\nimpl Args for CreateOpts {\n    type Output = Result<Vec<String>, Error>;\n\n    fn args(&self) -> Self::Output {\n        let mut args: Vec<String> = vec![];\n        if let Some(pid_file) = &self.pid_file {\n            
args.push(PID_FILE.to_string());\n            args.push(utils::abs_string(pid_file)?);\n        }\n        if let Some(console_socket) = &self.console_socket {\n            args.push(CONSOLE_SOCKET.to_string());\n            args.push(utils::abs_string(console_socket)?);\n        }\n        if self.no_pivot {\n            args.push(NO_PIVOT.to_string());\n        }\n        if self.no_new_keyring {\n            args.push(NO_NEW_KEYRING.to_string());\n        }\n        if self.detach {\n            args.push(DETACH.to_string());\n        }\n        Ok(args)\n    }\n}\n\nimpl CreateOpts {\n    pub fn new() -> Self {\n        Self::default()\n    }\n\n    pub fn io(mut self, io: Arc<dyn Io>) -> Self {\n        self.io = Some(io);\n        self\n    }\n\n    pub fn pid_file<P>(mut self, pid_file: P) -> Self\n    where\n        P: AsRef<Path>,\n    {\n        self.pid_file = Some(pid_file.as_ref().to_path_buf());\n        self\n    }\n\n    pub fn console_socket<P>(mut self, console_socket: P) -> Self\n    where\n        P: AsRef<Path>,\n    {\n        self.console_socket = Some(console_socket.as_ref().to_path_buf());\n        self\n    }\n\n    pub fn detach(mut self, detach: bool) -> Self {\n        self.detach = detach;\n        self\n    }\n\n    pub fn no_pivot(mut self, no_pivot: bool) -> Self {\n        self.no_pivot = no_pivot;\n        self\n    }\n\n    pub fn no_new_keyring(mut self, no_new_keyring: bool) -> Self {\n        self.no_new_keyring = no_new_keyring;\n        self\n    }\n}\n\n/// Container execution options\n#[derive(Clone, Default)]\npub struct ExecOpts {\n    pub io: Option<Arc<dyn Io>>,\n    /// Path to where a pid file should be created.\n    pub pid_file: Option<PathBuf>,\n    /// Path to where a console socket should be created.\n    pub console_socket: Option<PathBuf>,\n    /// Detach from the container's process (only available for run)\n    pub detach: bool,\n}\n\nimpl Args for ExecOpts {\n    type Output = Result<Vec<String>, 
Error>;\n\n    fn args(&self) -> Self::Output {\n        let mut args: Vec<String> = vec![];\n        if let Some(pid_file) = &self.pid_file {\n            args.push(PID_FILE.to_string());\n            args.push(utils::abs_string(pid_file)?);\n        }\n        if let Some(console_socket) = &self.console_socket {\n            args.push(CONSOLE_SOCKET.to_string());\n            args.push(utils::abs_string(console_socket)?);\n        }\n        if self.detach {\n            args.push(DETACH.to_string());\n        }\n        Ok(args)\n    }\n}\n\nimpl ExecOpts {\n    pub fn new() -> Self {\n        Self::default()\n    }\n\n    pub fn io(mut self, io: Arc<dyn Io>) -> Self {\n        self.io = Some(io);\n        self\n    }\n\n    pub fn pid_file<P>(mut self, pid_file: P) -> Self\n    where\n        P: AsRef<Path>,\n    {\n        self.pid_file = Some(pid_file.as_ref().to_path_buf());\n        self\n    }\n\n    pub fn console_socket<P>(mut self, console_socket: P) -> Self\n    where\n        P: AsRef<Path>,\n    {\n        self.console_socket = Some(console_socket.as_ref().to_path_buf());\n        self\n    }\n\n    pub fn detach(mut self, detach: bool) -> Self {\n        self.detach = detach;\n        self\n    }\n}\n\n/// Container deletion options\n#[derive(Debug, Clone, Default)]\npub struct DeleteOpts {\n    /// Forcibly delete the container if it is still running\n    pub force: bool,\n}\n\nimpl Args for DeleteOpts {\n    type Output = Vec<String>;\n\n    fn args(&self) -> Self::Output {\n        let mut args: Vec<String> = vec![];\n        if self.force {\n            args.push(FORCE.to_string());\n        }\n        args\n    }\n}\n\nimpl DeleteOpts {\n    pub fn new() -> Self {\n        Self::default()\n    }\n\n    pub fn force(mut self, force: bool) -> Self {\n        self.force = force;\n        self\n    }\n}\n\n/// Container killing options\n#[derive(Debug, Clone, Default)]\npub struct KillOpts {\n    /// Send the kill signal to all the processes inside 
the container\n    pub all: bool,\n}\n\nimpl Args for KillOpts {\n    type Output = Vec<String>;\n\n    fn args(&self) -> Self::Output {\n        let mut args: Vec<String> = vec![];\n        if self.all {\n            args.push(ALL.to_string());\n        }\n        args\n    }\n}\n\nimpl KillOpts {\n    pub fn new() -> Self {\n        Self::default()\n    }\n\n    pub fn all(mut self, all: bool) -> Self {\n        self.all = all;\n        self\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::env;\n\n    use super::*;\n\n    const ARGS_FAIL_MSG: &str = \"Args.args() failed.\";\n\n    #[test]\n    fn create_opts_test() {\n        assert_eq!(\n            CreateOpts::new().args().expect(ARGS_FAIL_MSG),\n            Vec::<String>::new()\n        );\n\n        assert_eq!(\n            CreateOpts::new().pid_file(\".\").args().expect(ARGS_FAIL_MSG),\n            vec![\n                \"--pid-file\".to_string(),\n                env::current_dir()\n                    .unwrap()\n                    .to_string_lossy()\n                    .parse::<String>()\n                    .unwrap()\n            ]\n        );\n\n        assert_eq!(\n            CreateOpts::new()\n                .console_socket(\"..\")\n                .args()\n                .expect(ARGS_FAIL_MSG),\n            vec![\n                \"--console-socket\".to_string(),\n                env::current_dir()\n                    .unwrap()\n                    .parent()\n                    .unwrap()\n                    .to_string_lossy()\n                    .parse::<String>()\n                    .unwrap()\n            ]\n        );\n\n        assert_eq!(\n            CreateOpts::new()\n                .detach(true)\n                .no_pivot(true)\n                .no_new_keyring(true)\n                .args()\n                .expect(ARGS_FAIL_MSG),\n            vec![\n                \"--no-pivot\".to_string(),\n                \"--no-new-keyring\".to_string(),\n                
\"--detach\".to_string(),\n            ]\n        );\n    }\n\n    #[test]\n    fn exec_opts_test() {\n        assert_eq!(\n            ExecOpts::new().args().expect(ARGS_FAIL_MSG),\n            Vec::<String>::new()\n        );\n\n        assert_eq!(\n            ExecOpts::new().pid_file(\".\").args().expect(ARGS_FAIL_MSG),\n            vec![\n                \"--pid-file\".to_string(),\n                env::current_dir()\n                    .unwrap()\n                    .to_string_lossy()\n                    .parse::<String>()\n                    .unwrap()\n            ]\n        );\n\n        assert_eq!(\n            ExecOpts::new()\n                .console_socket(\"..\")\n                .args()\n                .expect(ARGS_FAIL_MSG),\n            vec![\n                \"--console-socket\".to_string(),\n                env::current_dir()\n                    .unwrap()\n                    .parent()\n                    .unwrap()\n                    .to_string_lossy()\n                    .parse::<String>()\n                    .unwrap()\n            ]\n        );\n\n        assert_eq!(\n            ExecOpts::new().detach(true).args().expect(ARGS_FAIL_MSG),\n            vec![\"--detach\".to_string(),]\n        );\n    }\n\n    #[test]\n    fn delete_opts_test() {\n        assert_eq!(DeleteOpts::new().force(false).args(), Vec::<String>::new());\n\n        assert_eq!(\n            DeleteOpts::new().force(true).args(),\n            vec![\"--force\".to_string()],\n        );\n    }\n\n    #[test]\n    fn kill_opts_test() {\n        assert_eq!(KillOpts::new().all(false).args(), Vec::<String>::new());\n\n        assert_eq!(KillOpts::new().all(true).args(), vec![\"--all\".to_string()],);\n    }\n\n    #[cfg(target_os = \"linux\")]\n    #[test]\n    fn global_opts_test() {\n        let cfg = GlobalOpts::default().command(\"true\");\n        let runc = cfg.build().unwrap();\n        let args = &runc.args;\n        assert_eq!(args.len(), 2);\n        
assert!(args.contains(&LOG_FORMAT.to_string()));\n        assert!(args.contains(&TEXT.to_string()));\n\n        let cfg = GlobalOpts::default().command(\"/bin/true\");\n        let runc = cfg.build().unwrap();\n        assert_eq!(runc.args.len(), 2);\n\n        let cfg = GlobalOpts::default()\n            .command(\"true\")\n            .root(\"/tmp\")\n            .debug(true)\n            .log(\"/tmp/runc.log\")\n            .log_json()\n            .systemd_cgroup(true)\n            .rootless(true);\n        let runc = cfg.build().unwrap();\n        let args = &runc.args;\n        assert!(args.contains(&ROOT.to_string()));\n        assert!(args.contains(&DEBUG.to_string()));\n        assert!(args.contains(&\"/tmp\".to_string()));\n        assert!(args.contains(&LOG.to_string()));\n        assert!(args.contains(&\"/tmp/runc.log\".to_string()));\n        assert!(args.contains(&LOG_FORMAT.to_string()));\n        assert!(args.contains(&JSON.to_string()));\n        assert!(args.contains(&\"--rootless=true\".to_string()));\n        assert!(args.contains(&SYSTEMD_CGROUP.to_string()));\n        assert_eq!(args.len(), 9);\n    }\n}\n"
  },
  {
    "path": "crates/runc/src/synchronous/io.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::{\n    fmt::Debug,\n    fs::{File, OpenOptions},\n    io::Result,\n    os::unix::fs::OpenOptionsExt,\n    process::Stdio,\n    sync::Mutex,\n};\n\nuse nix::unistd::{Gid, Uid};\n\nuse super::Io;\nuse crate::{Command, Pipe, PipedIo};\n\n#[derive(Debug, Clone)]\npub struct IOOption {\n    pub open_stdin: bool,\n    pub open_stdout: bool,\n    pub open_stderr: bool,\n}\n\nimpl Default for IOOption {\n    fn default() -> Self {\n        Self {\n            open_stdin: true,\n            open_stdout: true,\n            open_stderr: true,\n        }\n    }\n}\n\nimpl PipedIo {\n    pub fn new(uid: u32, gid: u32, opts: &IOOption) -> std::io::Result<Self> {\n        Ok(Self {\n            stdin: if opts.open_stdin {\n                Self::create_pipe(uid, gid, true)?\n            } else {\n                None\n            },\n            stdout: if opts.open_stdout {\n                Self::create_pipe(uid, gid, true)?\n            } else {\n                None\n            },\n            stderr: if opts.open_stderr {\n                Self::create_pipe(uid, gid, true)?\n            } else {\n                None\n            },\n        })\n    }\n\n    fn create_pipe(uid: u32, gid: u32, stdin: bool) -> std::io::Result<Option<Pipe>> {\n        let pipe = Pipe::new()?;\n        let uid = Some(Uid::from_raw(uid));\n        let gid = 
Some(Gid::from_raw(gid));\n        if stdin {\n            let rd = pipe.rd.try_clone()?;\n            nix::unistd::fchown(rd, uid, gid)?;\n        } else {\n            let wr = pipe\n                .try_clone_wr()\n                .ok_or_else(|| std::io::Error::other(\"write end closed\"))?;\n            nix::unistd::fchown(wr, uid, gid)?;\n        }\n        Ok(Some(pipe))\n    }\n}\n\n/// IO driver to direct output/error messages to /dev/null.\n///\n/// With this Io driver, all methods of [crate::Runc] can't capture the output/error messages.\n#[derive(Debug)]\npub struct NullIo {\n    dev_null: Mutex<Option<File>>,\n}\n\nimpl NullIo {\n    pub fn new() -> std::io::Result<Self> {\n        let f = OpenOptions::new().read(true).open(\"/dev/null\")?;\n        let dev_null = Mutex::new(Some(f));\n        Ok(Self { dev_null })\n    }\n}\n\nimpl Io for NullIo {\n    fn set(&self, cmd: &mut Command) -> std::io::Result<()> {\n        if let Some(null) = self.dev_null.lock().unwrap().as_ref() {\n            cmd.stdout(null.try_clone()?);\n            cmd.stderr(null.try_clone()?);\n        }\n        Ok(())\n    }\n\n    fn close_after_start(&self) {\n        let mut m = self.dev_null.lock().unwrap();\n        let _ = m.take();\n    }\n}\n\n/// Io driver based on Stdio::inherited(), to direct outputs/errors to stdio.\n///\n/// With this Io driver, all methods of [crate::Runc] can't capture the output/error messages.\n#[derive(Debug)]\npub struct InheritedStdIo {}\n\nimpl InheritedStdIo {\n    pub fn new() -> std::io::Result<Self> {\n        Ok(InheritedStdIo {})\n    }\n}\n\nimpl Io for InheritedStdIo {\n    fn set(&self, cmd: &mut Command) -> std::io::Result<()> {\n        cmd.stdin(Stdio::null())\n            .stdout(Stdio::inherit())\n            .stderr(Stdio::inherit());\n        Ok(())\n    }\n\n    fn close_after_start(&self) {}\n}\n\n/// Io driver based on Stdio::piped(), to capture outputs/errors from runC.\n///\n/// With this Io driver, methods of 
[crate::Runc] may capture the output/error messages.\n#[derive(Debug)]\npub struct PipedStdIo {}\n\nimpl PipedStdIo {\n    pub fn new() -> std::io::Result<Self> {\n        Ok(PipedStdIo {})\n    }\n}\n\nimpl Io for PipedStdIo {\n    fn set(&self, cmd: &mut Command) -> std::io::Result<()> {\n        cmd.stdin(Stdio::null())\n            .stdout(Stdio::piped())\n            .stderr(Stdio::piped());\n        Ok(())\n    }\n\n    fn close_after_start(&self) {}\n}\n\n/// FIFO for the scenario that set FIFO for command Io.\n#[derive(Debug)]\npub struct FIFO {\n    pub stdin: Option<String>,\n    pub stdout: Option<String>,\n    pub stderr: Option<String>,\n}\n\nimpl Io for FIFO {\n    fn set(&self, cmd: &mut Command) -> Result<()> {\n        if let Some(path) = self.stdin.as_ref() {\n            let stdin = OpenOptions::new()\n                .read(true)\n                .custom_flags(libc::O_NONBLOCK)\n                .open(path)?;\n            cmd.stdin(stdin);\n        }\n\n        if let Some(path) = self.stdout.as_ref() {\n            let stdout = OpenOptions::new().write(true).open(path)?;\n            cmd.stdout(stdout);\n        }\n\n        if let Some(path) = self.stderr.as_ref() {\n            let stderr = OpenOptions::new().write(true).open(path)?;\n            cmd.stderr(stderr);\n        }\n\n        Ok(())\n    }\n\n    fn close_after_start(&self) {}\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[cfg(not(target_os = \"macos\"))]\n    #[test]\n    fn test_io_option() {\n        let opts = IOOption {\n            open_stdin: false,\n            open_stdout: false,\n            open_stderr: false,\n        };\n        let io = PipedIo::new(1000, 1000, &opts).unwrap();\n\n        assert!(io.stdin().is_none());\n        assert!(io.stdout().is_none());\n        assert!(io.stderr().is_none());\n    }\n\n    #[cfg(target_os = \"linux\")]\n    #[test]\n    fn test_create_piped_io() {\n        use std::io::{Read, Write};\n\n        let opts = 
IOOption::default();\n        let uid = nix::unistd::getuid();\n        let gid = nix::unistd::getgid();\n        let io = PipedIo::new(uid.as_raw(), gid.as_raw(), &opts).unwrap();\n        let mut buf = [0xfau8];\n\n        let mut stdin = io.stdin().unwrap();\n        stdin.write_all(&buf).unwrap();\n        buf[0] = 0x0;\n\n        io.stdin\n            .as_ref()\n            .map(|v| v.rd.try_clone().unwrap().read(&mut buf).unwrap());\n        assert_eq!(&buf, &[0xfau8]);\n\n        let mut stdout = io.stdout().unwrap();\n        buf[0] = 0xce;\n        io.stdout\n            .as_ref()\n            .map(|v| v.try_clone_wr().unwrap().write(&buf).unwrap());\n        buf[0] = 0x0;\n        stdout.read_exact(&mut buf).unwrap();\n        assert_eq!(&buf, &[0xceu8]);\n\n        let mut stderr = io.stderr().unwrap();\n        buf[0] = 0xa5;\n        io.stderr\n            .as_ref()\n            .map(|v| v.try_clone_wr().unwrap().write(&buf).unwrap());\n        buf[0] = 0x0;\n        stderr.read_exact(&mut buf).unwrap();\n        assert_eq!(&buf, &[0xa5u8]);\n\n        io.close_after_start();\n        stdout.read_exact(&mut buf).unwrap_err();\n        stderr.read_exact(&mut buf).unwrap_err();\n    }\n\n    #[test]\n    fn test_null_io() {\n        let io = NullIo::new().unwrap();\n        assert!(io.stdin().is_none());\n        assert!(io.stdout().is_none());\n        assert!(io.stderr().is_none());\n    }\n}\n"
  },
  {
    "path": "crates/runc/src/synchronous/mod.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\npub mod io;\nmod pipe;\nmod runc;\nuse std::{\n    fmt::Debug,\n    io::{Read, Result, Write},\n};\n\npub use pipe::Pipe;\npub use runc::{DefaultExecutor, Spawner};\n\nuse crate::Command;\n\npub trait Io: Debug + Send + Sync {\n    /// Return write side of stdin\n    fn stdin(&self) -> Option<Box<dyn Write + Send + Sync>> {\n        None\n    }\n\n    /// Return read side of stdout\n    fn stdout(&self) -> Option<Box<dyn Read + Send>> {\n        None\n    }\n\n    /// Return read side of stderr\n    fn stderr(&self) -> Option<Box<dyn Read + Send>> {\n        None\n    }\n\n    /// Set IO for passed command.\n    /// Read side of stdin, write side of stdout and write side of stderr should be provided to command.\n    fn set(&self, cmd: &mut Command) -> Result<()>;\n\n    /// Only close write side (should be stdout/err \"from\" runc process)\n    fn close_after_start(&self);\n}\n\n#[derive(Debug)]\npub struct PipedIo {\n    pub stdin: Option<Pipe>,\n    pub stdout: Option<Pipe>,\n    pub stderr: Option<Pipe>,\n}\n\nimpl Io for PipedIo {\n    fn stdin(&self) -> Option<Box<dyn Write + Send + Sync>> {\n        self.stdin\n            .as_ref()\n            .and_then(|pipe| pipe.try_clone_wr())\n            .map(|x| Box::new(x) as Box<dyn Write + Send + Sync>)\n    }\n\n    fn stdout(&self) -> Option<Box<dyn Read + Send>> {\n        
self.stdout.as_ref().and_then(|pipe| {\n            pipe.rd\n                .try_clone()\n                .map(|x| Box::new(x) as Box<dyn Read + Send>)\n                .ok()\n        })\n    }\n\n    fn stderr(&self) -> Option<Box<dyn Read + Send>> {\n        self.stderr.as_ref().and_then(|pipe| {\n            pipe.rd\n                .try_clone()\n                .map(|x| Box::new(x) as Box<dyn Read + Send>)\n                .ok()\n        })\n    }\n    // Note that this internally uses [`std::fs::File`]'s `try_clone()`.\n    // Thus, the files passed to commands will not be closed after command exit.\n    fn set(&self, cmd: &mut Command) -> std::io::Result<()> {\n        if let Some(p) = self.stdin.as_ref() {\n            let pr = p.rd.try_clone()?;\n            cmd.stdin(pr);\n        }\n\n        if let Some(p) = self.stdout.as_ref() {\n            let pw = p\n                .try_clone_wr()\n                .ok_or_else(|| std::io::Error::other(\"write end closed\"))?;\n            cmd.stdout(pw);\n        }\n\n        if let Some(p) = self.stderr.as_ref() {\n            let pw = p\n                .try_clone_wr()\n                .ok_or_else(|| std::io::Error::other(\"write end closed\"))?;\n            cmd.stderr(pw);\n        }\n\n        Ok(())\n    }\n\n    fn close_after_start(&self) {\n        if let Some(p) = self.stdout.as_ref() {\n            p.close_wr();\n        }\n\n        if let Some(p) = self.stderr.as_ref() {\n            p.close_wr();\n        }\n    }\n}\n"
  },
  {
    "path": "crates/runc/src/synchronous/pipe.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::{\n    io::{PipeReader, PipeWriter},\n    sync::Mutex,\n};\n\n#[derive(Debug)]\npub struct Pipe {\n    pub rd: PipeReader,\n    wr: Mutex<Option<PipeWriter>>,\n}\n\nimpl Pipe {\n    pub fn new() -> std::io::Result<Self> {\n        let (rd, wr) = std::io::pipe()?;\n        Ok(Self {\n            rd,\n            wr: Mutex::new(Some(wr)),\n        })\n    }\n\n    /// Clone the write end. Returns `None` if closed.\n    pub fn try_clone_wr(&self) -> Option<PipeWriter> {\n        self.wr\n            .lock()\n            .unwrap()\n            .as_ref()\n            .and_then(|w| w.try_clone().ok())\n    }\n\n    /// Close the write end by dropping it. No-op if already closed.\n    pub fn close_wr(&self) {\n        let _ = self.wr.lock().unwrap().take();\n    }\n}\n"
  },
  {
    "path": "crates/runc/src/synchronous/runc.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::{fmt::Debug, path::Path, process::ExitStatus};\n\nuse oci_spec::runtime::{LinuxResources, Process};\n\nuse crate::{\n    container::Container,\n    error::Error,\n    events,\n    options::*,\n    utils::{self, write_value_to_temp_file},\n    Command, Response, Result, Runc,\n};\n\nimpl Runc {\n    pub(crate) fn launch(&self, cmd: Command, combined_output: bool) -> Result<Response> {\n        let (status, pid, stdout, stderr) = self.spawner.execute(cmd)?;\n        if status.success() {\n            let output = if combined_output {\n                stdout + stderr.as_str()\n            } else {\n                stdout\n            };\n            Ok(Response {\n                pid,\n                status,\n                output,\n            })\n        } else {\n            Err(Error::CommandFailed {\n                status,\n                stdout,\n                stderr,\n            })\n        }\n    }\n\n    /// Create a new container\n    pub fn create<P>(&self, id: &str, bundle: P, opts: Option<&CreateOpts>) -> Result<Response>\n    where\n        P: AsRef<Path>,\n    {\n        let mut args = vec![\n            \"create\".to_string(),\n            \"--bundle\".to_string(),\n            utils::abs_string(bundle)?,\n        ];\n        if let Some(opts) = opts {\n            args.append(&mut opts.args()?);\n        }\n        
args.push(id.to_string());\n        let mut cmd = self.command(&args)?;\n        match opts {\n            Some(CreateOpts { io: Some(io), .. }) => {\n                io.set(&mut cmd).map_err(|e| Error::IoSet(e.to_string()))?;\n                let res = self.launch(cmd, true)?;\n                io.close_after_start();\n                Ok(res)\n            }\n            _ => self.launch(cmd, true),\n        }\n    }\n\n    /// Delete a container\n    pub fn delete(&self, id: &str, opts: Option<&DeleteOpts>) -> Result<()> {\n        let mut args = vec![\"delete\".to_string()];\n        if let Some(opts) = opts {\n            args.append(&mut opts.args());\n        }\n        args.push(id.to_string());\n        self.launch(self.command(&args)?, true)?;\n        Ok(())\n    }\n\n    /// Execute an additional process inside the container\n    pub fn exec(&self, id: &str, spec: &Process, opts: Option<&ExecOpts>) -> Result<()> {\n        let (_temp_file, filename) = write_value_to_temp_file(spec)?;\n        let mut args = vec![\"exec\".to_string(), \"--process\".to_string(), filename];\n        if let Some(opts) = opts {\n            args.append(&mut opts.args()?);\n        }\n        args.push(id.to_string());\n        let mut cmd = self.command(&args)?;\n        match opts {\n            Some(ExecOpts { io: Some(io), .. 
}) => {\n                io.set(&mut cmd).map_err(|e| Error::IoSet(e.to_string()))?;\n                self.launch(cmd, true)?;\n                io.close_after_start();\n            }\n            _ => {\n                self.launch(cmd, true)?;\n            }\n        }\n        Ok(())\n    }\n\n    /// Send the specified signal to processes inside the container\n    pub fn kill(&self, id: &str, sig: u32, opts: Option<&KillOpts>) -> Result<()> {\n        let mut args = vec![\"kill\".to_string()];\n        if let Some(opts) = opts {\n            args.append(&mut opts.args());\n        }\n        args.push(id.to_string());\n        args.push(sig.to_string());\n        let _ = self.launch(self.command(&args)?, true)?;\n        Ok(())\n    }\n\n    /// List all containers associated with this runc instance\n    pub fn list(&self) -> Result<Vec<Container>> {\n        let args = [\"list\".to_string(), \"--format=json\".to_string()];\n        let res = self.launch(self.command(&args)?, true)?;\n        let output = res.output.trim();\n\n        // Ugly hack to work around golang\n        Ok(if output == \"null\" {\n            Vec::new()\n        } else {\n            serde_json::from_str(output).map_err(Error::JsonDeserializationFailed)?\n        })\n    }\n\n    /// Pause a container\n    pub fn pause(&self, id: &str) -> Result<()> {\n        let args = [\"pause\".to_string(), id.to_string()];\n        let _ = self.launch(self.command(&args)?, true)?;\n        Ok(())\n    }\n\n    /// Resume a container\n    pub fn resume(&self, id: &str) -> Result<()> {\n        let args = [\"resume\".to_string(), id.to_string()];\n        let _ = self.launch(self.command(&args)?, true)?;\n        Ok(())\n    }\n\n    pub fn checkpoint(&self) -> Result<()> {\n        Err(Error::Unimplemented(\"checkpoint\".to_string()))\n    }\n\n    pub fn restore(&self) -> Result<()> {\n        Err(Error::Unimplemented(\"restore\".to_string()))\n    }\n\n    /// List all the processes inside the 
container, returning their pids\n    pub fn ps(&self, id: &str) -> Result<Vec<usize>> {\n        let args = [\n            \"ps\".to_string(),\n            \"--format=json\".to_string(),\n            id.to_string(),\n        ];\n        let res = self.launch(self.command(&args)?, false)?;\n        let output = res.output.trim();\n\n        // Ugly hack to work around golang\n        Ok(if output == \"null\" {\n            Vec::new()\n        } else {\n            serde_json::from_str(output).map_err(Error::JsonDeserializationFailed)?\n        })\n    }\n\n    /// Run the create, start, delete lifecycle of the container and return its exit status\n    pub fn run<P>(&self, id: &str, bundle: P, opts: Option<&CreateOpts>) -> Result<Response>\n    where\n        P: AsRef<Path>,\n    {\n        let mut args = vec![\n            \"run\".to_string(),\n            \"--bundle\".to_string(),\n            utils::abs_string(bundle)?,\n        ];\n        if let Some(opts) = opts {\n            args.append(&mut opts.args()?);\n        }\n        args.push(id.to_string());\n        let mut cmd = self.command(&args)?;\n        if let Some(CreateOpts { io: Some(io), .. 
}) = opts {\n            io.set(&mut cmd).map_err(|e| Error::IoSet(e.to_string()))?;\n        };\n        self.launch(cmd, true)\n    }\n\n    /// Start an already created container\n    pub fn start(&self, id: &str) -> Result<Response> {\n        let args = [\"start\".to_string(), id.to_string()];\n        self.launch(self.command(&args)?, true)\n    }\n\n    /// Return the state of a container\n    pub fn state(&self, id: &str) -> Result<Container> {\n        let args = [\"state\".to_string(), id.to_string()];\n        let res = self.launch(self.command(&args)?, true)?;\n        serde_json::from_str(&res.output).map_err(Error::JsonDeserializationFailed)\n    }\n\n    /// Return the latest statistics for a container\n    pub fn stats(&self, id: &str) -> Result<events::Stats> {\n        let args = vec![\"events\".to_string(), \"--stats\".to_string(), id.to_string()];\n        let res = self.launch(self.command(&args)?, true)?;\n        let event: events::Event =\n            serde_json::from_str(&res.output).map_err(Error::JsonDeserializationFailed)?;\n        if let Some(stats) = event.stats {\n            Ok(stats)\n        } else {\n            Err(Error::MissingContainerStats)\n        }\n    }\n\n    /// Update a container with the provided resource spec\n    pub fn update(&self, id: &str, resources: &LinuxResources) -> Result<()> {\n        let (_temp_file, filename) = write_value_to_temp_file(resources)?;\n        let args = [\n            \"update\".to_string(),\n            \"--resources\".to_string(),\n            filename,\n            id.to_string(),\n        ];\n        self.launch(self.command(&args)?, true)?;\n        Ok(())\n    }\n}\n\npub trait Spawner: Debug {\n    fn execute(&self, cmd: Command) -> Result<(ExitStatus, u32, String, String)>;\n}\n\n#[derive(Debug)]\npub struct DefaultExecutor {}\n\nimpl Spawner for DefaultExecutor {\n    fn execute(&self, cmd: Command) -> Result<(ExitStatus, u32, String, String)> {\n        let mut cmd = cmd;\n    
    let child = cmd.spawn().map_err(Error::ProcessSpawnFailed)?;\n        let pid = child.id();\n        let result = child.wait_with_output().map_err(Error::InvalidCommand)?;\n        let status = result.status;\n        let stdout = String::from_utf8_lossy(&result.stdout).to_string();\n        let stderr = String::from_utf8_lossy(&result.stderr).to_string();\n        Ok((status, pid, stdout, stderr))\n    }\n}\n\n#[cfg(test)]\n#[cfg(target_os = \"linux\")]\nmod tests {\n    use std::sync::Arc;\n\n    use oci_spec::runtime::Process;\n\n    use crate::{\n        error::Error,\n        io::{InheritedStdIo, PipedStdIo},\n        options::{CreateOpts, DeleteOpts, ExecOpts, GlobalOpts},\n        Runc,\n    };\n\n    fn ok_client() -> Runc {\n        GlobalOpts::new()\n            .command(\"/bin/true\")\n            .build()\n            .expect(\"unable to create runc instance\")\n    }\n\n    fn fail_client() -> Runc {\n        GlobalOpts::new()\n            .command(\"/bin/false\")\n            .build()\n            .expect(\"unable to create runc instance\")\n    }\n\n    fn echo_client() -> Runc {\n        GlobalOpts::new()\n            .command(\"/bin/echo\")\n            .build()\n            .expect(\"unable to create runc instance\")\n    }\n\n    fn dummy_process() -> Process {\n        serde_json::from_str(\n            \"\n            {\n                \\\"user\\\": {\n                    \\\"uid\\\": 1000,\n                    \\\"gid\\\": 1000\n                },\n                \\\"cwd\\\": \\\"/path/to/dir\\\"\n            }\",\n        )\n        .unwrap()\n    }\n\n    #[test]\n    fn test_create() {\n        let opts = CreateOpts::new();\n        let ok_runc = ok_client();\n        let response = ok_runc\n            .create(\"fake-id\", \"fake-bundle\", Some(&opts))\n            .expect(\"true failed.\");\n        assert_ne!(response.pid, 0);\n        assert!(response.status.success());\n        assert!(response.output.is_empty());\n\n        let 
fail_runc = fail_client();\n        match fail_runc.create(\"fake-id\", \"fake-bundle\", Some(&opts)) {\n            Ok(_) => panic!(\"fail_runc returned exit status 0.\"),\n            Err(Error::CommandFailed {\n                status,\n                stdout,\n                stderr,\n            }) => {\n                if status.code().unwrap() == 1 && stdout.is_empty() && stderr.is_empty() {\n                    eprintln!(\"fail_runc succeeded.\");\n                } else {\n                    panic!(\"unexpected outputs from fail_runc.\")\n                }\n            }\n            Err(e) => panic!(\"unexpected error from fail_runc: {:?}\", e),\n        }\n    }\n\n    #[test]\n    fn test_run() {\n        let opts = CreateOpts::new();\n        let ok_runc = ok_client();\n        let response = ok_runc\n            .run(\"fake-id\", \"fake-bundle\", Some(&opts))\n            .expect(\"true failed.\");\n        assert_ne!(response.pid, 0);\n        assert!(response.status.success());\n        assert!(response.output.is_empty());\n\n        let fail_runc = fail_client();\n        match fail_runc.run(\"fake-id\", \"fake-bundle\", Some(&opts)) {\n            Ok(_) => panic!(\"fail_runc returned exit status 0.\"),\n            Err(Error::CommandFailed {\n                status,\n                stdout,\n                stderr,\n            }) => {\n                if status.code().unwrap() == 1 && stdout.is_empty() && stderr.is_empty() {\n                    eprintln!(\"fail_runc succeeded.\");\n                } else {\n                    panic!(\"unexpected outputs from fail_runc.\")\n                }\n            }\n            Err(e) => panic!(\"unexpected error from fail_runc: {:?}\", e),\n        }\n    }\n\n    #[test]\n    fn test_exec() {\n        let opts = ExecOpts::new();\n        let ok_runc = ok_client();\n        let proc = dummy_process();\n        ok_runc\n            .exec(\"fake-id\", &proc, Some(&opts))\n            .expect(\"true 
failed.\");\n        eprintln!(\"ok_runc succeeded.\");\n\n        let fail_runc = fail_client();\n        match fail_runc.exec(\"fake-id\", &proc, Some(&opts)) {\n            Ok(_) => panic!(\"fail_runc returned exit status 0.\"),\n            Err(Error::CommandFailed {\n                status,\n                stdout,\n                stderr,\n            }) => {\n                if status.code().unwrap() == 1 && stdout.is_empty() && stderr.is_empty() {\n                    eprintln!(\"fail_runc succeeded.\");\n                } else {\n                    panic!(\"unexpected outputs from fail_runc.\")\n                }\n            }\n            Err(e) => panic!(\"unexpected error from fail_runc: {:?}\", e),\n        }\n    }\n\n    #[test]\n    fn test_delete() {\n        let opts = DeleteOpts::new();\n        let ok_runc = ok_client();\n        ok_runc\n            .delete(\"fake-id\", Some(&opts))\n            .expect(\"true failed.\");\n        eprintln!(\"ok_runc succeeded.\");\n\n        let fail_runc = fail_client();\n        match fail_runc.delete(\"fake-id\", Some(&opts)) {\n            Ok(_) => panic!(\"fail_runc returned exit status 0.\"),\n            Err(Error::CommandFailed {\n                status,\n                stdout,\n                stderr,\n            }) => {\n                if status.code().unwrap() == 1 && stdout.is_empty() && stderr.is_empty() {\n                    eprintln!(\"fail_runc succeeded.\");\n                } else {\n                    panic!(\"unexpected outputs from fail_runc.\")\n                }\n            }\n            Err(e) => panic!(\"unexpected error from fail_runc: {:?}\", e),\n        }\n    }\n\n    #[test]\n    fn test_output() {\n        // test create cmd with inherit Io, expect empty cmd output\n        let mut opts = CreateOpts::new();\n        opts.io = Some(Arc::new(InheritedStdIo::new().unwrap()));\n        let echo_runc = echo_client();\n        let response = echo_runc\n            
.create(\"fake-id\", \"fake-bundle\", Some(&opts))\n            .expect(\"echo failed.\");\n        assert_ne!(response.pid, 0);\n        assert!(response.status.success());\n        assert!(response.output.is_empty());\n\n        // test create cmd with pipe Io, expect nonempty cmd output\n        let mut opts = CreateOpts::new();\n        opts.io = Some(Arc::new(PipedStdIo::new().unwrap()));\n        let echo_runc = echo_client();\n        let response = echo_runc\n            .create(\"fake-id\", \"fake-bundle\", Some(&opts))\n            .expect(\"echo failed.\");\n        assert_ne!(response.pid, 0);\n        assert!(response.status.success());\n        assert!(!response.output.is_empty());\n    }\n}\n"
  },
  {
    "path": "crates/runc/src/utils.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\n#[cfg(not(feature = \"async\"))]\nuse std::io::Write;\nuse std::{\n    env,\n    path::{Path, PathBuf},\n};\n\nuse serde::Serialize;\n#[cfg(not(feature = \"async\"))]\nuse tempfile::{Builder, NamedTempFile};\n#[cfg(feature = \"async\")]\nuse tokio::io::AsyncWriteExt;\nuse uuid::Uuid;\n\nuse crate::error::Error;\n\n// helper to resolve path (such as path for runc binary, pid files, etc. 
)\npub fn abs_path_buf<P>(path: P) -> Result<PathBuf, Error>\nwhere\n    P: AsRef<Path>,\n{\n    let abs = std::path::absolute(path).map_err(Error::InvalidPath)?;\n    let mut normalized = PathBuf::new();\n    for component in abs.components() {\n        match component {\n            std::path::Component::ParentDir => {\n                normalized.pop();\n            }\n            std::path::Component::CurDir => {}\n            c => normalized.push(c),\n        }\n    }\n    Ok(normalized)\n}\n\nfn path_to_string(path: impl AsRef<Path>) -> Result<String, Error> {\n    path.as_ref()\n        .to_str()\n        .map(|v| v.to_string())\n        .ok_or_else(|| {\n            Error::InvalidPath(std::io::Error::other(format!(\n                \"invalid UTF-8 string: {}\",\n                path.as_ref().to_string_lossy()\n            )))\n        })\n}\n\npub fn abs_string<P>(path: P) -> Result<String, Error>\nwhere\n    P: AsRef<Path>,\n{\n    path_to_string(abs_path_buf(path)?)\n}\n\n/// Returns a temp dir. 
If the environment variable \"XDG_RUNTIME_DIR\" is set, return its value.\n/// Otherwise if `std::env::temp_dir()` failed, return current dir or return the temp dir depended on OS.\nfn xdg_runtime_dir() -> String {\n    env::var(\"XDG_RUNTIME_DIR\")\n        .unwrap_or_else(|_| abs_string(env::temp_dir()).unwrap_or_else(|_| \".\".to_string()))\n}\n\n/// Write the serialized 'value' to a temp file\n#[cfg(not(feature = \"async\"))]\npub fn write_value_to_temp_file<T: Serialize>(value: &T) -> Result<(NamedTempFile, String), Error> {\n    let filename = format!(\"{}/runc-process-{}\", xdg_runtime_dir(), Uuid::new_v4());\n    let mut temp_file = Builder::new()\n        .prefix(&filename)\n        .rand_bytes(0)\n        .tempfile()\n        .map_err(Error::SpecFileCreationFailed)?;\n    let f = temp_file.as_file_mut();\n    let spec_json = serde_json::to_string(value).map_err(Error::JsonDeserializationFailed)?;\n    f.write(spec_json.as_bytes())\n        .map_err(Error::SpecFileCreationFailed)?;\n    f.flush().map_err(Error::SpecFileCreationFailed)?;\n    Ok((temp_file, filename))\n}\n\n/// Write the serialized 'value' to a temp file\n/// Unlike the same function in non-async feature,\n/// it returns the filename, without the NamedTempFile object,\n/// which implements Drop trait to remove the file if it goes out of scope.\n/// the async Drop is still not supported in rust,\n/// in async context, the created file should be removed by the caller\n#[cfg(feature = \"async\")]\npub async fn write_value_to_temp_file<T: Serialize>(value: &T) -> Result<String, Error> {\n    let filename = format!(\"{}/runc-process-{}\", xdg_runtime_dir(), Uuid::new_v4());\n    let mut f = tokio::fs::OpenOptions::new()\n        .create(true)\n        .truncate(true)\n        .write(true)\n        .open(&filename)\n        .await\n        .map_err(Error::FileSystemError)?;\n    let spec_json = serde_json::to_string(value).map_err(Error::JsonDeserializationFailed)?;\n    
f.write_all(spec_json.as_bytes())\n        .await\n        .map_err(Error::SpecFileCreationFailed)?;\n    f.flush().await.map_err(Error::SpecFileCreationFailed)?;\n    Ok(filename)\n}\n\n/// Resolve a binary path according to the `PATH` environment variable.\n///\n/// Note, the case that `path` is already an absolute path is implicitly handled by\n/// `dir.join(path.as_ref())`. `Path::join(parent_path, path)` directly returns `path` when `path`\n/// is an absolute path.\npub fn binary_path<P>(path: P) -> Option<PathBuf>\nwhere\n    P: AsRef<Path>,\n{\n    env::var_os(\"PATH\").and_then(|paths| {\n        env::split_paths(&paths).find_map(|dir| {\n            let full_path = dir.join(path.as_ref());\n            if full_path.is_file() {\n                Some(full_path)\n            } else {\n                None\n            }\n        })\n    })\n}\n"
  },
  {
    "path": "crates/runc-shim/Cargo.toml",
    "content": "[package]\nname = \"containerd-runc-shim\"\nversion = \"0.2.0\"\nauthors = [\n    \"Shaobao Feng <fshb1988@gmail.com>\",\n    \"Tianyang Zhang <burning9699@gmail.com>\",\n    \"The containerd Authors\",\n]\ndescription = \"Rust implementation of containerd's runc v2 shim runtime\"\nkeywords = [\"containerd\", \"shim\", \"containers\"]\ncategories = [\"api-bindings\", \"asynchronous\"]\n\nedition.workspace = true\nlicense.workspace = true\nrepository.workspace = true\nhomepage.workspace = true\n\n[[bin]]\n# Overwrite the binary name so it can be referred as \"io.containerd.runc.v2-rs\" from containerd.\n# Note: the runtime's binary name must start with \"io.containerd.runc\" in order to\n# keep compatibility with Go runc runtime and the containerd client.\n# Example: https://github.com/containerd/containerd/blob/8047eb2fcac1f4553ee7652862194b1e10855ce7/task_opts_unix.go#L33\nname = \"containerd-shim-runc-v2-rs\"\npath = \"src/main.rs\"\ndoc = false\n\n[dependencies]\ncontainerd-shim = { path = \"../shim\", version = \"0.11.0\", features = [\"async\"] }\nlibc.workspace = true\nlog.workspace = true\nnix = { workspace = true, features = [\"socket\", \"uio\", \"term\", \"signal\"] }\noci-spec = { workspace = true, features = [\"runtime\"] }\nrunc = { path = \"../runc\", version = \"0.3.0\", features = [\"async\"] }\nserde = { workspace = true, features = [\"derive\", \"std\"] }\nserde_json = { workspace = true, features = [\"std\"] }\ntime = { workspace = true, features = [\"std\"] }\nuuid = { workspace = true, features = [\"v4\"] }\n# Async dependencies\nasync-trait.workspace = true\ntokio = { workspace = true, features = [\"macros\", \"rt-multi-thread\", \"process\", \"sync\", \"fs\", \"io-util\", \"net\", \"time\", \"signal\"] }\nrustix = { version = \"1.1\", default-features = false, features = [\"std\", \"termios\"] }\n\n[package.metadata.cargo-machete]\nignored = [\"libc\"]\n\n[target.'cfg(target_os = \"linux\")'.dependencies]\ncgroups-rs.workspace 
= true\nnix = { workspace = true, features = [\"event\"] }\ntokio-eventfd = \"0.2.2\"\n"
  },
  {
    "path": "crates/runc-shim/README.md",
    "content": "# Rust containerd shim v2 for runc container\n\n[![Crates.io](https://img.shields.io/crates/v/containerd-runc-shim)](https://crates.io/crates/containerd-runc-shim)\n[![docs.rs](https://img.shields.io/docsrs/containerd-runc-shim)](https://docs.rs/containerd-runc-shim/latest/containerd-runc-shim/)\n[![Crates.io](https://img.shields.io/crates/l/containerd-shim)](https://github.com/containerd/rust-extensions/blob/main/LICENSE)\n[![CI](https://github.com/containerd/rust-extensions/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/containerd/rust-extensions/actions/workflows/ci.yml)\n\nBy default [containerd](https://github.com/containerd/containerd) relies on runc shim v2 runtime (written in `Go`) to launch containers.\nThis crate is an alternative Rust implementation of the shim runtime.\nIt conforms to containerd's integration tests and can be replaced with the original Go runtime interchangeably.\n\n## Usage\n\nTo build binary, run:\n```shell\ncargo build --release --bin containerd-shim-runc-v2-rs\n```\n\nReplace it to the containerd shim dir: `/usr/local/bin/containerd-shim-runc-v2-rs`\n\nIn order to use it from containerd, use:\n\n```shell\n$ sudo ctr run --rm --runtime io.containerd.runc.v2-rs -t docker.io/library/hello-world:latest hello\n```\n\nYou can run a container by `ctr`, `crictl` or kubernetes API.\n\n## Performance test\n\n### Memory overhead\n\nThree different kinds of shim binaries are used to compare memory overhead, first is `containerd-shimv2-runc-v2`\ncompiled by golang, next is our sync `containerd-shim-runc-v2-rs` and the last one is our async `containerd-shim-runc-v2-rs`\nbut limited to 2 work threads.\n\nWe run a *busybox* container inside a pod on a *16U32G Ubuntu20.04* mechine with *containerd v1.6.8* and *runc v1.1.4*.\nTo measure the memory size of shim process we parse the output of *smaps* file and add up all RSS segments.\nIn addition, we also run 100 pods and collect the total memory overhead.\n\n |     
                                                         | Single Process RSS | 100 Processes RSS |\n | :----------------------------------------------------------- | :----------------- | :---------------- |\n | containerd-shim-runc-v2                                      | 11.02MB            | 1106.52MB         |\n | containerd-shim-runc-v2-rs(sync)                             | 3.45MB             | 345.39MB          |\n | containerd-shim-runc-v2-rs(async, limited to 2 work threads) | 3.90MB             | 396.83MB          |\n"
  },
  {
    "path": "crates/runc-shim/build.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::{process::Command, str::from_utf8};\n\nfn main() {\n    let output = match Command::new(\"git\").arg(\"rev-parse\").arg(\"HEAD\").output() {\n        Ok(output) => output,\n        Err(_) => {\n            return;\n        }\n    };\n    let mut hash = from_utf8(&output.stdout).unwrap().trim().to_string();\n\n    let output_dirty = match Command::new(\"git\").arg(\"diff\").arg(\"--exit-code\").output() {\n        Ok(output) => output,\n        Err(_) => {\n            return;\n        }\n    };\n\n    if !output_dirty.status.success() {\n        hash.push_str(\".m\");\n    }\n    println!(\"cargo:rustc-env=CARGO_GIT_HASH={}\", hash);\n}\n"
  },
  {
    "path": "crates/runc-shim/src/cgroup_memory.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\n#![cfg(target_os = \"linux\")]\n\nuse std::{os::unix::io::AsRawFd, path::Path};\n\nuse containerd_shim::{\n    error::{Error, Result},\n    io_error, other_error,\n};\nuse tokio::{\n    fs::{self, read_to_string},\n    io::AsyncReadExt,\n    sync::mpsc::{self, Receiver},\n};\nuse tokio_eventfd::EventFd;\n\npub async fn get_path_from_cgorup(pid: u32) -> Result<String> {\n    let proc_path = format!(\"/proc/{}/cgroup\", pid);\n    let path_string = read_to_string(&proc_path)\n        .await\n        .map_err(io_error!(e, \"open {}.\", &proc_path))?;\n\n    let (_, path) = path_string\n        .lines()\n        .find(|line| line.contains(\"memory\"))\n        .ok_or(Error::Other(\"Memory line not found\".into()))?\n        .split_once(\":memory:\")\n        .ok_or(Error::Other(\"Failed to parse memory line\".into()))?;\n\n    Ok(path.to_string())\n}\n\npub async fn get_existing_cgroup_mem_path(pid_path: String) -> Result<(String, String)> {\n    let (mut mount_root, mount_point) = get_path_from_mountinfo().await?;\n    if mount_root == \"/\" {\n        mount_root = String::from(\"\");\n    }\n    let mount_root = pid_path.trim_start_matches(&mount_root).to_string();\n    Ok((mount_root, mount_point))\n}\n\nasync fn get_path_from_mountinfo() -> Result<(String, String)> {\n    let mountinfo_path = \"/proc/self/mountinfo\";\n    let mountinfo_string 
=\n        read_to_string(mountinfo_path)\n            .await\n            .map_err(io_error!(e, \"open {}.\", mountinfo_path))?;\n\n    let line = mountinfo_string\n        .lines()\n        .find(|line| line.contains(\"cgroup\") && line.contains(\"memory\"))\n        .ok_or(Error::Other(\n            \"Lines containers cgroup and memory not found in mountinfo\".into(),\n        ))?;\n\n    parse_memory_mountroot(line)\n}\n\nfn parse_memory_mountroot(line: &str) -> Result<(String, String)> {\n    let mut columns = line.split_whitespace();\n    let mount_root = columns.nth(3).ok_or(Error::Other(\n        \"Invalid input information about mountinfo\".into(),\n    ))?;\n    let mount_point = columns.next().ok_or(Error::Other(\n        \"Invalid input information about mountinfo\".into(),\n    ))?;\n    Ok((mount_root.to_string(), mount_point.to_string()))\n}\n\npub async fn register_memory_event(\n    key: &str,\n    cg_dir: &Path,\n    event_name: &str,\n) -> Result<Receiver<String>> {\n    let path = cg_dir.join(event_name);\n    let event_file = fs::File::open(path.clone())\n        .await\n        .map_err(other_error!(\"Error get path:\"))?;\n    let mut eventfd = EventFd::new(0, false).map_err(other_error!(\"Error create eventfd:\"))?;\n    let event_control_path = cg_dir.join(\"cgroup.event_control\");\n    let data = format!(\"{} {}\", eventfd.as_raw_fd(), event_file.as_raw_fd());\n    fs::write(&event_control_path, data.clone())\n        .await\n        .map_err(other_error!(\"Error write eventfd:\"))?;\n\n    let mut buf = [0u8; 8];\n\n    let (sender, receiver) = mpsc::channel(128);\n    let key = key.to_string();\n\n    tokio::spawn(async move {\n        loop {\n            match eventfd.read(&mut buf).await {\n                Ok(0) => return,\n                Err(_) => return,\n                _ => (),\n            }\n            if !Path::new(&event_control_path).exists() {\n                return;\n            }\n            
sender.send(key.clone()).await.unwrap();\n        }\n    });\n\n    Ok(receiver)\n}\n\n#[cfg(test)]\nmod tests {\n    use std::path::Path;\n\n    use cgroups_rs::{\n        fs::{\n            hierarchies::{self, is_cgroup2_unified_mode},\n            memory::MemController,\n            Cgroup,\n        },\n        CgroupPid,\n    };\n    use tokio::{fs::remove_file, io::AsyncWriteExt, process::Command};\n\n    use crate::cgroup_memory;\n\n    #[tokio::test]\n    async fn test_cgroupv1_oom_monitor() {\n        if !is_cgroup2_unified_mode() {\n            // Create a memory cgroup with limits on both memory and swap.\n            let path = \"cgroupv1_oom_monitor\";\n            let cg = Cgroup::new(hierarchies::auto(), path).unwrap();\n            let mem_controller: &MemController = cg.controller_of().unwrap();\n            mem_controller.set_limit(10 * 1024 * 1024).unwrap(); // 10M\n            mem_controller.set_swappiness(0).unwrap();\n\n            // Create a sh sub process, and let it wait for the stdinput.\n            let mut child_process = Command::new(\"sh\")\n                .stdin(std::process::Stdio::piped())\n                .spawn()\n                .unwrap();\n\n            let pid = child_process.id().unwrap();\n\n            // Add the sh subprocess to the cgroup.\n            cg.add_task_by_tgid(CgroupPid::from(pid as u64)).unwrap();\n\n            // Set oom monitor\n            let path_from_cgorup = cgroup_memory::get_path_from_cgorup(pid).await.unwrap();\n            let (mount_root, mount_point) =\n                cgroup_memory::get_existing_cgroup_mem_path(path_from_cgorup)\n                    .await\n                    .unwrap();\n\n            let mem_cgroup_path = mount_point + &mount_root;\n            let mut rx = cgroup_memory::register_memory_event(\n                pid.to_string().as_str(),\n                Path::new(&mem_cgroup_path),\n                \"memory.oom_control\",\n            )\n            .await\n            
.unwrap();\n\n            // Exec the sh subprocess to a dd command that consumes more than 10M of memory.\n            if let Some(mut stdin) = child_process.stdin.take() {\n                stdin\n                    .write_all(\n                        b\"exec dd if=/dev/zero of=/tmp/test_oom_monitor_file bs=11M count=1\\n\",\n                    )\n                    .await\n                    .unwrap();\n                stdin.flush().await.unwrap();\n            }\n\n            // Wait for the oom message.\n            if let Some(item) = rx.recv().await {\n                assert_eq!(pid.to_string(), item, \"Receive error oom message\");\n            }\n\n            // Clean.\n            child_process.wait().await.unwrap();\n            cg.delete().unwrap();\n            remove_file(\"/tmp/test_oom_monitor_file\").await.unwrap();\n        }\n    }\n}\n"
  },
  {
    "path": "crates/runc-shim/src/common.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::{\n    env,\n    future::Future,\n    io::IoSliceMut,\n    ops::Deref,\n    os::{\n        fd::{AsRawFd, FromRawFd, OwnedFd},\n        unix::io::RawFd,\n    },\n    path::Path,\n    sync::Arc,\n    time::Duration,\n};\n\nuse containerd_shim::{\n    api::{ExecProcessRequest, Options},\n    io_error, other, other_error,\n    util::IntoOption,\n    Error,\n};\nuse log::{debug, warn};\nuse nix::{\n    cmsg_space,\n    sys::{\n        socket::{recvmsg, ControlMessageOwned, MsgFlags, UnixAddr},\n        termios::tcgetattr,\n    },\n};\nuse oci_spec::runtime::{LinuxNamespaceType, Spec};\nuse runc::{\n    io::{Io, NullIo, FIFO},\n    options::GlobalOpts,\n    Runc, Spawner,\n};\nuse serde::Deserialize;\n\nuse super::io::Stdio;\n\npub const GROUP_LABELS: [&str; 2] = [\n    \"io.containerd.runc.v2.group\",\n    \"io.kubernetes.cri.sandbox-id\",\n];\npub const INIT_PID_FILE: &str = \"init.pid\";\npub const LOG_JSON_FILE: &str = \"log.json\";\npub const FIFO_SCHEME: &str = \"fifo\";\n\nconst TIMEOUT_DURATION: std::time::Duration = Duration::from_secs(3);\n\n#[derive(Deserialize)]\npub struct Log {\n    pub level: String,\n    pub msg: String,\n}\n\n#[derive(Default)]\npub struct ProcessIO {\n    pub uri: Option<String>,\n    pub io: Option<Arc<dyn Io>>,\n    pub copy: bool,\n}\n\npub fn create_io(\n    id: &str,\n    _io_uid: u32,\n    _io_gid: 
u32,\n    stdio: &Stdio,\n) -> containerd_shim::Result<ProcessIO> {\n    let mut pio = ProcessIO::default();\n    if stdio.is_null() {\n        let nio = NullIo::new().map_err(io_error!(e, \"new Null Io\"))?;\n        pio.io = Some(Arc::new(nio));\n        return Ok(pio);\n    }\n    let stdout = stdio.stdout.as_str();\n    let scheme_path = stdout.trim().split(\"://\").collect::<Vec<&str>>();\n    let scheme: &str;\n    if scheme_path.len() <= 1 {\n        // no scheme specified, default schema to fifo\n        scheme = FIFO_SCHEME;\n        pio.uri = Some(format!(\"{}://{}\", scheme, stdout));\n    } else {\n        scheme = scheme_path[0];\n        pio.uri = Some(stdout.to_string());\n    }\n\n    if scheme == FIFO_SCHEME {\n        debug!(\n            \"create named pipe io for container {}, stdin: {}, stdout: {}, stderr: {}\",\n            id,\n            stdio.stdin.as_str(),\n            stdio.stdout.as_str(),\n            stdio.stderr.as_str()\n        );\n        let io = FIFO {\n            stdin: stdio.stdin.to_string().none_if(|x| x.is_empty()),\n            stdout: stdio.stdout.to_string().none_if(|x| x.is_empty()),\n            stderr: stdio.stderr.to_string().none_if(|x| x.is_empty()),\n        };\n        pio.io = Some(Arc::new(io));\n        pio.copy = false;\n    }\n    Ok(pio)\n}\n\n#[derive(Default, Debug)]\npub struct ShimExecutor {}\n\npub fn get_spec_from_request(\n    req: &ExecProcessRequest,\n) -> containerd_shim::Result<oci_spec::runtime::Process> {\n    if let Some(val) = req.spec.as_ref() {\n        let mut p = serde_json::from_slice::<oci_spec::runtime::Process>(val.value.as_slice())?;\n        p.set_terminal(Some(req.terminal));\n        Ok(p)\n    } else {\n        Err(Error::InvalidArgument(\"no spec in request\".to_string()))\n    }\n}\n\npub fn check_kill_error(emsg: String) -> Error {\n    let emsg = emsg.to_lowercase();\n    if emsg.contains(\"process already finished\")\n        || emsg.contains(\"container not running\")\n   
     || emsg.contains(\"no such process\")\n    {\n        Error::NotFoundError(\"process already finished\".to_string())\n    } else if emsg.contains(\"does not exist\") {\n        Error::NotFoundError(\"no such container\".to_string())\n    } else {\n        other!(\"unknown error after kill {}\", emsg)\n    }\n}\n\nconst DEFAULT_RUNC_ROOT: &str = \"/run/containerd/runc\";\nconst DEFAULT_COMMAND: &str = \"runc\";\n\npub fn create_runc(\n    runtime: &str,\n    namespace: &str,\n    bundle: impl AsRef<Path>,\n    opts: &Options,\n    spawner: Option<Arc<dyn Spawner + Send + Sync>>,\n) -> containerd_shim::Result<Runc> {\n    let runtime = if runtime.is_empty() {\n        DEFAULT_COMMAND\n    } else {\n        runtime\n    };\n    let root = opts.root.as_str();\n    let root = Path::new(if root.is_empty() {\n        DEFAULT_RUNC_ROOT\n    } else {\n        root\n    })\n    .join(namespace);\n\n    let log = bundle.as_ref().join(LOG_JSON_FILE);\n    let mut gopts = GlobalOpts::default()\n        .command(runtime)\n        .root(root)\n        .log(log)\n        .log_json()\n        .systemd_cgroup(opts.systemd_cgroup);\n    if let Some(s) = spawner {\n        gopts.custom_spawner(s);\n    }\n    gopts\n        .build()\n        .map_err(other_error!(\"unable to create runc instance\"))\n}\n\n#[derive(Default)]\npub(crate) struct CreateConfig {}\n\npub fn receive_socket(stream_fd: RawFd) -> containerd_shim::Result<OwnedFd> {\n    let mut buf = [0u8; 4096];\n    let mut iovec = [IoSliceMut::new(&mut buf)];\n    let mut space = cmsg_space!([RawFd; 2]);\n    let (path, fds) =\n        match recvmsg::<UnixAddr>(stream_fd, &mut iovec, Some(&mut space), MsgFlags::empty()) {\n            Ok(msg) => {\n                let iter = msg.cmsgs();\n                if let Some(ControlMessageOwned::ScmRights(fds)) = iter?.next() {\n                    (iovec[0].deref(), fds)\n                } else {\n                    return Err(other!(\"received message is empty\"));\n           
     }\n            }\n            Err(e) => {\n                return Err(other!(\"failed to receive message: {}\", e));\n            }\n        };\n    if fds.is_empty() {\n        return Err(other!(\"received message is empty\"));\n    }\n    let path = String::from_utf8(Vec::from(path)).unwrap_or_else(|e| {\n        warn!(\"failed to get path from array {}\", e);\n        \"\".to_string()\n    });\n\n    let fd = unsafe { OwnedFd::from_raw_fd(fds[0]) };\n\n    let path = path.trim_matches(char::from(0));\n    debug!(\n        \"copy_console: console socket get path: {}, fd: {}\",\n        path,\n        fd.as_raw_fd(),\n    );\n    tcgetattr(&fd)?;\n    Ok(fd)\n}\n\npub fn has_shared_pid_namespace(spec: &Spec) -> bool {\n    match spec.linux() {\n        None => true,\n        Some(linux) => match linux.namespaces() {\n            None => true,\n            Some(namespaces) => {\n                for ns in namespaces {\n                    if ns.typ() == LinuxNamespaceType::Pid && ns.path().is_none() {\n                        return false;\n                    }\n                }\n                true\n            }\n        },\n    }\n}\n\n/// Returns a temp dir. 
If the environment variable \"XDG_RUNTIME_DIR\" is set, return its value.\n/// Otherwise if `std::env::temp_dir()` failed, return current dir or return the temp dir depended on OS.\npub(crate) fn xdg_runtime_dir() -> String {\n    env::var(\"XDG_RUNTIME_DIR\")\n        .unwrap_or_else(|_| env::temp_dir().to_str().unwrap_or(\".\").to_string())\n}\n\npub async fn handle_file_open<F, Fut>(file_op: F) -> Result<tokio::fs::File, tokio::io::Error>\nwhere\n    F: FnOnce() -> Fut,\n    Fut: Future<Output = Result<tokio::fs::File, tokio::io::Error>> + Send,\n{\n    match tokio::time::timeout(TIMEOUT_DURATION, file_op()).await {\n        Ok(result) => result,\n        Err(_) => Err(std::io::Error::new(\n            std::io::ErrorKind::TimedOut,\n            \"File operation timed out\",\n        )),\n    }\n}\n"
  },
  {
    "path": "crates/runc-shim/src/console.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::path::{Path, PathBuf};\n\nuse containerd_shim::{io_error, util::mkdir, Error, Result};\nuse log::warn;\nuse tokio::net::{UnixListener, UnixStream};\nuse uuid::Uuid;\n\nuse crate::common::xdg_runtime_dir;\n\npub struct ConsoleSocket {\n    pub listener: UnixListener,\n    pub path: PathBuf,\n    pub rmdir: bool,\n}\n\nimpl ConsoleSocket {\n    pub async fn new() -> Result<ConsoleSocket> {\n        let dir = format!(\"{}/pty{}\", xdg_runtime_dir(), Uuid::new_v4());\n        mkdir(&dir, 0o711).await?;\n        let file_name = Path::new(&dir).join(\"pty.sock\");\n        let listener = UnixListener::bind(&file_name).map_err(io_error!(\n            e,\n            \"bind socket {}\",\n            file_name.display()\n        ))?;\n        Ok(ConsoleSocket {\n            listener,\n            path: file_name,\n            rmdir: true,\n        })\n    }\n\n    pub async fn accept(&self) -> Result<UnixStream> {\n        let (stream, _addr) = self\n            .listener\n            .accept()\n            .await\n            .map_err(io_error!(e, \"failed to list console socket\"))?;\n        Ok(stream)\n    }\n\n    // async drop is not supported yet, we can only call clean manually after socket received\n    pub async fn clean(self) {\n        if self.rmdir {\n            if let Some(tmp_socket_dir) = self.path.parent() {\n                
tokio::fs::remove_dir_all(tmp_socket_dir)\n                    .await\n                    .unwrap_or_else(|e| {\n                        warn!(\n                            \"remove tmp console socket path {} : {}\",\n                            tmp_socket_dir.display(),\n                            e\n                        )\n                    })\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "crates/runc-shim/src/container.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::collections::HashMap;\n\nuse async_trait::async_trait;\nuse containerd_shim::{\n    api::Status,\n    error::Result,\n    protos::{\n        api::{CreateTaskRequest, ExecProcessRequest, ProcessInfo, StateResponse},\n        cgroups::metrics::Metrics,\n        protobuf::{well_known_types::any::Any, EnumOrUnknown, Message, MessageDyn},\n        shim::oci::ProcessDetails,\n    },\n    Error,\n};\nuse log::debug;\nuse oci_spec::runtime::LinuxResources;\nuse time::OffsetDateTime;\nuse tokio::sync::oneshot::Receiver;\n\nuse super::processes::Process;\n\n#[async_trait]\npub trait Container {\n    async fn start(&mut self, exec_id: Option<&str>) -> Result<i32>;\n    async fn state(&self, exec_id: Option<&str>) -> Result<StateResponse>;\n    async fn kill(&mut self, exec_id: Option<&str>, signal: u32, all: bool) -> Result<()>;\n    async fn wait_channel(&mut self, exec_id: Option<&str>) -> Result<Receiver<()>>;\n    async fn get_exit_info(\n        &self,\n        exec_id: Option<&str>,\n    ) -> Result<(i32, i32, Option<OffsetDateTime>)>;\n    async fn delete(\n        &mut self,\n        exec_id_opt: Option<&str>,\n    ) -> Result<(i32, i32, Option<OffsetDateTime>)>;\n    async fn exec(&mut self, req: ExecProcessRequest) -> Result<()>;\n    async fn resize_pty(&mut self, exec_id: Option<&str>, height: u32, width: u32) -> Result<()>;\n    async 
fn pid(&self) -> i32;\n    async fn id(&self) -> String;\n    async fn update(&mut self, resources: &LinuxResources) -> Result<()>;\n    async fn stats(&self) -> Result<Metrics>;\n    async fn all_processes(&self) -> Result<Vec<ProcessInfo>>;\n    async fn close_io(&mut self, exec_id: Option<&str>) -> Result<()>;\n    async fn pause(&mut self) -> Result<()>;\n    async fn resume(&mut self) -> Result<()>;\n    async fn init_state(&self) -> EnumOrUnknown<Status>;\n}\n\n#[async_trait]\npub trait ContainerFactory<C> {\n    async fn create(&self, ns: &str, req: &CreateTaskRequest) -> Result<C>;\n    async fn cleanup(&self, ns: &str, c: &C) -> Result<()>;\n}\n\n#[async_trait]\npub trait ProcessFactory<E> {\n    async fn create(&self, req: &ExecProcessRequest) -> Result<E>;\n}\n\n/// ContainerTemplate is a template struct to implement Container,\n/// most of the methods can be delegated to either init process or exec process.\n/// that's why we provide a ContainerTemplate struct,\n/// library users only need to implement Process for their own.\npub struct ContainerTemplate<T, E, P> {\n    /// container id\n    pub id: String,\n    /// container bundle path\n    pub bundle: String,\n    /// init process of this container\n    pub init: T,\n    /// process factory that creates processes when exec\n    pub process_factory: P,\n    /// exec processes of this container\n    pub processes: HashMap<String, E>,\n}\n\n#[async_trait]\nimpl<T, E, P> Container for ContainerTemplate<T, E, P>\nwhere\n    T: Process + Send + Sync,\n    E: Process + Send + Sync,\n    P: ProcessFactory<E> + Send + Sync,\n{\n    async fn init_state(&self) -> EnumOrUnknown<Status> {\n        // Default should be unknown\n        self.init.state().await.unwrap_or_default().status\n    }\n\n    async fn start(&mut self, exec_id: Option<&str>) -> Result<i32> {\n        let process = self.get_mut_process(exec_id)?;\n        process.start().await?;\n        Ok(process.pid().await)\n    }\n\n    async fn 
state(&self, exec_id: Option<&str>) -> Result<StateResponse> {\n        let process = self.get_process(exec_id)?;\n        let mut resp = process.state().await?;\n        let init_state = self.init.state().await?.status;\n        if init_state == EnumOrUnknown::new(Status::PAUSING)\n            || init_state == EnumOrUnknown::new(Status::PAUSED)\n        {\n            resp.status = init_state;\n        }\n        resp.bundle = self.bundle.to_string();\n        debug!(\"container state: {:?}\", resp);\n        Ok(resp)\n    }\n\n    async fn kill(&mut self, exec_id: Option<&str>, signal: u32, all: bool) -> Result<()> {\n        let process = self.get_mut_process(exec_id)?;\n        process.kill(signal, all).await\n    }\n\n    async fn wait_channel(&mut self, exec_id: Option<&str>) -> Result<Receiver<()>> {\n        let process = self.get_mut_process(exec_id)?;\n        process.wait_channel().await\n    }\n\n    async fn get_exit_info(\n        &self,\n        exec_id: Option<&str>,\n    ) -> Result<(i32, i32, Option<OffsetDateTime>)> {\n        let process = self.get_process(exec_id)?;\n        Ok((\n            process.pid().await,\n            process.exit_code().await,\n            process.exited_at().await,\n        ))\n    }\n\n    async fn delete(\n        &mut self,\n        exec_id_opt: Option<&str>,\n    ) -> Result<(i32, i32, Option<OffsetDateTime>)> {\n        let (pid, code, exited_at) = self.get_exit_info(exec_id_opt).await?;\n        let process = self.get_mut_process(exec_id_opt);\n        match process {\n            Ok(p) => p.delete().await?,\n            Err(e) => return Err(e),\n        }\n        if let Some(exec_id) = exec_id_opt {\n            self.processes.remove(exec_id);\n        }\n        Ok((pid, code, exited_at))\n    }\n\n    async fn exec(&mut self, req: ExecProcessRequest) -> Result<()> {\n        let exec_id = req.exec_id.to_string();\n        let exec_process = self.process_factory.create(&req).await?;\n        
self.processes.insert(exec_id, exec_process);\n        Ok(())\n    }\n\n    async fn resize_pty(&mut self, exec_id: Option<&str>, height: u32, width: u32) -> Result<()> {\n        let process = self.get_mut_process(exec_id)?;\n        process.resize_pty(height, width).await\n    }\n\n    async fn pid(&self) -> i32 {\n        self.init.pid().await\n    }\n\n    async fn id(&self) -> String {\n        self.id.to_string()\n    }\n\n    #[cfg(target_os = \"linux\")]\n    async fn update(&mut self, resources: &LinuxResources) -> Result<()> {\n        self.init.update(resources).await\n    }\n\n    #[cfg(not(target_os = \"linux\"))]\n    async fn update(&mut self, _resources: &LinuxResources) -> Result<()> {\n        Err(Error::Unimplemented(\"update\".to_string()))\n    }\n\n    #[cfg(target_os = \"linux\")]\n    async fn stats(&self) -> Result<Metrics> {\n        self.init.stats().await\n    }\n\n    #[cfg(not(target_os = \"linux\"))]\n    async fn stats(&self) -> Result<Metrics> {\n        Err(Error::Unimplemented(\"stats\".to_string()))\n    }\n\n    async fn all_processes(&self) -> Result<Vec<ProcessInfo>> {\n        let mut processes_info = self.init.ps().await?;\n        for process_info in &mut processes_info {\n            for (exec_id, process) in &self.processes {\n                if process_info.pid as i32 == process.pid().await {\n                    let process_details = ProcessDetails {\n                        exec_id: exec_id.to_string(),\n                        special_fields: Default::default(),\n                    };\n                    let v = Any {\n                        type_url: process_details.descriptor_dyn().full_name().to_string(),\n                        value: process_details.write_to_bytes()?,\n                        special_fields: Default::default(),\n                    };\n                    process_info.set_info(v);\n                    break;\n                }\n            }\n        }\n        Ok(processes_info)\n    }\n\n   
 async fn close_io(&mut self, exec_id: Option<&str>) -> Result<()> {\n        let process = self.get_mut_process(exec_id)?;\n        process.close_io().await\n    }\n\n    async fn pause(&mut self) -> Result<()> {\n        self.init.pause().await\n    }\n\n    async fn resume(&mut self) -> Result<()> {\n        self.init.resume().await\n    }\n}\n\nimpl<T, E, P> ContainerTemplate<T, E, P>\nwhere\n    T: Process + Send + Sync,\n    E: Process + Send + Sync,\n{\n    pub fn get_process(&self, exec_id: Option<&str>) -> Result<&(dyn Process + Send + Sync)> {\n        match exec_id {\n            Some(exec_id) => {\n                let p = self.processes.get(exec_id).ok_or_else(|| {\n                    Error::NotFoundError(\"can not find the exec by id\".to_string())\n                })?;\n                Ok(p)\n            }\n            None => Ok(&self.init),\n        }\n    }\n\n    pub fn get_mut_process(\n        &mut self,\n        exec_id: Option<&str>,\n    ) -> Result<&mut (dyn Process + Send + Sync)> {\n        match exec_id {\n            Some(exec_id) => {\n                let p = self.processes.get_mut(exec_id).ok_or_else(|| {\n                    Error::NotFoundError(format!(\"can not find the exec by id {}\", exec_id))\n                })?;\n                Ok(p)\n            }\n            None => Ok(&mut self.init),\n        }\n    }\n}\n"
  },
  {
    "path": "crates/runc-shim/src/io.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\n#[derive(Clone, Debug, Default)]\npub struct Stdio {\n    pub stdin: String,\n    pub stdout: String,\n    pub stderr: String,\n    pub terminal: bool,\n}\n\nimpl Stdio {\n    pub fn new(stdin: &str, stdout: &str, stderr: &str, terminal: bool) -> Self {\n        Self {\n            stdin: stdin.to_string(),\n            stdout: stdout.to_string(),\n            stderr: stderr.to_string(),\n            terminal,\n        }\n    }\n\n    pub fn is_null(&self) -> bool {\n        self.stdin.is_empty() && self.stdout.is_empty() && self.stderr.is_empty()\n    }\n}\n"
  },
  {
    "path": "crates/runc-shim/src/main.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::{env, io::Write};\n\nuse containerd_shim::{\n    asynchronous::run,\n    parse,\n    protos::protobuf::{well_known_types::any::Any, Message},\n    run_info,\n};\n\nmod cgroup_memory;\nmod common;\nmod console;\nmod container;\nmod io;\nmod processes;\nmod runc;\nmod service;\nmod task;\n\nuse service::Service;\n\nfn parse_version() {\n    let os_args: Vec<_> = env::args_os().collect();\n    let flags = match parse(&os_args[1..]) {\n        Ok(flags) => flags,\n        Err(e) => {\n            eprintln!(\"Error parsing arguments: {}\", e);\n            std::process::exit(1);\n        }\n    };\n    if flags.version {\n        println!(\"{}:\", os_args[0].to_string_lossy());\n        println!(\"  Version: {}\", env!(\"CARGO_PKG_VERSION\"));\n        println!(\"  Revision: {}\", env!(\"CARGO_GIT_HASH\"));\n        println!();\n\n        std::process::exit(0);\n    }\n    if flags.info {\n        let r = run_info();\n        match r {\n            Ok(rinfo) => {\n                let mut info = Any::new();\n                info.type_url = \"io.containerd.runc.v2.Info\".to_string();\n                info.value = match rinfo.write_to_bytes() {\n                    Ok(bytes) => bytes,\n                    Err(e) => {\n                        eprintln!(\"Failed to write runtime info to bytes: {}\", e);\n                        
std::process::exit(1);\n                    }\n                };\n                std::io::stdout()\n                    .write_all(info.write_to_bytes().unwrap().as_slice())\n                    .expect(\"Failed to write to stdout\");\n            }\n            Err(_) => {\n                eprintln!(\"Failed to get runtime info\");\n                std::process::exit(1);\n            }\n        }\n        std::process::exit(0);\n    }\n}\n\n#[tokio::main]\nasync fn main() {\n    parse_version();\n    run::<Service>(\"io.containerd.runc.v2-rs\", None).await;\n}\n"
  },
  {
    "path": "crates/runc-shim/src/processes.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::sync::{Arc, Mutex};\n\nuse async_trait::async_trait;\nuse containerd_shim::{\n    protos::{\n        api::{ProcessInfo, StateResponse, Status},\n        cgroups::metrics::Metrics,\n        protobuf::well_known_types::timestamp::Timestamp,\n    },\n    Console, Result,\n};\nuse oci_spec::runtime::LinuxResources;\nuse rustix::termios::{tcsetwinsize, Winsize};\nuse time::OffsetDateTime;\nuse tokio::{\n    fs::File,\n    sync::oneshot::{channel, Receiver, Sender},\n};\n\nuse crate::io::Stdio;\n\n#[allow(dead_code)]\n#[async_trait]\npub trait Process {\n    async fn start(&mut self) -> Result<()>;\n    async fn set_exited(&mut self, exit_code: i32);\n    async fn pid(&self) -> i32;\n    async fn state(&self) -> Result<StateResponse>;\n    async fn kill(&mut self, signal: u32, all: bool) -> Result<()>;\n    async fn delete(&mut self) -> Result<()>;\n    async fn wait_channel(&mut self) -> Result<Receiver<()>>;\n    async fn exit_code(&self) -> i32;\n    async fn exited_at(&self) -> Option<OffsetDateTime>;\n    async fn resize_pty(&mut self, height: u32, width: u32) -> Result<()>;\n    async fn update(&mut self, resources: &LinuxResources) -> Result<()>;\n    async fn stats(&self) -> Result<Metrics>;\n    async fn ps(&self) -> Result<Vec<ProcessInfo>>;\n    async fn close_io(&mut self) -> Result<()>;\n    async fn pause(&mut self) -> 
Result<()>;\n    async fn resume(&mut self) -> Result<()>;\n    async fn id(&self) -> &str;\n}\n#[allow(dead_code)]\n#[async_trait]\npub trait ProcessLifecycle<P: Process> {\n    async fn start(&self, p: &mut P) -> Result<()>;\n    async fn kill(&self, p: &mut P, signal: u32, all: bool) -> Result<()>;\n    async fn delete(&self, p: &mut P) -> Result<()>;\n    async fn update(&self, p: &mut P, resources: &LinuxResources) -> Result<()>;\n    async fn stats(&self, p: &P) -> Result<Metrics>;\n    async fn ps(&self, p: &P) -> Result<Vec<ProcessInfo>>;\n    async fn pause(&self, p: &mut P) -> Result<()>;\n    async fn resume(&self, p: &mut P) -> Result<()>;\n}\n\npub struct ProcessTemplate<S> {\n    pub state: Status,\n    pub id: String,\n    pub stdio: Stdio,\n    pub pid: i32,\n    pub exit_code: i32,\n    pub exited_at: Option<OffsetDateTime>,\n    pub wait_chan_tx: Vec<Sender<()>>,\n    pub console: Option<Console>,\n    pub lifecycle: Arc<S>,\n    pub stdin: Arc<Mutex<Option<File>>>,\n}\n\nimpl<S> ProcessTemplate<S> {\n    pub fn new(id: &str, stdio: Stdio, lifecycle: S) -> Self {\n        Self {\n            state: Status::CREATED,\n            id: id.to_string(),\n            stdio,\n            pid: 0,\n            exit_code: 0,\n            exited_at: None,\n            wait_chan_tx: vec![],\n            console: None,\n            lifecycle: Arc::new(lifecycle),\n            stdin: Arc::new(Mutex::new(None)),\n        }\n    }\n}\n\n#[async_trait]\nimpl<S> Process for ProcessTemplate<S>\nwhere\n    S: ProcessLifecycle<Self> + Sync + Send,\n{\n    async fn start(&mut self) -> Result<()> {\n        self.lifecycle.clone().start(self).await?;\n        Ok(())\n    }\n\n    async fn set_exited(&mut self, exit_code: i32) {\n        self.state = Status::STOPPED;\n        self.exit_code = exit_code;\n        self.exited_at = Some(OffsetDateTime::now_utc());\n        // set wait_chan_tx to empty, to trigger the drop of the initialized Receiver.\n        
self.wait_chan_tx = vec![];\n    }\n\n    async fn pid(&self) -> i32 {\n        self.pid\n    }\n\n    async fn id(&self) -> &str {\n        self.id.as_str()\n    }\n\n    async fn state(&self) -> Result<StateResponse> {\n        let mut resp = StateResponse::new();\n        resp.id = self.id.to_string();\n        resp.set_status(self.state);\n        resp.pid = self.pid as u32;\n        resp.terminal = self.stdio.terminal;\n        resp.stdin = self.stdio.stdin.to_string();\n        resp.stdout = self.stdio.stdout.to_string();\n        resp.stderr = self.stdio.stderr.to_string();\n        resp.exit_status = self.exit_code as u32;\n        if let Some(exit_at) = self.exited_at {\n            let mut time_stamp = Timestamp::new();\n            time_stamp.seconds = exit_at.unix_timestamp();\n            time_stamp.nanos = exit_at.nanosecond() as i32;\n            resp.exited_at = Some(time_stamp).into();\n        }\n        Ok(resp)\n    }\n\n    async fn kill(&mut self, signal: u32, all: bool) -> Result<()> {\n        self.lifecycle.clone().kill(self, signal, all).await\n    }\n\n    async fn delete(&mut self) -> Result<()> {\n        self.lifecycle.clone().delete(self).await\n    }\n\n    async fn wait_channel(&mut self) -> Result<Receiver<()>> {\n        let (tx, rx) = channel::<()>();\n        if self.state != Status::STOPPED {\n            self.wait_chan_tx.push(tx);\n        }\n        Ok(rx)\n    }\n\n    async fn exit_code(&self) -> i32 {\n        self.exit_code\n    }\n\n    async fn exited_at(&self) -> Option<OffsetDateTime> {\n        self.exited_at\n    }\n\n    async fn resize_pty(&mut self, height: u32, width: u32) -> Result<()> {\n        if let Some(console) = self.console.as_ref() {\n            let w = Winsize {\n                ws_row: height as u16,\n                ws_col: width as u16,\n                ws_xpixel: 0,\n                ws_ypixel: 0,\n            };\n            tcsetwinsize(&console.file, w)\n                .map_err(|e| 
containerd_shim::Error::Other(e.to_string()))?;\n        }\n        Ok(())\n    }\n\n    async fn update(&mut self, resources: &LinuxResources) -> Result<()> {\n        self.lifecycle.clone().update(self, resources).await\n    }\n\n    async fn stats(&self) -> Result<Metrics> {\n        self.lifecycle.stats(self).await\n    }\n\n    async fn ps(&self) -> Result<Vec<ProcessInfo>> {\n        self.lifecycle.ps(self).await\n    }\n\n    async fn close_io(&mut self) -> Result<()> {\n        let mut lock_guard = self.stdin.lock().unwrap();\n        if let Some(stdin_w_file) = lock_guard.take() {\n            drop(stdin_w_file);\n        }\n        Ok(())\n    }\n\n    async fn pause(&mut self) -> Result<()> {\n        self.lifecycle.clone().pause(self).await\n    }\n\n    async fn resume(&mut self) -> Result<()> {\n        self.lifecycle.clone().resume(self).await\n    }\n}\n"
  },
  {
    "path": "crates/runc-shim/src/runc.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\n#[cfg(target_os = \"linux\")]\nuse std::sync::RwLock;\nuse std::{\n    convert::TryFrom,\n    os::{\n        fd::{IntoRawFd, OwnedFd},\n        unix::{\n            io::{AsRawFd, FromRawFd},\n            prelude::ExitStatusExt,\n        },\n    },\n    path::{Path, PathBuf},\n    process::ExitStatus,\n    sync::{Arc, Mutex},\n};\n\nuse async_trait::async_trait;\n#[cfg(target_os = \"linux\")]\nuse cgroups_rs::fs::Cgroup;\nuse containerd_shim::{\n    api::{CreateTaskRequest, ExecProcessRequest, Options, Status},\n    asynchronous::monitor::{monitor_subscribe, monitor_unsubscribe, Subscription},\n    io_error,\n    monitor::{ExitEvent, Subject, Topic},\n    mount::umount_recursive,\n    other, other_error,\n    protos::{\n        api::ProcessInfo,\n        cgroups::metrics::Metrics,\n        protobuf::{CodedInputStream, Message},\n    },\n    util::{asyncify, mkdir, mount_rootfs, read_file_to_str, write_options, write_runtime},\n    Console, Error, ExitSignal, Result,\n};\nuse log::{debug, error};\nuse nix::{sys::signal::kill, unistd::Pid};\nuse oci_spec::runtime::{LinuxResources, Process};\nuse runc::{Command, Runc, Spawner};\nuse tokio::{\n    fs::{remove_file, File, OpenOptions},\n    io::{AsyncBufReadExt, AsyncRead, AsyncReadExt, AsyncWrite, BufReader},\n};\n\nuse super::{\n    console::ConsoleSocket,\n    container::{ContainerFactory, 
ContainerTemplate, ProcessFactory},\n    processes::{ProcessLifecycle, ProcessTemplate},\n};\nuse crate::{\n    common::{\n        check_kill_error, create_io, create_runc, get_spec_from_request, handle_file_open,\n        receive_socket, CreateConfig, Log, ProcessIO, ShimExecutor, INIT_PID_FILE, LOG_JSON_FILE,\n    },\n    io::Stdio,\n};\n\npub type ExecProcess = ProcessTemplate<RuncExecLifecycle>;\npub type InitProcess = ProcessTemplate<RuncInitLifecycle>;\n\npub type RuncContainer = ContainerTemplate<InitProcess, ExecProcess, RuncExecFactory>;\n\n#[derive(Clone, Default)]\npub(crate) struct RuncFactory {}\n\n#[async_trait]\nimpl ContainerFactory<RuncContainer> for RuncFactory {\n    async fn create(\n        &self,\n        ns: &str,\n        req: &CreateTaskRequest,\n    ) -> containerd_shim::Result<RuncContainer> {\n        let bundle = req.bundle();\n        let mut opts = Options::new();\n        if let Some(any) = req.options.as_ref() {\n            let mut input = CodedInputStream::from_bytes(any.value.as_ref());\n            opts.merge_from(&mut input)?;\n        }\n        if opts.compute_size() > 0 {\n            debug!(\"create options: {:?}\", &opts);\n        }\n        let runtime = opts.binary_name.as_str();\n        write_options(bundle, &opts).await?;\n        write_runtime(bundle, runtime).await?;\n\n        let rootfs_vec = req.rootfs().to_vec();\n        let rootfs = if !rootfs_vec.is_empty() {\n            let tmp_rootfs = Path::new(bundle).join(\"rootfs\");\n            mkdir(&tmp_rootfs, 0o711).await?;\n            tmp_rootfs\n        } else {\n            PathBuf::new()\n        };\n\n        for m in rootfs_vec {\n            mount_rootfs(&m, rootfs.as_path()).await?\n        }\n\n        let runc = create_runc(\n            runtime,\n            ns,\n            bundle,\n            &opts,\n            Some(Arc::new(ShimExecutor::default())),\n        )?;\n\n        let id = req.id();\n        let stdio = Stdio::new(req.stdin(), 
req.stdout(), req.stderr(), req.terminal());\n\n        let mut init = InitProcess::new(\n            id,\n            stdio,\n            RuncInitLifecycle::new(runc.clone(), opts.clone(), bundle),\n        );\n\n        let config = CreateConfig::default();\n        self.do_create(&mut init, config).await?;\n        #[cfg(target_os = \"linux\")]\n        {\n            *init.lifecycle.cgroup_cache.write().unwrap() =\n                containerd_shim::cgroup::get_cgroup(init.pid as u32).ok();\n        }\n\n        let container = RuncContainer {\n            id: id.to_string(),\n            bundle: bundle.to_string(),\n            init,\n            process_factory: RuncExecFactory {\n                runtime: runc,\n                bundle: bundle.to_string(),\n                io_uid: opts.io_uid,\n                io_gid: opts.io_gid,\n            },\n            processes: Default::default(),\n        };\n\n        Ok(container)\n    }\n\n    async fn cleanup(&self, _ns: &str, _c: &RuncContainer) -> containerd_shim::Result<()> {\n        Ok(())\n    }\n}\n\nimpl RuncFactory {\n    async fn do_create(&self, init: &mut InitProcess, _config: CreateConfig) -> Result<()> {\n        let id = init.id.to_string();\n        let stdio = &init.stdio;\n        let opts = &init.lifecycle.opts;\n        let bundle = &init.lifecycle.bundle;\n        let pid_path = Path::new(bundle).join(INIT_PID_FILE);\n        let mut create_opts = runc::options::CreateOpts::new()\n            .pid_file(&pid_path)\n            .no_pivot(opts.no_pivot_root)\n            .no_new_keyring(opts.no_new_keyring)\n            .detach(false);\n        let (socket, pio) = if stdio.terminal {\n            let s = ConsoleSocket::new().await?;\n            create_opts.console_socket = Some(s.path.to_owned());\n            (Some(s), None)\n        } else {\n            let pio = create_io(&id, opts.io_uid, opts.io_gid, stdio)?;\n            create_opts.io = pio.io.as_ref().cloned();\n            (None, 
Some(pio))\n        };\n\n        let resp = init\n            .lifecycle\n            .runtime\n            .create(&id, bundle, Some(&create_opts))\n            .await;\n        if let Err(e) = resp {\n            if let Some(s) = socket {\n                s.clean().await;\n            }\n            return Err(runtime_error(bundle, e, \"OCI runtime create failed\").await);\n        }\n        copy_io_or_console(init, socket, pio, init.lifecycle.exit_signal.clone()).await?;\n        let pid = read_file_to_str(pid_path).await?.parse::<i32>()?;\n        init.pid = pid;\n        Ok(())\n    }\n}\n\n// runtime_error will read the OCI runtime logfile retrieving OCI runtime error\npub async fn runtime_error(bundle: &str, e: runc::error::Error, msg: &str) -> Error {\n    let mut rt_msg = String::new();\n    match File::open(Path::new(bundle).join(LOG_JSON_FILE)).await {\n        Err(err) => other!(\"{}: unable to open OCI runtime log file: {}\", msg, err),\n        Ok(file) => {\n            let mut lines = BufReader::new(file).lines();\n            while let Ok(Some(line)) = lines.next_line().await {\n                // Retrieve the last runtime error\n                match serde_json::from_str::<Log>(&line) {\n                    Err(err) => return other!(\"{}: unable to parse log msg: {}\", msg, err),\n                    Ok(log) => {\n                        if log.level == \"error\" {\n                            rt_msg = log.msg.trim().to_string();\n                        }\n                    }\n                }\n            }\n            if !rt_msg.is_empty() {\n                other!(\"{}: {}\", msg, rt_msg)\n            } else {\n                other!(\"{}: (no OCI runtime error in logfile) {}\", msg, e)\n            }\n        }\n    }\n}\n\npub struct RuncExecFactory {\n    runtime: Runc,\n    bundle: String,\n    io_uid: u32,\n    io_gid: u32,\n}\n\n#[async_trait]\nimpl ProcessFactory<ExecProcess> for RuncExecFactory {\n    async fn create(&self, req: 
&ExecProcessRequest) -> Result<ExecProcess> {\n        let p = get_spec_from_request(req)?;\n        Ok(ExecProcess {\n            state: Status::CREATED,\n            id: req.exec_id.to_string(),\n            stdio: Stdio {\n                stdin: req.stdin.to_string(),\n                stdout: req.stdout.to_string(),\n                stderr: req.stderr.to_string(),\n                terminal: req.terminal,\n            },\n            pid: 0,\n            exit_code: 0,\n            exited_at: None,\n            wait_chan_tx: vec![],\n            console: None,\n            lifecycle: Arc::from(RuncExecLifecycle {\n                runtime: self.runtime.clone(),\n                bundle: self.bundle.to_string(),\n                container_id: req.id.to_string(),\n                io_uid: self.io_uid,\n                io_gid: self.io_gid,\n                spec: p,\n                exit_signal: Default::default(),\n            }),\n            stdin: Arc::new(Mutex::new(None)),\n        })\n    }\n}\n\npub struct RuncInitLifecycle {\n    runtime: Runc,\n    opts: Options,\n    bundle: String,\n    exit_signal: Arc<ExitSignal>,\n    /// Cache for cgroup paths to avoid repeated /proc/<pid>/cgroup parsing\n    #[cfg(target_os = \"linux\")]\n    cgroup_cache: RwLock<Option<Cgroup>>,\n}\n\n#[async_trait]\nimpl ProcessLifecycle<InitProcess> for RuncInitLifecycle {\n    async fn start(&self, p: &mut InitProcess) -> containerd_shim::Result<()> {\n        if let Err(e) = self.runtime.start(p.id.as_str()).await {\n            return Err(runtime_error(&p.lifecycle.bundle, e, \"OCI runtime start failed\").await);\n        }\n        p.state = Status::RUNNING;\n        Ok(())\n    }\n\n    async fn kill(\n        &self,\n        p: &mut InitProcess,\n        signal: u32,\n        all: bool,\n    ) -> containerd_shim::Result<()> {\n        self.runtime\n            .kill(\n                p.id.as_str(),\n                signal,\n                Some(&runc::options::KillOpts { all 
}),\n            )\n            .await\n            .map_err(|e| check_kill_error(e.to_string()))\n    }\n\n    async fn delete(&self, p: &mut InitProcess) -> containerd_shim::Result<()> {\n        if let Err(e) = self\n            .runtime\n            .delete(\n                p.id.as_str(),\n                Some(&runc::options::DeleteOpts { force: true }),\n            )\n            .await\n        {\n            if !e.to_string().to_lowercase().contains(\"does not exist\") {\n                return Err(\n                    runtime_error(&p.lifecycle.bundle, e, \"OCI runtime delete failed\").await,\n                );\n            }\n        }\n        umount_recursive(Path::new(&self.bundle).join(\"rootfs\").to_str(), 0)?;\n        self.exit_signal.signal();\n        Ok(())\n    }\n\n    #[cfg(target_os = \"linux\")]\n    async fn update(&self, p: &mut InitProcess, resources: &LinuxResources) -> Result<()> {\n        if p.pid <= 0 {\n            return Err(other!(\n                \"failed to update resources because init process is {}\",\n                p.pid\n            ));\n        }\n\n        // Check if cgroup still exists before attempting update\n        if !self.ensure_init_cgroup_exists().await {\n            return Err(other!(\n                \"failed to update resources because cgroup for process {} has been released\",\n                p.pid\n            ));\n        }\n        let cgroup_guard = p.lifecycle.cgroup_cache.read().unwrap();\n        let cgroup = cgroup_guard\n            .as_ref()\n            .ok_or_else(|| other!(\"cgroup cache is empty for process {}\", p.pid))?;\n        containerd_shim::cgroup::update_resources(cgroup, resources)\n    }\n\n    #[cfg(not(target_os = \"linux\"))]\n    async fn update(&self, _p: &mut InitProcess, _resources: &LinuxResources) -> Result<()> {\n        Err(Error::Unimplemented(\"update resource\".to_string()))\n    }\n\n    #[cfg(target_os = \"linux\")]\n    async fn stats(&self, p: &InitProcess) 
-> Result<Metrics> {\n        if p.pid <= 0 {\n            return Err(other!(\n                \"failed to collect metrics because init process is {}\",\n                p.pid\n            ));\n        }\n\n        // Check if cgroup still exists before attempting to collect stats\n        if !self.ensure_init_cgroup_exists().await {\n            return Err(other!(\n                \"failed to collect metrics because cgroup for process {} has been released\",\n                p.pid\n            ));\n        }\n        let cgroup_guard = p.lifecycle.cgroup_cache.read().unwrap();\n        let cgroup = cgroup_guard\n            .as_ref()\n            .ok_or_else(|| other!(\"cgroup cache is empty for process {}\", p.pid))?;\n        containerd_shim::cgroup::collect_metrics(cgroup)\n    }\n\n    #[cfg(not(target_os = \"linux\"))]\n    async fn stats(&self, _p: &InitProcess) -> Result<Metrics> {\n        Err(Error::Unimplemented(\"process stats\".to_string()))\n    }\n\n    async fn ps(&self, p: &InitProcess) -> Result<Vec<ProcessInfo>> {\n        let pids = self\n            .runtime\n            .ps(&p.id)\n            .await\n            .map_err(other_error!(\"failed to execute runc ps\"))?;\n        Ok(pids\n            .iter()\n            .map(|&x| ProcessInfo {\n                pid: x as u32,\n                ..Default::default()\n            })\n            .collect())\n    }\n\n    #[cfg(target_os = \"linux\")]\n    async fn pause(&self, p: &mut InitProcess) -> Result<()> {\n        match p.state {\n            Status::RUNNING => {\n                p.state = Status::PAUSING;\n                if let Err(e) = self.runtime.pause(p.id.as_str()).await {\n                    p.state = Status::RUNNING;\n                    return Err(runtime_error(&self.bundle, e, \"OCI runtime pause failed\").await);\n                }\n                p.state = Status::PAUSED;\n                Ok(())\n            }\n            _ => Err(other!(\"cannot pause when in {:?} state\", 
p.state)),\n        }\n    }\n\n    #[cfg(not(target_os = \"linux\"))]\n    async fn pause(&self, _p: &mut InitProcess) -> Result<()> {\n        Err(Error::Unimplemented(\"pause\".to_string()))\n    }\n\n    #[cfg(target_os = \"linux\")]\n    async fn resume(&self, p: &mut InitProcess) -> Result<()> {\n        match p.state {\n            Status::PAUSED => {\n                if let Err(e) = self.runtime.resume(p.id.as_str()).await {\n                    return Err(runtime_error(&self.bundle, e, \"OCI runtime pause failed\").await);\n                }\n                p.state = Status::RUNNING;\n                Ok(())\n            }\n            _ => Err(other!(\"cannot resume when in {:?} state\", p.state)),\n        }\n    }\n\n    #[cfg(not(target_os = \"linux\"))]\n    async fn resume(&self, _p: &mut InitProcess) -> Result<()> {\n        Err(Error::Unimplemented(\"resume\".to_string()))\n    }\n}\n\nimpl RuncInitLifecycle {\n    pub fn new(runtime: Runc, opts: Options, bundle: &str) -> Self {\n        Self {\n            runtime,\n            opts,\n            bundle: bundle.to_string(),\n            exit_signal: Default::default(),\n            #[cfg(target_os = \"linux\")]\n            cgroup_cache: RwLock::new(None),\n        }\n    }\n\n    /// Ensure cgroup exists and cache the path information\n    /// Returns true if cgroup exists, false if released\n    #[cfg(target_os = \"linux\")]\n    async fn ensure_init_cgroup_exists(&self) -> bool {\n        let cache = self.cgroup_cache.read().unwrap();\n        if let Some(ref cached) = *cache {\n            cached.exists()\n        } else {\n            false\n        }\n    }\n}\n\npub struct RuncExecLifecycle {\n    runtime: Runc,\n    bundle: String,\n    container_id: String,\n    io_uid: u32,\n    io_gid: u32,\n    spec: Process,\n    exit_signal: Arc<ExitSignal>,\n}\n\n#[async_trait]\nimpl ProcessLifecycle<ExecProcess> for RuncExecLifecycle {\n    async fn start(&self, p: &mut ExecProcess) -> 
containerd_shim::Result<()> {\n        let bundle = self.bundle.to_string();\n        let pid_path = Path::new(&bundle).join(format!(\"{}.pid\", &p.id));\n        let mut exec_opts = runc::options::ExecOpts {\n            io: None,\n            pid_file: Some(pid_path.to_owned()),\n            console_socket: None,\n            detach: true,\n        };\n        let (socket, pio) = if p.stdio.terminal {\n            let s = ConsoleSocket::new().await?;\n            exec_opts.console_socket = Some(s.path.to_owned());\n            (Some(s), None)\n        } else {\n            let pio = create_io(&p.id, self.io_uid, self.io_gid, &p.stdio)?;\n            exec_opts.io = pio.io.as_ref().cloned();\n            (None, Some(pio))\n        };\n        //TODO  checkpoint support\n        let exec_result = self\n            .runtime\n            .exec(&self.container_id, &self.spec, Some(&exec_opts))\n            .await;\n        if let Err(e) = exec_result {\n            if let Some(s) = socket {\n                s.clean().await;\n            }\n            return Err(runtime_error(&bundle, e, \"OCI runtime exec failed\").await);\n        }\n\n        if !p.stdio.stdin.is_empty() {\n            let stdin_clone = p.stdio.stdin.clone();\n            let stdin_w = p.stdin.clone();\n            // Open the write side in advance to make sure read side will not block,\n            // open it in another thread otherwise it will block too.\n            tokio::spawn(async move {\n                if let Ok(stdin_w_file) = OpenOptions::new()\n                    .write(true)\n                    .open(stdin_clone.as_str())\n                    .await\n                {\n                    let mut lock_guard = stdin_w.lock().unwrap();\n                    *lock_guard = Some(stdin_w_file);\n                }\n            });\n        }\n\n        copy_io_or_console(p, socket, pio, p.lifecycle.exit_signal.clone()).await?;\n        let pid = 
read_file_to_str(pid_path).await?.parse::<i32>()?;\n        p.pid = pid;\n        p.state = Status::RUNNING;\n        Ok(())\n    }\n\n    async fn kill(\n        &self,\n        p: &mut ExecProcess,\n        signal: u32,\n        _all: bool,\n    ) -> containerd_shim::Result<()> {\n        if p.pid <= 0 {\n            Err(Error::FailedPreconditionError(\n                \"process not created\".to_string(),\n            ))\n        } else if p.exited_at.is_some() {\n            Err(Error::NotFoundError(\"process already finished\".to_string()))\n        } else {\n            let pid = p.pid;\n            let kill_future = tokio::task::spawn_blocking(move || {\n                kill(\n                    Pid::from_raw(pid),\n                    nix::sys::signal::Signal::try_from(signal as i32).unwrap(),\n                )\n            });\n\n            match tokio::time::timeout(std::time::Duration::from_secs(3), kill_future).await {\n                Ok(Ok(result)) => result.map_err(Into::into),\n                Ok(Err(e)) => Err(Error::Other(format!(\"kill task error: {}\", e))),\n                Err(_) => {\n                    debug!(\n                        \"kill operation timed out for pid {}, signal {}\",\n                        pid, signal\n                    );\n                    // timeout also return ok\n                    // For termination signals, it may have taken effect even if it timed out\n                    if signal == 9 || signal == 15 {\n                        Ok(())\n                    } else {\n                        Err(Error::DeadlineExceeded(\n                            \"kill operation timed out\".to_string(),\n                        ))\n                    }\n                }\n            }\n        }\n    }\n\n    async fn delete(&self, p: &mut ExecProcess) -> Result<()> {\n        self.exit_signal.signal();\n        let exec_pid_path = Path::new(self.bundle.as_str()).join(format!(\"{}.pid\", p.id));\n        
remove_file(exec_pid_path).await.unwrap_or_default();\n        Ok(())\n    }\n\n    async fn update(&self, _p: &mut ExecProcess, _resources: &LinuxResources) -> Result<()> {\n        Err(Error::Unimplemented(\"exec update\".to_string()))\n    }\n\n    async fn stats(&self, _p: &ExecProcess) -> Result<Metrics> {\n        Err(Error::Unimplemented(\"exec stats\".to_string()))\n    }\n\n    async fn ps(&self, _p: &ExecProcess) -> Result<Vec<ProcessInfo>> {\n        Err(Error::Unimplemented(\"exec ps\".to_string()))\n    }\n\n    async fn pause(&self, _p: &mut ExecProcess) -> Result<()> {\n        Err(Error::Unimplemented(\"exec pause\".to_string()))\n    }\n\n    async fn resume(&self, _p: &mut ExecProcess) -> Result<()> {\n        Err(Error::Unimplemented(\"exec resume\".to_string()))\n    }\n}\n\nasync fn copy_console(\n    console_socket: &ConsoleSocket,\n    stdio: &Stdio,\n    exit_signal: Arc<ExitSignal>,\n) -> Result<Console> {\n    debug!(\"copy_console: waiting for runtime to send console fd\");\n    let stream = console_socket.accept().await?;\n    let fd = asyncify(move || -> Result<OwnedFd> { receive_socket(stream.as_raw_fd()) }).await?;\n    let f = unsafe { File::from_raw_fd(fd.into_raw_fd()) };\n    if !stdio.stdin.is_empty() {\n        debug!(\"copy_console: pipe stdin to console\");\n        let console_stdin = unsafe { tokio::fs::File::from_raw_fd(f.as_raw_fd()) };\n        let stdin = handle_file_open(|| async {\n            OpenOptions::new()\n                .read(true)\n                .open(stdio.stdin.as_str())\n                .await\n        })\n        .await\n        .map_err(io_error!(e, \"failed to open stdin\"))?;\n        spawn_copy(stdin, console_stdin, exit_signal.clone(), None::<fn()>);\n    }\n\n    if !stdio.stdout.is_empty() {\n        let console_stdout = unsafe { tokio::fs::File::from_raw_fd(f.as_raw_fd()) };\n        debug!(\"copy_console: pipe stdout from console\");\n        let stdout = OpenOptions::new()\n            
.write(true)\n            .open(stdio.stdout.as_str())\n            .await\n            .map_err(io_error!(e, \"open stdout\"))?;\n        // open a read to make sure even if the read end of containerd shutdown,\n        // copy still continue until the restart of containerd succeed\n        let stdout_r = OpenOptions::new()\n            .read(true)\n            .open(stdio.stdout.as_str())\n            .await\n            .map_err(io_error!(e, \"open stdout for read\"))?;\n        spawn_copy(\n            console_stdout,\n            stdout,\n            exit_signal,\n            Some(move || {\n                drop(stdout_r);\n            }),\n        );\n    }\n    let console = Console {\n        file: f.into_std().await,\n    };\n    Ok(console)\n}\n\npub async fn copy_io(pio: &ProcessIO, stdio: &Stdio, exit_signal: Arc<ExitSignal>) -> Result<()> {\n    if !pio.copy {\n        return Ok(());\n    };\n    if let Some(io) = &pio.io {\n        if let Some(w) = io.stdin() {\n            debug!(\"copy_io: pipe stdin from {}\", stdio.stdin.as_str());\n            if !stdio.stdin.is_empty() {\n                let stdin = handle_file_open(|| async {\n                    OpenOptions::new()\n                        .read(true)\n                        .open(stdio.stdin.as_str())\n                        .await\n                })\n                .await\n                .map_err(io_error!(e, \"open stdin\"))?;\n                spawn_copy(stdin, w, exit_signal.clone(), None::<fn()>);\n            }\n        }\n\n        if let Some(r) = io.stdout() {\n            debug!(\"copy_io: pipe stdout from to {}\", stdio.stdout.as_str());\n            if !stdio.stdout.is_empty() {\n                let stdout = handle_file_open(|| async {\n                    OpenOptions::new()\n                        .write(true)\n                        .open(stdio.stdout.as_str())\n                        .await\n                })\n                .await\n                .map_err(io_error!(e, 
\"open stdout\"))?;\n                // open a read to make sure even if the read end of containerd shutdown,\n                // copy still continue until the restart of containerd succeed\n                let stdout_r = handle_file_open(|| async {\n                    OpenOptions::new()\n                        .read(true)\n                        .open(stdio.stdout.as_str())\n                        .await\n                })\n                .await\n                .map_err(io_error!(e, \"open stdout for read\"))?;\n                spawn_copy(\n                    r,\n                    stdout,\n                    exit_signal.clone(),\n                    Some(move || {\n                        drop(stdout_r);\n                    }),\n                );\n            }\n        }\n\n        if let Some(r) = io.stderr() {\n            if !stdio.stderr.is_empty() {\n                debug!(\"copy_io: pipe stderr from to {}\", stdio.stderr.as_str());\n                let stderr = handle_file_open(|| async {\n                    OpenOptions::new()\n                        .write(true)\n                        .open(stdio.stderr.as_str())\n                        .await\n                })\n                .await\n                .map_err(io_error!(e, \"open stderr\"))?;\n                // open a read to make sure even if the read end of containerd shutdown,\n                // copy still continue until the restart of containerd succeed\n                let stderr_r = handle_file_open(|| async {\n                    OpenOptions::new()\n                        .read(true)\n                        .open(stdio.stderr.as_str())\n                        .await\n                })\n                .await\n                .map_err(io_error!(e, \"open stderr for read\"))?;\n                spawn_copy(\n                    r,\n                    stderr,\n                    exit_signal,\n                    Some(move || {\n                        drop(stderr_r);\n         
           }),\n                );\n            }\n        }\n    }\n\n    Ok(())\n}\n\nfn spawn_copy<R, W, F>(from: R, to: W, exit_signal: Arc<ExitSignal>, on_close: Option<F>)\nwhere\n    R: AsyncRead + Send + Unpin + 'static,\n    W: AsyncWrite + Send + Unpin + 'static,\n    F: FnOnce() + Send + 'static,\n{\n    let mut src = from;\n    let mut dst = to;\n    tokio::spawn(async move {\n        tokio::select! {\n            _ = exit_signal.wait() => {\n                debug!(\"container exit, copy task should exit too\");\n            },\n            res = tokio::io::copy(&mut src, &mut dst) => {\n               if let Err(e) = res {\n                    error!(\"copy io failed {}\", e);\n                }\n            }\n        }\n        if let Some(f) = on_close {\n            f();\n        }\n    });\n}\n\nasync fn copy_io_or_console<P>(\n    p: &mut ProcessTemplate<P>,\n    socket: Option<ConsoleSocket>,\n    pio: Option<ProcessIO>,\n    exit_signal: Arc<ExitSignal>,\n) -> Result<()> {\n    if p.stdio.terminal {\n        if let Some(console_socket) = socket {\n            let console_result = copy_console(&console_socket, &p.stdio, exit_signal).await;\n            console_socket.clean().await;\n            match console_result {\n                Ok(c) => {\n                    p.console = Some(c);\n                }\n                Err(e) => {\n                    return Err(e);\n                }\n            }\n        }\n    } else if let Some(pio) = pio {\n        copy_io(&pio, &p.stdio, exit_signal).await?;\n    }\n    Ok(())\n}\n\n#[async_trait]\nimpl Spawner for ShimExecutor {\n    async fn execute(&self, cmd: Command) -> runc::Result<(ExitStatus, u32, String, String)> {\n        let mut cmd = cmd;\n        let subscription = monitor_subscribe(Topic::Pid)\n            .await\n            .map_err(|e| runc::error::Error::Other(Box::new(e)))?;\n        let sid = subscription.id;\n        let child = match cmd.spawn() {\n            Ok(c) => c,\n       
     Err(e) => {\n                monitor_unsubscribe(sid).await.unwrap_or_default();\n                return Err(runc::error::Error::ProcessSpawnFailed(e));\n            }\n        };\n        let pid = child.id().unwrap();\n        let (stdout, stderr, exit_code) = tokio::join!(\n            read_std(child.stdout),\n            read_std(child.stderr),\n            wait_pid(pid as i32, subscription)\n        );\n        let status = ExitStatus::from_raw(exit_code);\n        monitor_unsubscribe(sid).await.unwrap_or_default();\n        Ok((status, pid, stdout, stderr))\n    }\n}\n\nasync fn read_std<T>(std: Option<T>) -> String\nwhere\n    T: AsyncRead + Unpin,\n{\n    let mut std = std;\n    if let Some(mut std) = std.take() {\n        let mut out = String::new();\n        std.read_to_string(&mut out).await.unwrap_or_else(|e| {\n            error!(\"failed to read stdout {}\", e);\n            0\n        });\n        return out;\n    }\n    \"\".to_string()\n}\n\nasync fn wait_pid(pid: i32, s: Subscription) -> i32 {\n    let mut s = s;\n    loop {\n        if let Some(ExitEvent {\n            subject: Subject::Pid(epid),\n            exit_code: code,\n        }) = s.rx.recv().await\n        {\n            if pid == epid {\n                monitor_unsubscribe(s.id).await.unwrap_or_default();\n                return code;\n            }\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::{os::unix::process::ExitStatusExt, path::Path, process::ExitStatus};\n\n    use containerd_shim::util::{mkdir, write_str_to_file};\n    use runc::error::Error::CommandFailed;\n    use tokio::fs::remove_dir_all;\n\n    use crate::{common::LOG_JSON_FILE, runc::runtime_error};\n\n    #[tokio::test]\n    async fn test_runtime_error() {\n        let empty_err = CommandFailed {\n            status: ExitStatus::from_raw(1),\n            stdout: \"\".to_string(),\n            stderr: \"\".to_string(),\n        };\n        let log_json = \"\\\n        
{\\\"level\\\":\\\"info\\\",\\\"msg\\\":\\\"hello world\\\",\\\"time\\\":\\\"2022-11-25\\\"}\\n\\\n        {\\\"level\\\":\\\"error\\\",\\\"msg\\\":\\\"failed error\\\",\\\"time\\\":\\\"2022-11-26\\\"}\\n\\\n        {\\\"level\\\":\\\"error\\\",\\\"msg\\\":\\\"panic\\\",\\\"time\\\":\\\"2022-11-27\\\"}\\n\\\n        \";\n        let test_dir = \"/tmp/shim-test\";\n        let _ = mkdir(test_dir, 0o744).await;\n        write_str_to_file(Path::new(test_dir).join(LOG_JSON_FILE).as_path(), log_json)\n            .await\n            .expect(\"write log json should not be error\");\n\n        let expectd_msg = \"panic\";\n        let actual_err = runtime_error(test_dir, empty_err, \"\").await;\n        remove_dir_all(test_dir)\n            .await\n            .expect(\"remove test dir should not be error\");\n        assert!(\n            actual_err.to_string().contains(expectd_msg),\n            \"actual error \\\"{}\\\" should contains \\\"{}\\\"\",\n            actual_err,\n            expectd_msg\n        );\n    }\n}\n"
  },
  {
    "path": "crates/runc-shim/src/service.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::{env::current_dir, sync::Arc, time::Duration};\n\nuse ::runc::options::DeleteOpts;\nuse async_trait::async_trait;\nuse containerd_shim::{\n    asynchronous::{\n        monitor::{monitor_subscribe, monitor_unsubscribe, Subscription},\n        publisher::RemotePublisher,\n        spawn, ExitSignal, Shim,\n    },\n    event::Event,\n    io_error,\n    monitor::{Subject, Topic},\n    mount::umount_recursive,\n    protos::{events::task::TaskExit, protobuf::MessageDyn, ttrpc::context::with_duration},\n    util::{\n        convert_to_timestamp, read_options, read_pid_from_file, read_runtime, read_spec, timestamp,\n        write_str_to_file,\n    },\n    Config, DeleteResponse, Error, Flags, StartOpts,\n};\nuse log::{debug, error, warn};\nuse tokio::sync::mpsc::{channel, Receiver, Sender};\n\nuse crate::{\n    common::{create_runc, has_shared_pid_namespace, ShimExecutor, GROUP_LABELS, INIT_PID_FILE},\n    container::Container,\n    processes::Process,\n    runc::{RuncContainer, RuncFactory},\n    task::TaskService,\n};\n\npub(crate) struct Service {\n    exit: Arc<ExitSignal>,\n    id: String,\n    namespace: String,\n}\n\n#[async_trait]\nimpl Shim for Service {\n    type T = TaskService<RuncFactory, RuncContainer>;\n\n    async fn new(_runtime_id: &str, args: &Flags, _config: &mut Config) -> Self {\n        let exit = 
Arc::new(ExitSignal::default());\n        // TODO: add publisher\n        Service {\n            exit,\n            id: args.id.to_string(),\n            namespace: args.namespace.to_string(),\n        }\n    }\n\n    async fn start_shim(&mut self, opts: StartOpts) -> containerd_shim::Result<String> {\n        let mut grouping = opts.id.clone();\n        let spec = read_spec(\"\").await?;\n        if let Some(annotations) = spec.annotations() {\n            for &label in GROUP_LABELS.iter() {\n                if let Some(value) = annotations.get(label) {\n                    grouping = value.to_string();\n                    break;\n                }\n            }\n        }\n        #[cfg(not(target_os = \"linux\"))]\n        let thp_disabled = String::new();\n        #[cfg(target_os = \"linux\")]\n        // Our goal is to set thp disable = true on the shim side and then restore thp\n        // disable before starting runc. So we only need to focus on the return value\n        // of the function get_thp_disabled, which is Result<bool, i32>.\n        let thp_disabled = {\n            let ret = unsafe { libc::prctl(libc::PR_GET_THP_DISABLE, 0, 0, 0, 0) };\n            if ret >= 0 {\n                let was_disabled = ret > 0;\n                // We don't care if the setting is successful, because even if the\n                // setting failed, we should not exit the shim process.\n                let _ = unsafe { libc::prctl(libc::PR_SET_THP_DISABLE, 1u64, 0, 0, 0) };\n                was_disabled.to_string()\n            } else {\n                String::new()\n            }\n        };\n        let vars: Vec<(&str, &str)> = vec![(\"THP_DISABLED\", thp_disabled.as_str())];\n\n        let address = spawn(opts, &grouping, vars).await?;\n        write_str_to_file(\"address\", &address).await?;\n        Ok(address)\n    }\n\n    async fn delete_shim(&mut self) -> containerd_shim::Result<DeleteResponse> {\n        let namespace = self.namespace.as_str();\n        let 
bundle = current_dir().map_err(io_error!(e, \"get current dir\"))?;\n        let opts = read_options(&bundle).await?;\n        let runtime = read_runtime(&bundle).await.unwrap_or_default();\n\n        let runc = create_runc(\n            &runtime,\n            namespace,\n            &bundle,\n            &opts,\n            Some(Arc::new(ShimExecutor::default())),\n        )?;\n        let pid = read_pid_from_file(&bundle.join(INIT_PID_FILE))\n            .await\n            .unwrap_or_default();\n\n        runc.delete(&self.id, Some(&DeleteOpts { force: true }))\n            .await\n            .unwrap_or_else(|e| warn!(\"failed to remove runc container: {}\", e));\n        umount_recursive(bundle.join(\"rootfs\").to_str(), 0)\n            .unwrap_or_else(|e| warn!(\"failed to umount recursive rootfs: {}\", e));\n        let mut resp = DeleteResponse::new();\n        // sigkill\n        resp.set_exit_status(137);\n        resp.set_exited_at(timestamp()?);\n        resp.set_pid(pid as u32);\n        Ok(resp)\n    }\n\n    async fn wait(&mut self) {\n        self.exit.wait().await;\n    }\n\n    async fn create_task_service(&self, publisher: RemotePublisher) -> Self::T {\n        let (tx, rx) = channel(128);\n        let exit_clone = self.exit.clone();\n        let task = TaskService::new(&self.namespace, exit_clone, tx.clone());\n        let s = monitor_subscribe(Topic::Pid)\n            .await\n            .expect(\"monitor subscribe failed\");\n        process_exits(s, &task, tx).await;\n        forward(publisher, self.namespace.to_string(), rx).await;\n        task\n    }\n}\n\nasync fn process_exits(\n    s: Subscription,\n    task: &TaskService<RuncFactory, RuncContainer>,\n    tx: Sender<(String, Box<dyn MessageDyn>)>,\n) {\n    let containers = task.containers.clone();\n    let mut s = s;\n    tokio::spawn(async move {\n        while let Some(e) = s.rx.recv().await {\n            if let Subject::Pid(pid) = e.subject {\n                debug!(\"receive exit 
event: {}\", &e);\n                let exit_code = e.exit_code;\n                for (_k, cont) in containers.write().await.iter_mut() {\n                    let bundle = cont.bundle.to_string();\n                    let container_id = cont.id.clone();\n                    let mut change_process: Vec<&mut (dyn Process + Send + Sync)> = Vec::new();\n                    // pid belongs to container init process\n                    if cont.init.pid == pid {\n                        // kill all children process if the container has a private PID namespace\n                        if should_kill_all_on_exit(&bundle).await {\n                            cont.kill(None, 9, true).await.unwrap_or_else(|e| {\n                                error!(\"failed to kill init's children: {}\", e)\n                            });\n                        }\n                        if let Ok(process_d) = cont.get_mut_process(None) {\n                            change_process.push(process_d);\n                        } else {\n                            break;\n                        }\n                    } else {\n                        // pid belongs to container common process\n                        if let Some((_, p)) = cont.processes.iter_mut().find(|(_, p)| p.pid == pid)\n                        {\n                            change_process.push(p as &mut (dyn Process + Send + Sync));\n                        }\n                    }\n                    let process_len = change_process.len();\n                    for process in change_process {\n                        // set exit for process\n                        process.set_exited(exit_code).await;\n                        let code = process.exit_code().await;\n                        let exited_at = process.exited_at().await;\n                        // publish event\n                        let ts = convert_to_timestamp(exited_at);\n                        let event = TaskExit {\n                            
container_id: container_id.clone(),\n                            id: process.id().await.to_string(),\n                            pid: process.pid().await as u32,\n                            exit_status: code as u32,\n                            exited_at: Some(ts).into(),\n                            ..Default::default()\n                        };\n                        let topic = event.topic();\n                        tx.send((topic.to_string(), Box::new(event)))\n                            .await\n                            .unwrap_or_else(|e| warn!(\"send {} to publisher: {}\", topic, e));\n                    }\n                    //if process has been find , no need to keep search\n                    if process_len != 0 {\n                        break;\n                    }\n                }\n            }\n        }\n        monitor_unsubscribe(s.id).await.unwrap_or_default();\n    });\n}\n\nasync fn forward(\n    publisher: RemotePublisher,\n    ns: String,\n    mut rx: Receiver<(String, Box<dyn MessageDyn>)>,\n) {\n    tokio::spawn(async move {\n        while let Some((topic, e)) = rx.recv().await {\n            // While ttrpc push the event,give it a 5 seconds timeout.\n            // Prevent event reporting from taking too long time.\n            // Learnd from goshim's containerd/runtime/v2/shim/publisher.go\n            publisher\n                .publish(with_duration(Duration::from_secs(5)), &topic, &ns, e)\n                .await\n                .unwrap_or_else(|e| warn!(\"publish {} to containerd: {}\", topic, e));\n        }\n    });\n}\n\nasync fn should_kill_all_on_exit(bundle_path: &str) -> bool {\n    match read_spec(bundle_path).await {\n        Ok(spec) => has_shared_pid_namespace(&spec),\n        Err(e) => {\n            error!(\n                \"failed to read spec when call should_kill_all_on_exit: {}\",\n                e\n            );\n            false\n        }\n    }\n}\n"
  },
  {
    "path": "crates/runc-shim/src/task.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\nuse std::{collections::HashMap, sync::Arc};\n\nuse async_trait::async_trait;\nuse containerd_shim::{\n    api::{\n        CreateTaskRequest, CreateTaskResponse, DeleteRequest, Empty, ExecProcessRequest,\n        KillRequest, ResizePtyRequest, ShutdownRequest, StartRequest, StartResponse, StateRequest,\n        StateResponse, Status, WaitRequest, WaitResponse,\n    },\n    asynchronous::ExitSignal,\n    event::Event,\n    protos::{\n        api::{\n            CloseIORequest, ConnectRequest, ConnectResponse, DeleteResponse, PidsRequest,\n            PidsResponse, StatsRequest, StatsResponse, UpdateTaskRequest,\n        },\n        events::task::{TaskCreate, TaskDelete, TaskExecAdded, TaskExecStarted, TaskIO, TaskStart},\n        protobuf::{EnumOrUnknown, MessageDyn},\n        shim_async::Task,\n        ttrpc::{self, r#async::TtrpcContext},\n    },\n    util::{convert_to_any, convert_to_timestamp, AsOption},\n    TtrpcResult,\n};\nuse log::{debug, info, warn};\nuse oci_spec::runtime::LinuxResources;\nuse tokio::sync::{\n    mpsc::Sender, RwLock, RwLockMappedWriteGuard, RwLockReadGuard, RwLockWriteGuard,\n};\n\nuse super::container::{Container, ContainerFactory};\ntype EventSender = Sender<(String, Box<dyn MessageDyn>)>;\n\n#[cfg(target_os = \"linux\")]\nuse std::path::Path;\n\n#[cfg(target_os = \"linux\")]\nuse 
cgroups_rs::fs::hierarchies::is_cgroup2_unified_mode;\nuse containerd_shim::{\n    api::{PauseRequest, ResumeRequest},\n    protos::events::task::{TaskPaused, TaskResumed},\n};\n#[cfg(target_os = \"linux\")]\nuse containerd_shim::{\n    error::{Error, Result},\n    other_error,\n    protos::events::task::TaskOOM,\n};\n#[cfg(target_os = \"linux\")]\nuse log::error;\n#[cfg(target_os = \"linux\")]\nuse tokio::{sync::mpsc::Receiver, task::spawn};\n\n#[cfg(target_os = \"linux\")]\nuse crate::cgroup_memory;\n\n/// TaskService is a Task template struct, it is considered a helper struct,\n/// which has already implemented `Task` trait, so that users can make it the type `T`\n/// parameter of `Service`, and implements their own `ContainerFactory` and `Container`.\npub struct TaskService<F, C> {\n    pub factory: F,\n    // In comparison, a Mutex does not distinguish between readers or writers that acquire the lock,\n    // therefore causing any tasks waiting for the lock to become available to yield.\n    // An RwLock will allow any number of readers to acquire the lock as long as a writer is not holding the lock.\n    pub containers: Arc<RwLock<HashMap<String, C>>>,\n    pub namespace: String,\n    pub exit: Arc<ExitSignal>,\n    pub tx: EventSender,\n}\n\nimpl<F, C> TaskService<F, C>\nwhere\n    F: Default,\n{\n    pub fn new(ns: &str, exit: Arc<ExitSignal>, tx: EventSender) -> Self {\n        Self {\n            factory: Default::default(),\n            containers: Arc::new(RwLock::new(Default::default())),\n            namespace: ns.to_string(),\n            exit,\n            tx,\n        }\n    }\n}\n\nimpl<F, C> TaskService<F, C> {\n    pub async fn container_mut(&self, id: &str) -> TtrpcResult<RwLockMappedWriteGuard<'_, C>> {\n        let mut containers = self.containers.write().await;\n        containers.get_mut(id).ok_or_else(|| {\n            ttrpc::Error::RpcStatus(ttrpc::get_status(\n                ttrpc::Code::NOT_FOUND,\n                format!(\"can not 
find container by id {}\", id),\n            ))\n        })?;\n        let container = RwLockWriteGuard::map(containers, |m| m.get_mut(id).unwrap());\n        Ok(container)\n    }\n\n    pub async fn container(&self, id: &str) -> TtrpcResult<RwLockReadGuard<'_, C>> {\n        let containers = self.containers.read().await;\n        containers.get(id).ok_or_else(|| {\n            ttrpc::Error::RpcStatus(ttrpc::get_status(\n                ttrpc::Code::NOT_FOUND,\n                format!(\"can not find container by id {}\", id),\n            ))\n        })?;\n        let container = RwLockReadGuard::map(containers, |m| m.get(id).unwrap());\n        Ok(container)\n    }\n\n    pub async fn send_event(&self, event: impl Event) {\n        let topic = event.topic();\n        self.tx\n            .send((topic.to_string(), Box::new(event)))\n            .await\n            .unwrap_or_else(|e| warn!(\"send {} to publisher: {}\", topic, e));\n    }\n}\n\n#[cfg(target_os = \"linux\")]\nfn run_oom_monitor(mut rx: Receiver<String>, id: String, tx: EventSender) {\n    let oom_event = TaskOOM {\n        container_id: id,\n        ..Default::default()\n    };\n    let topic = oom_event.topic();\n    let oom_box = Box::new(oom_event);\n    spawn(async move {\n        while let Some(_item) = rx.recv().await {\n            tx.send((topic.to_string(), oom_box.clone()))\n                .await\n                .unwrap_or_else(|e| warn!(\"send {} to publisher: {}\", topic, e));\n        }\n    });\n}\n\n#[cfg(target_os = \"linux\")]\nasync fn monitor_oom(id: &String, pid: u32, tx: EventSender) -> Result<()> {\n    if !is_cgroup2_unified_mode() {\n        let path_from_cgorup = cgroup_memory::get_path_from_cgorup(pid).await?;\n        let (mount_root, mount_point) =\n            cgroup_memory::get_existing_cgroup_mem_path(path_from_cgorup).await?;\n\n        let mem_cgroup_path = mount_point + &mount_root;\n        let rx = cgroup_memory::register_memory_event(\n            id,\n          
  Path::new(&mem_cgroup_path),\n            \"memory.oom_control\",\n        )\n        .await\n        .map_err(other_error!(\"register_memory_event failed:\"))?;\n\n        run_oom_monitor(rx, id.to_string(), tx);\n    }\n    Ok(())\n}\n\n#[async_trait]\nimpl<F, C> Task for TaskService<F, C>\nwhere\n    F: ContainerFactory<C> + Sync + Send,\n    C: Container + Sync + Send + 'static,\n{\n    async fn state(&self, _ctx: &TtrpcContext, req: StateRequest) -> TtrpcResult<StateResponse> {\n        let container = self.container(req.id()).await?;\n        let exec_id = req.exec_id().as_option();\n        let resp = container.state(exec_id).await?;\n        Ok(resp)\n    }\n\n    async fn create(\n        &self,\n        _ctx: &TtrpcContext,\n        req: CreateTaskRequest,\n    ) -> TtrpcResult<CreateTaskResponse> {\n        info!(\"Create request for {:?}\", &req);\n        // Note: Get containers here is for getting the lock,\n        // to make sure no other threads manipulate the containers metadata;\n        let ns = self.namespace.as_str();\n        let id = req.id.as_str();\n        let mut resp = CreateTaskResponse::new();\n        let pid = {\n            let mut containers = self.containers.write().await;\n            let container = self.factory.create(ns, &req).await?;\n            let pid = container.pid().await as u32;\n            resp.pid = pid;\n            containers.insert(id.to_string(), container);\n            pid\n        };\n\n        self.send_event(TaskCreate {\n            container_id: req.id.to_string(),\n            bundle: req.bundle.to_string(),\n            rootfs: req.rootfs,\n            io: Some(TaskIO {\n                stdin: req.stdin.to_string(),\n                stdout: req.stdout.to_string(),\n                stderr: req.stderr.to_string(),\n                terminal: req.terminal,\n                ..Default::default()\n            })\n            .into(),\n            checkpoint: req.checkpoint.to_string(),\n            pid,\n   
         ..Default::default()\n        })\n        .await;\n        info!(\"Create request for {} returns pid {}\", id, resp.pid);\n        Ok(resp)\n    }\n\n    async fn start(&self, _ctx: &TtrpcContext, req: StartRequest) -> TtrpcResult<StartResponse> {\n        info!(\"Start request for {:?}\", &req);\n        let pid = {\n            let mut container = self.container_mut(req.id()).await?;\n            // Prevent the init process from exiting and continuing with start\n            // Return early to reduce the time it takes to return only when runc encounters an error\n            if container.init_state().await == EnumOrUnknown::new(Status::STOPPED) {\n                debug!(\"container init process has exited, start process should not continue\");\n                return Err(ttrpc::Error::RpcStatus(ttrpc::get_status(\n                    ttrpc::Code::FAILED_PRECONDITION,\n                    format!(\"container init process has exited {}\", container.id().await),\n                )));\n            }\n            container.start(req.exec_id.as_str().as_option()).await?\n        };\n\n        let mut resp = StartResponse::new();\n        resp.pid = pid as u32;\n\n        if req.exec_id.is_empty() {\n            self.send_event(TaskStart {\n                container_id: req.id.to_string(),\n                pid: pid as u32,\n                ..Default::default()\n            })\n            .await;\n            #[cfg(target_os = \"linux\")]\n            if let Err(e) = monitor_oom(&req.id, resp.pid, self.tx.clone()).await {\n                error!(\"monitor_oom failed: {:?}.\", e);\n            }\n        } else {\n            self.send_event(TaskExecStarted {\n                container_id: req.id.to_string(),\n                exec_id: req.exec_id.to_string(),\n                pid: pid as u32,\n                ..Default::default()\n            })\n            .await;\n        };\n\n        info!(\"Start request for {:?} returns pid {}\", req, resp.pid());\n       
 Ok(resp)\n    }\n\n    async fn delete(&self, _ctx: &TtrpcContext, req: DeleteRequest) -> TtrpcResult<DeleteResponse> {\n        info!(\"Delete request for {:?}\", &req);\n        let (id, pid, exit_status, exited_at) = {\n            let mut container = self.container_mut(req.id()).await?;\n            let id = container.id().await;\n            let exec_id_opt = req.exec_id().as_option();\n            let (pid, exit_status, exited_at) = container.delete(exec_id_opt).await?;\n            self.factory.cleanup(&self.namespace, &container).await?;\n            (id, pid, exit_status, exited_at)\n        };\n\n        if req.exec_id().is_empty() {\n            self.containers.write().await.remove(req.id());\n        }\n\n        let ts = convert_to_timestamp(exited_at);\n        self.send_event(TaskDelete {\n            container_id: id,\n            pid: pid as u32,\n            exit_status: exit_status as u32,\n            exited_at: Some(ts.clone()).into(),\n            ..Default::default()\n        })\n        .await;\n\n        let mut resp = DeleteResponse::new();\n        resp.set_exited_at(ts);\n        resp.set_pid(pid as u32);\n        resp.set_exit_status(exit_status as u32);\n        info!(\n            \"Delete request for {} {} returns {:?}\",\n            req.id(),\n            req.exec_id(),\n            resp\n        );\n        Ok(resp)\n    }\n\n    async fn pids(&self, _ctx: &TtrpcContext, req: PidsRequest) -> TtrpcResult<PidsResponse> {\n        debug!(\"Pids request for {:?}\", req);\n        let processes = self.container(req.id()).await?.all_processes().await?;\n        debug!(\"Pids request for {:?} returns successfully\", req);\n        Ok(PidsResponse {\n            processes,\n            ..Default::default()\n        })\n    }\n\n    async fn pause(&self, _ctx: &TtrpcContext, req: PauseRequest) -> TtrpcResult<Empty> {\n        info!(\"pause request for {:?}\", req);\n        self.container_mut(req.id()).await?.pause().await?;\n        
self.send_event(TaskPaused {\n            container_id: req.id.to_string(),\n            ..Default::default()\n        })\n        .await;\n        info!(\"pause request for {:?} returns successfully\", req);\n        Ok(Empty::new())\n    }\n\n    async fn resume(&self, _ctx: &TtrpcContext, req: ResumeRequest) -> TtrpcResult<Empty> {\n        info!(\"resume request for {:?}\", req);\n        self.container_mut(req.id()).await?.resume().await?;\n        self.send_event(TaskResumed {\n            container_id: req.id.to_string(),\n            ..Default::default()\n        })\n        .await;\n        info!(\"resume request for {:?} returns successfully\", req);\n        Ok(Empty::new())\n    }\n\n    async fn kill(&self, _ctx: &TtrpcContext, req: KillRequest) -> TtrpcResult<Empty> {\n        info!(\"Kill request for {:?}\", req);\n        self.container_mut(req.id())\n            .await?\n            .kill(req.exec_id().as_option(), req.signal, req.all)\n            .await?;\n        info!(\"Kill request for {:?} returns successfully\", req);\n        Ok(Empty::new())\n    }\n\n    async fn exec(&self, _ctx: &TtrpcContext, req: ExecProcessRequest) -> TtrpcResult<Empty> {\n        info!(\"Exec request for {:?}\", req);\n        let exec_id = req.exec_id().to_string();\n\n        let container_id = {\n            let mut container = self.container_mut(req.id()).await?;\n            container.exec(req).await?;\n            container.id().await\n        };\n\n        self.send_event(TaskExecAdded {\n            container_id,\n            exec_id,\n            ..Default::default()\n        })\n        .await;\n\n        Ok(Empty::new())\n    }\n\n    async fn resize_pty(&self, _ctx: &TtrpcContext, req: ResizePtyRequest) -> TtrpcResult<Empty> {\n        debug!(\n            \"Resize pty request for container {}, exec_id: {}\",\n            &req.id, &req.exec_id\n        );\n        self.container_mut(req.id())\n            .await?\n            
.resize_pty(req.exec_id().as_option(), req.height, req.width)\n            .await?;\n        Ok(Empty::new())\n    }\n\n    async fn close_io(&self, _ctx: &TtrpcContext, req: CloseIORequest) -> TtrpcResult<Empty> {\n        self.container_mut(req.id())\n            .await?\n            .close_io(req.exec_id().as_option())\n            .await?;\n        Ok(Empty::new())\n    }\n\n    async fn update(&self, _ctx: &TtrpcContext, mut req: UpdateTaskRequest) -> TtrpcResult<Empty> {\n        debug!(\"Update request for id {:?}\", req.id);\n\n        let id = req.take_id();\n\n        let data = req\n            .resources\n            .into_option()\n            .map(|r| r.value)\n            .unwrap_or_default();\n\n        let resources: LinuxResources = serde_json::from_slice(&data).map_err(|e| {\n            ttrpc::Error::RpcStatus(ttrpc::get_status(\n                ttrpc::Code::INVALID_ARGUMENT,\n                format!(\"failed to parse resource spec: {}\", e),\n            ))\n        })?;\n        debug!(\"Update resource is {:?}\", resources);\n        self.container_mut(&id).await?.update(&resources).await?;\n        Ok(Empty::new())\n    }\n\n    async fn wait(&self, _ctx: &TtrpcContext, req: WaitRequest) -> TtrpcResult<WaitResponse> {\n        info!(\"Wait request for {:?}\", req);\n        let exec_id = req.exec_id.as_str().as_option();\n        let wait_rx = {\n            let mut container = self.container_mut(req.id()).await?;\n            let state = container.state(exec_id).await?;\n            if state.status() != Status::RUNNING && state.status() != Status::CREATED {\n                let mut resp = WaitResponse::new();\n                resp.exit_status = state.exit_status;\n                resp.exited_at = state.exited_at;\n                info!(\"Wait request for {:?} returns {:?}\", req, &resp);\n                return Ok(resp);\n            }\n            container.wait_channel(req.exec_id().as_option()).await?\n        };\n\n        
wait_rx.await.unwrap_or_default();\n        // get lock again.\n        let (_, code, exited_at) = self\n            .container(req.id())\n            .await?\n            .get_exit_info(exec_id)\n            .await?;\n        let mut resp = WaitResponse::new();\n        resp.set_exit_status(code as u32);\n        let ts = convert_to_timestamp(exited_at);\n        resp.set_exited_at(ts);\n        info!(\"Wait request for {:?} returns {:?}\", req, &resp);\n        Ok(resp)\n    }\n\n    async fn stats(&self, _ctx: &TtrpcContext, req: StatsRequest) -> TtrpcResult<StatsResponse> {\n        debug!(\"Stats request for {:?}\", req);\n        let stats = self.container(req.id()).await?.stats().await?;\n        let mut resp = StatsResponse::new();\n        resp.set_stats(convert_to_any(Box::new(stats))?);\n        Ok(resp)\n    }\n\n    async fn connect(\n        &self,\n        _ctx: &TtrpcContext,\n        req: ConnectRequest,\n    ) -> TtrpcResult<ConnectResponse> {\n        info!(\"Connect request for {:?}\", req);\n\n        let pid = if let Ok(container) = self.container(req.id()).await {\n            container.pid().await as u32\n        } else {\n            0\n        };\n\n        Ok(ConnectResponse {\n            shim_pid: std::process::id(),\n            task_pid: pid,\n            ..Default::default()\n        })\n    }\n\n    async fn shutdown(&self, _ctx: &TtrpcContext, _req: ShutdownRequest) -> TtrpcResult<Empty> {\n        debug!(\"Shutdown request\");\n        let containers = self.containers.read().await;\n        if !containers.is_empty() {\n            return Ok(Empty::new());\n        }\n        self.exit.signal();\n        Ok(Empty::default())\n    }\n}\n"
  },
  {
    "path": "crates/shim/Cargo.toml",
    "content": "[package]\nname = \"containerd-shim\"\nversion = \"0.11.0\"\nauthors = [\n    \"Maksym Pavlenko <pavlenko.maksym@gmail.com>\",\n    \"The containerd Authors\",\n]\ndescription = \"containerd shim extension\"\nkeywords = [\"containerd\", \"shim\", \"containers\"]\ncategories = [\"api-bindings\", \"asynchronous\"]\n\nedition.workspace = true\nlicense.workspace = true\nrepository.workspace = true\nhomepage.workspace = true\n\n[features]\nasync = [\"async-trait\", \"containerd-shim-protos/async\", \"futures\", \"tokio\"]\ntracing = [\"dep:tracing\"]\ndocs = []\n\n[[example]]\nname = \"skeleton_async\"\nrequired-features = [\"async\"]\n\n[[example]]\nname = \"windows-log-reader\"\npath = \"examples/windows_log_reader.rs\"\n\n[dependencies]\nwhich = { version = \"8.0.0\", default-features = false, features = [\"real-sys\"] }\ncontainerd-shim-protos = { path = \"../shim-protos\", version = \"0.11.0\" }\ngo-flag = \"0.1.0\"\nsha2 = { version = \"0.10\", default-features = false, features = [\"std\"] }\nlibc.workspace = true\nlog = { workspace = true, features = [\"std\", \"kv_unstable\"] }\nnix = { workspace = true, features = [\n    \"fs\",\n    \"socket\",\n    \"signal\",\n    \"mount\",\n    \"sched\",\n] }\noci-spec = { workspace = true, features = [\"runtime\"] }\nsignal-hook = \"0.3.18\"\nserde = { workspace = true, features = [\"derive\", \"std\"] }\nserde_json = { workspace = true, features = [\"std\"] }\ntempfile.workspace = true\nthiserror.workspace = true\ntime = { workspace = true, features = [\"std\", \"formatting\"] }\n\n# tracing\ntracing = { version = \"0.1\", default-features = false, optional = true }\n\n# Async dependencies\nasync-trait = { workspace = true, optional = true }\nfutures = { workspace = true, features = [\"std\", \"alloc\"], optional = true }\ntokio = { workspace = true, features = [\"macros\", \"rt-multi-thread\", \"process\", \"sync\", \"fs\", \"io-util\", \"time\", \"signal\", \"io-std\"], optional = true 
}\n\n[target.'cfg(target_os = \"linux\")'.dependencies]\ncgroups-rs.workspace = true\n\n[target.'cfg(windows)'.dependencies]\nmio = { version = \"1.1\", default-features = false, features = [\"os-ext\", \"os-poll\"] }\nwindows-sys = { version = \"0.52.0\", default-features = false, features = [\n    \"Win32_Foundation\",\n    \"Win32_System_Console\",\n    \"Win32_System_Pipes\",\n    \"Win32_Security\",\n    \"Win32_Storage_FileSystem\",\n    \"Win32_System_Threading\",\n] }\n\n[dev-dependencies]\ntempfile.workspace = true\n\n[package.metadata.docs.rs]\nfeatures = [\"docs\"]\n"
  },
  {
    "path": "crates/shim/README.md",
    "content": "# Shim extension for containerd\n\n[![Crates.io](https://img.shields.io/crates/v/containerd-shim)](https://crates.io/crates/containerd-shim)\n[![docs.rs](https://img.shields.io/docsrs/containerd-shim)](https://docs.rs/containerd-shim/latest/containerd_shim/)\n[![Crates.io](https://img.shields.io/crates/l/containerd-shim)](https://github.com/containerd/rust-extensions/blob/main/LICENSE)\n[![CI](https://github.com/containerd/rust-extensions/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/containerd/rust-extensions/actions/workflows/ci.yml)\n\nRust crate to ease runtime v2 shim implementation.\n\nIt replicates same [shim.Run](https://github.com/containerd/containerd/blob/dbef1d56d7ebc05bc4553d72c419ed5ce025b05d/runtime/v2/example/cmd/main.go)\nAPI offered by containerd's shim v2 runtime implementation written in Go.\n\n## Runtime\n\nRuntime v2 introduces a first class shim API for runtime authors to integrate with containerd.\nThe shim API is minimal and scoped to the execution lifecycle of a container.\n\nThis crate simplifies shim v2 runtime development for containerd. 
It handles common tasks such\nas command line parsing, setting up shim's TTRPC server, logging, events, etc.\n\nClients are expected to implement [Shim] and [Task] traits with task handling routines.\nThis generally replicates same API as in Go [version](https://github.com/containerd/containerd/blob/main/core/runtime/v2/example/cmd/main.go).\n\nOnce implemented, shim's bootstrap code is as easy as:\n```text\nshim::run::<Service>(\"io.containerd.empty.v1\")\n```\n\n## Look and feel\n\nThe API is very similar to the one offered by Go version:\n\n```rust,no_run\nuse std::sync::Arc;\n\nuse async_trait::async_trait;\nuse containerd_shim::{\n    asynchronous::{run, spawn, ExitSignal, Shim},\n    publisher::RemotePublisher,\n    Config, Error, Flags, StartOpts, TtrpcResult,\n};\nuse containerd_shim_protos::{\n    api, api::DeleteResponse, shim_async::Task, ttrpc::r#async::TtrpcContext,\n};\nuse log::info;\n\n#[derive(Clone)]\nstruct Service {\n    exit: Arc<ExitSignal>,\n}\n\n#[async_trait]\nimpl Shim for Service {\n    type T = Service;\n\n    async fn new(_runtime_id: &str, _args: &Flags, _config: &mut Config) -> Self {\n        Service {\n            exit: Arc::new(ExitSignal::default()),\n        }\n    }\n\n    async fn start_shim(&mut self, opts: StartOpts) -> Result<String, Error> {\n        let grouping = opts.id.clone();\n        let address = spawn(opts, &grouping, Vec::new()).await?;\n        Ok(address)\n    }\n\n    async fn delete_shim(&mut self) -> Result<DeleteResponse, Error> {\n        Ok(DeleteResponse::new())\n    }\n\n    async fn wait(&mut self) {\n        self.exit.wait().await;\n    }\n\n    async fn create_task_service(&self, _publisher: RemotePublisher) -> Self::T {\n        self.clone()\n    }\n}\n\n#[async_trait]\nimpl Task for Service {\n    async fn connect(\n        &self,\n        _ctx: &TtrpcContext,\n        _req: api::ConnectRequest,\n    ) -> TtrpcResult<api::ConnectResponse> {\n        info!(\"Connect request\");\n        
Ok(api::ConnectResponse {\n            version: String::from(\"example\"),\n            ..Default::default()\n        })\n    }\n\n    async fn shutdown(\n        &self,\n        _ctx: &TtrpcContext,\n        _req: api::ShutdownRequest,\n    ) -> TtrpcResult<api::Empty> {\n        info!(\"Shutdown request\");\n        self.exit.signal();\n        Ok(api::Empty::default())\n    }\n}\n\n#[tokio::main]\nasync fn main() {\n    run::<Service>(\"io.containerd.empty.v1\", None).await;\n}\n\n```\n\n## How to use with containerd\n\n**Note**: All operations are in the root directory of `rust-extensions`.\n\nWith shim v2 runtime:\n\n```bash\n$ cargo build --example skeleton\n$ sudo cp ./target/debug/examples/skeleton /usr/local/bin/containerd-shim-skeleton-v1\n$ sudo ctr run --rm --runtime io.containerd.skeleton.v1 -t docker.io/library/hello-world:latest hello\n```\n\nOr if on 1.6+\n\n```bash\n$ cargo build --example skeleton\n$ sudo ctr run --rm --runtime ./target/debug/examples/skeleton docker.io/library/hello-world:latest hello\n```\n\nOr manually:\n\n```bash\n$ touch log\n\n# Run containerd in background\n$ sudo TTRPC_ADDRESS=\"/var/run/containerd/containerd.sock.ttrpc\" \\\n    cargo run --example skeleton -- \\\n    -namespace default \\\n    -id 1234 \\\n    -address /var/run/containerd/containerd.sock \\\n    -publish-binary ./bin/containerd \\\n    start\nunix:///var/run/containerd/eb8e7d1c48c2a1ec.sock\n\n$ cargo build --example shim-proto-connect\n$ sudo ./target/debug/examples/shim-proto-connect unix:///var/run/containerd/eb8e7d1c48c2a1ec.sock\nConnecting to unix:///var/run/containerd/eb8e7d1c48c2a1ec.sock...\nSending `Connect` request...\nConnect response: version: \"example\"\nSending `Shutdown` request...\nShutdown response: \"\"\n\n$ cat log\n[INFO] server listen started\n[INFO] server started\n[INFO] Shim successfully started, waiting for exit signal...\n[INFO] Connect request\n[INFO] Shutdown request\n[INFO] Shutting down shim instance\n[INFO] close 
monitor\n[INFO] listener shutdown for quit flag\n[INFO] ttrpc server listener stopped\n[INFO] listener thread stopped\n[INFO] begin to shutdown connection\n[INFO] connections closed\n[INFO] reaper thread exited\n[INFO] reaper thread stopped\n```\n\n### Running on Windows\n```powershell\n# Run containerd in background\n$env:TTRPC_ADDRESS=\"\\\\.\\pipe\\containerd-containerd.ttrpc\"\n\n$ cargo run --example skeleton -- -namespace default -id 1234 -address \"\\\\.\\pipe\\containerd-containerd\" start\n\\\\.\\pipe\\containerd-shim-bc764c65e177434fcefe8257dc440be8b8acf7c96156320d965938f7e9ae1a35-pipe\n\n# (Optional) Run the log collector in a separate command window\n# note: log reader won't work if containerd is connected to the named pipe, this works when running manually to help debug locally\n$ cargo run --example windows-log-reader \\\\.\\pipe\\containerd-shim-default-1234-log\nReading logs from: \\\\.\\pipe\\containerd-shim-default-1234-log\n<logs will appear after next command>\n\n$ cargo run --example shim-proto-connect \\\\.\\pipe\\containerd-shim-bc764c65e177434fcefe8257dc440be8b8acf7c96156320d965938f7e9ae1a35-pipe\nConnecting to \\\\.\\pipe\\containerd-shim-bc764c65e177434fcefe8257dc440be8b8acf7c96156320d965938f7e9ae1a35-pipe...\nSending `Connect` request...\nConnect response: version: \"example\"\nSending `Shutdown` request...\nShutdown response: \"\"\n```\n\n## Supported Platforms\nCurrently, the following OSs and hardware architectures are supported, and more efforts are needed to enable and validate other OSs and architectures.\n- Linux\n- Mac OS\n- Windows\n"
  },
  {
    "path": "crates/shim/examples/publish.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\nuse std::env;\n\nuse containerd_shim::{publisher::RemotePublisher, Context};\nuse containerd_shim_protos::events::task::TaskOOM;\n\n#[cfg(not(feature = \"async\"))]\nfn main() {\n    let args: Vec<String> = env::args().collect();\n\n    // Must not start with unix://\n    let address = args\n        .get(1)\n        .ok_or(\"First argument must be containerd's TTRPC address to publish events\")\n        .unwrap();\n\n    println!(\"Connecting: {}\", &address);\n\n    let publisher = RemotePublisher::new(address).expect(\"Connect failed\");\n\n    let mut event = TaskOOM::new();\n    event.set_container_id(\"123\".into());\n\n    let ctx = Context::default();\n\n    println!(\"Sending event\");\n\n    publisher\n        .publish(ctx, \"/tasks/oom\", \"default\", Box::new(event))\n        .expect(\"Publish failed\");\n\n    println!(\"Done\");\n}\n\n#[cfg(feature = \"async\")]\n#[tokio::main]\nasync fn main() {\n    let args: Vec<String> = env::args().collect();\n\n    // Must not start with unix://\n    let address = args\n        .get(1)\n        .ok_or(\"First argument must be containerd's TTRPC address to publish events\")\n        .unwrap();\n\n    println!(\"Connecting: {}\", &address);\n\n    let publisher = RemotePublisher::new(address).await.expect(\"Connect failed\");\n\n    let mut event = TaskOOM::new();\n    
event.set_container_id(\"123\".into());\n\n    let ctx = Context::default();\n\n    println!(\"Sending event\");\n\n    publisher\n        .publish(ctx, \"/tasks/oom\", \"default\", Box::new(event))\n        .await\n        .expect(\"Publish failed\");\n\n    println!(\"Done\");\n}\n"
  },
  {
    "path": "crates/shim/examples/skeleton.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n#[cfg(not(feature = \"async\"))]\nuse containerd_shim as shim;\n\n#[cfg(not(feature = \"async\"))]\nmod skeleton {\n    use std::sync::Arc;\n\n    use containerd_shim as shim;\n    use log::info;\n    use shim::{\n        api, synchronous::publisher::RemotePublisher, Config, DeleteResponse, ExitSignal, Flags,\n        TtrpcContext, TtrpcResult,\n    };\n\n    #[derive(Clone)]\n    pub(crate) struct Service {\n        exit: Arc<ExitSignal>,\n    }\n\n    impl shim::Shim for Service {\n        type T = Service;\n\n        fn new(_runtime_id: &str, _args: &Flags, _config: &mut Config) -> Self {\n            Service {\n                exit: Arc::new(ExitSignal::default()),\n            }\n        }\n\n        fn start_shim(&mut self, opts: shim::StartOpts) -> Result<String, shim::Error> {\n            let grouping = opts.id.clone();\n            let (_child_id, address) = shim::spawn(opts, &grouping, Vec::new())?;\n            Ok(address)\n        }\n\n        fn delete_shim(&mut self) -> Result<DeleteResponse, shim::Error> {\n            Ok(DeleteResponse::new())\n        }\n\n        fn wait(&mut self) {\n            self.exit.wait();\n        }\n\n        fn create_task_service(&self, _publisher: RemotePublisher) -> Self::T {\n            self.clone()\n        }\n    }\n\n    impl shim::Task for Service {\n        fn connect(\n            &self,\n  
          _ctx: &TtrpcContext,\n            _req: api::ConnectRequest,\n        ) -> TtrpcResult<api::ConnectResponse> {\n            info!(\"Connect request\");\n            Ok(api::ConnectResponse {\n                version: String::from(\"example\"),\n                ..Default::default()\n            })\n        }\n\n        fn shutdown(\n            &self,\n            _ctx: &TtrpcContext,\n            _req: api::ShutdownRequest,\n        ) -> TtrpcResult<api::Empty> {\n            info!(\"Shutdown request\");\n            self.exit.signal();\n            Ok(api::Empty::default())\n        }\n    }\n}\n\nfn main() {\n    #[cfg(not(feature = \"async\"))]\n    shim::run::<skeleton::Service>(\"io.containerd.empty.v1\", None)\n}\n"
  },
  {
    "path": "crates/shim/examples/skeleton_async.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::sync::Arc;\n\nuse async_trait::async_trait;\nuse containerd_shim::{\n    asynchronous::{run, spawn, ExitSignal, Shim},\n    publisher::RemotePublisher,\n    Config, Error, Flags, StartOpts, TtrpcResult,\n};\nuse containerd_shim_protos::{\n    api, api::DeleteResponse, shim_async::Task, ttrpc::r#async::TtrpcContext,\n};\nuse log::info;\n\n#[derive(Clone)]\nstruct Service {\n    exit: Arc<ExitSignal>,\n}\n\n#[async_trait]\nimpl Shim for Service {\n    type T = Service;\n\n    async fn new(_runtime_id: &str, _args: &Flags, _config: &mut Config) -> Self {\n        Service {\n            exit: Arc::new(ExitSignal::default()),\n        }\n    }\n\n    async fn start_shim(&mut self, opts: StartOpts) -> Result<String, Error> {\n        let grouping = opts.id.clone();\n        let address = spawn(opts, &grouping, Vec::new()).await?;\n        Ok(address)\n    }\n\n    async fn delete_shim(&mut self) -> Result<DeleteResponse, Error> {\n        Ok(DeleteResponse::new())\n    }\n\n    async fn wait(&mut self) {\n        self.exit.wait().await;\n    }\n\n    async fn create_task_service(&self, _publisher: RemotePublisher) -> Self::T {\n        self.clone()\n    }\n}\n\n#[async_trait]\nimpl Task for Service {\n    async fn connect(\n        &self,\n        _ctx: &TtrpcContext,\n        _req: api::ConnectRequest,\n    ) -> 
TtrpcResult<api::ConnectResponse> {\n        info!(\"Connect request\");\n        Ok(api::ConnectResponse {\n            version: String::from(\"example\"),\n            ..Default::default()\n        })\n    }\n\n    async fn shutdown(\n        &self,\n        _ctx: &TtrpcContext,\n        _req: api::ShutdownRequest,\n    ) -> TtrpcResult<api::Empty> {\n        info!(\"Shutdown request\");\n        self.exit.signal();\n        Ok(api::Empty::default())\n    }\n}\n\n#[tokio::main]\nasync fn main() {\n    run::<Service>(\"io.containerd.empty.v1\", None).await;\n}\n"
  },
  {
    "path": "crates/shim/examples/windows_log_reader.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n#[cfg(windows)]\nuse std::error::Error;\n\n#[cfg(windows)]\nfn main() -> Result<(), Box<dyn Error>> {\n    use std::{\n        env,\n        fs::OpenOptions,\n        os::windows::{\n            fs::OpenOptionsExt,\n            io::{FromRawHandle, IntoRawHandle},\n        },\n        time::Duration,\n    };\n\n    use mio::{windows::NamedPipe, Events, Interest, Poll, Token};\n    use windows_sys::Win32::Storage::FileSystem::FILE_FLAG_OVERLAPPED;\n\n    let args: Vec<String> = env::args().collect();\n\n    let address = args\n        .get(1)\n        .ok_or(\"First argument must be shims address to read logs (\\\\\\\\.\\\\pipe\\\\containerd-shim-{ns}-{id}-log) \")\n        .unwrap();\n\n    println!(\"Reading logs from: {}\", &address);\n\n    let mut opts = OpenOptions::new();\n    opts.read(true)\n        .write(true)\n        .custom_flags(FILE_FLAG_OVERLAPPED);\n    let file = opts.open(address).unwrap();\n    let mut client = unsafe { NamedPipe::from_raw_handle(file.into_raw_handle()) };\n\n    let mut stdio = std::io::stdout();\n    let mut poll = Poll::new().unwrap();\n    poll.registry()\n        .register(&mut client, Token(1), Interest::READABLE)\n        .unwrap();\n    let mut events = Events::with_capacity(128);\n    loop {\n        poll.poll(&mut events, Some(Duration::from_millis(10)))\n            .unwrap();\n        match 
std::io::copy(&mut client, &mut stdio) {\n            Ok(_) => break,\n            Err(_) => continue,\n        }\n    }\n\n    Ok(())\n}\n\n#[cfg(unix)]\nfn main() {\n    println!(\"This example is only for Windows\");\n}\n"
  },
  {
    "path": "crates/shim/src/args.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::ffi::OsStr;\n\nuse crate::error::{Error, Result};\n\n/// Flags to be passed from containerd daemon to a shim binary.\n/// Reflects <https://github.com/containerd/containerd/blob/master/runtime/v2/shim/shim.go#L100>\n#[derive(Debug, Default)]\npub struct Flags {\n    /// Enable debug output in logs.\n    pub debug: bool,\n    /// Namespace that owns the shim.\n    pub namespace: String,\n    /// Id of the task.\n    pub id: String,\n    /// Abstract socket path to serve.\n    pub socket: String,\n    /// Path to the bundle if not workdir.\n    pub bundle: String,\n    /// GRPC address back to main containerd.\n    pub address: String,\n    /// Path to publish binary (used for publishing events).\n    pub publish_binary: String,\n    /// Shim action (start / delete).\n    /// See <https://github.com/containerd/containerd/blob/master/runtime/v2/shim/shim.go#L191>\n    pub action: String,\n    /// Version of the shim.\n    pub version: bool,\n    /// get the option protobuf from stdin, print the shim info protobuf to stdout, and exit\n    pub info: bool,\n}\n\n/// Parses command line arguments passed to the shim.\n#[cfg_attr(feature = \"tracing\", tracing::instrument(skip_all, level = \"Info\"))]\npub fn parse<S: AsRef<OsStr>>(args: &[S]) -> Result<Flags> {\n    let mut flags = Flags::default();\n\n    let mut version_short = false;\n    
let mut version_long = false;\n    let args: Vec<String> = go_flag::parse_args(args, |f| {\n        f.add_flag(\"debug\", &mut flags.debug);\n        f.add_flag(\"v\", &mut version_short);\n        f.add_flag(\"version\", &mut version_long);\n        f.add_flag(\"namespace\", &mut flags.namespace);\n        f.add_flag(\"id\", &mut flags.id);\n        f.add_flag(\"socket\", &mut flags.socket);\n        f.add_flag(\"bundle\", &mut flags.bundle);\n        f.add_flag(\"address\", &mut flags.address);\n        f.add_flag(\"publish-binary\", &mut flags.publish_binary);\n        f.add_flag(\"info\", &mut flags.info);\n    })\n    .map_err(|e| Error::InvalidArgument(e.to_string()))?;\n\n    flags.version = version_short || version_long;\n\n    if let Some(action) = args.first() {\n        flags.action = action.into();\n    }\n\n    Ok(flags)\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn parse_all() {\n        let args = [\n            \"-debug\",\n            \"-id\",\n            \"123\",\n            \"-namespace\",\n            \"default\",\n            \"-socket\",\n            \"/path/to/socket\",\n            \"-publish-binary\",\n            \"/path/to/binary\",\n            \"-bundle\",\n            \"bundle\",\n            \"-address\",\n            \"address\",\n            \"delete\",\n        ];\n\n        let flags = parse(&args).unwrap();\n\n        assert!(flags.debug);\n        assert!(!flags.version);\n        assert_eq!(flags.id, \"123\");\n        assert_eq!(flags.namespace, \"default\");\n        assert_eq!(flags.socket, \"/path/to/socket\");\n        assert_eq!(flags.publish_binary, \"/path/to/binary\");\n        assert_eq!(flags.bundle, \"bundle\");\n        assert_eq!(flags.address, \"address\");\n        assert_eq!(flags.action, \"delete\");\n    }\n\n    #[test]\n    fn parse_flags() {\n        let args = [\"-id\", \"123\", \"-namespace\", \"default\"];\n\n        let flags = parse(&args).unwrap();\n\n        
assert!(!flags.debug);\n        assert_eq!(flags.id, \"123\");\n        assert_eq!(flags.namespace, \"default\");\n        assert_eq!(flags.action, \"\");\n    }\n\n    #[test]\n    fn parse_action() {\n        let args = [\"-namespace\", \"1\", \"start\"];\n\n        let flags = parse(&args).unwrap();\n        assert_eq!(flags.action, \"start\");\n        assert_eq!(flags.id, \"\");\n    }\n\n    #[test]\n    fn parse_version_long_flag() {\n        let flags = parse(&[\"-version\"]).unwrap();\n        assert!(flags.version);\n    }\n\n    #[test]\n    fn parse_version_short_flag() {\n        let flags = parse(&[\"-v\"]).unwrap();\n        assert!(flags.version);\n    }\n}\n"
  },
  {
    "path": "crates/shim/src/asynchronous/mod.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::{\n    env,\n    io::Read,\n    os::unix::{fs::FileTypeExt, net::UnixListener},\n    path::Path,\n    process::{self, Command as StdCommand, Stdio},\n    sync::{\n        atomic::{AtomicBool, Ordering},\n        Arc,\n    },\n    task::{ready, Poll},\n};\n\nuse async_trait::async_trait;\nuse containerd_shim_protos::{\n    api::DeleteResponse,\n    protobuf::{well_known_types::any::Any, Message, MessageField},\n    shim::oci::Options,\n    shim_async::{create_task, Client, Task},\n    ttrpc::r#async::Server,\n    types::introspection::{self, RuntimeInfo},\n};\nuse futures::stream::{poll_fn, BoxStream, SelectAll, StreamExt};\nuse libc::{SIGCHLD, SIGINT, SIGPIPE, SIGTERM};\nuse log::{debug, error, info, warn};\nuse nix::{\n    errno::Errno,\n    sys::{\n        signal::Signal,\n        wait::{self, WaitPidFlag, WaitStatus},\n    },\n    unistd::Pid,\n};\nuse oci_spec::runtime::Features;\nuse tokio::{io::AsyncWriteExt, process::Command, sync::Notify};\nuse which::which;\n\nconst DEFAULT_BINARY_NAME: &str = \"runc\";\n\nuse crate::{\n    args,\n    asynchronous::{monitor::monitor_notify_by_pid, publisher::RemotePublisher},\n    error::{Error, Result},\n    logger, parse_sockaddr, reap, socket_address,\n    util::{asyncify, read_file_to_str, write_str_to_file},\n    Config, Flags, StartOpts, TTRPC_ADDRESS,\n};\n\npub mod monitor;\npub mod 
publisher;\npub mod util;\n\n/// Asynchronous Main shim interface that must be implemented by all async shims.\n///\n/// Start and delete routines will be called to handle containerd's shim lifecycle requests.\n#[async_trait]\npub trait Shim {\n    /// Type to provide task service for the shim.\n    type T: Task + Send + Sync;\n\n    /// Create a new instance of  async Shim.\n    ///\n    /// # Arguments\n    /// - `runtime_id`: identifier of the container runtime.\n    /// - `id`: identifier of the shim/container, passed in from Containerd.\n    /// - `namespace`: namespace of the shim/container, passed in from Containerd.\n    /// - `config`: for the shim to pass back configuration information\n    async fn new(runtime_id: &str, args: &Flags, config: &mut Config) -> Self;\n\n    /// Start shim will be called by containerd when launching new shim instance.\n    ///\n    /// It expected to return TTRPC address containerd daemon can use to communicate with\n    /// the given shim instance.\n    /// See <https://github.com/containerd/containerd/tree/master/runtime/v2#start>\n    /// this is an asynchronous call\n    async fn start_shim(&mut self, opts: StartOpts) -> Result<String>;\n\n    /// Delete shim will be called by containerd after shim shutdown to cleanup any leftovers.\n    /// this is an asynchronous call\n    async fn delete_shim(&mut self) -> Result<DeleteResponse>;\n\n    /// Wait for the shim to exit asynchronously.\n    async fn wait(&mut self);\n\n    /// Create the task service object asynchronously.\n    async fn create_task_service(&self, publisher: RemotePublisher) -> Self::T;\n}\n\n/// Async Shim entry point that must be invoked from tokio `main`.\n#[cfg_attr(feature = \"tracing\", tracing::instrument(level = \"info\"))]\npub async fn run<T>(runtime_id: &str, opts: Option<Config>)\nwhere\n    T: Shim + Send + Sync + 'static,\n{\n    if let Some(err) = bootstrap::<T>(runtime_id, opts).await.err() {\n        eprintln!(\"{}: {:?}\", runtime_id, 
err);\n        process::exit(1);\n    }\n}\n/// get runtime info\npub fn run_info() -> Result<RuntimeInfo> {\n    let mut info = introspection::RuntimeInfo {\n        name: \"containerd-shim-runc-v2-rs\".to_string(),\n        version: MessageField::some(introspection::RuntimeVersion {\n            version: env!(\"CARGO_PKG_VERSION\").to_string(),\n            revision: String::default(),\n            ..Default::default()\n        }),\n        ..Default::default()\n    };\n    let mut binary_name = DEFAULT_BINARY_NAME.to_string();\n    let mut data: Vec<u8> = Vec::new();\n    std::io::stdin()\n        .read_to_end(&mut data)\n        .map_err(io_error!(e, \"read stdin\"))?;\n    // get BinaryName from stdin\n    if !data.is_empty() {\n        let opts =\n            Any::parse_from_bytes(&data).and_then(|any| Options::parse_from_bytes(&any.value))?;\n        if !opts.binary_name().is_empty() {\n            binary_name = opts.binary_name().to_string();\n        }\n    }\n    let binary_path = which(binary_name).unwrap();\n\n    // get features\n    let output = StdCommand::new(binary_path)\n        .arg(\"features\")\n        .output()\n        .unwrap();\n\n    let features: Features = serde_json::from_str(&String::from_utf8_lossy(&output.stdout))?;\n\n    // set features\n    let features_any = Any {\n        type_url: \"types.containerd.io/opencontainers/runtime-spec/1/features/Features\".to_string(),\n        // features to json\n        value: serde_json::to_vec(&features)?,\n        ..Default::default()\n    };\n    info.features = MessageField::some(features_any);\n\n    Ok(info)\n}\n\n#[cfg_attr(feature = \"tracing\", tracing::instrument(level = \"info\"))]\nasync fn bootstrap<T>(runtime_id: &str, opts: Option<Config>) -> Result<()>\nwhere\n    T: Shim + Send + Sync + 'static,\n{\n    // Parse command line\n    let os_args: Vec<_> = env::args_os().collect();\n    let flags = args::parse(&os_args[1..])?;\n\n    let ttrpc_address = env::var(TTRPC_ADDRESS)?;\n   
 // Create shim instance\n    let mut config = opts.unwrap_or_default();\n\n    // Setup signals\n    let signals = setup_signals_tokio(&config);\n\n    if !config.no_sub_reaper {\n        reap::set_subreaper()?;\n    }\n\n    let mut shim = T::new(runtime_id, &flags, &mut config).await;\n\n    match flags.action.as_str() {\n        \"start\" => {\n            let args = StartOpts {\n                id: flags.id,\n                publish_binary: flags.publish_binary,\n                address: flags.address,\n                ttrpc_address,\n                namespace: flags.namespace,\n                debug: flags.debug,\n            };\n\n            let address = shim.start_shim(args).await?;\n            let mut stdout = tokio::io::stdout();\n            stdout\n                .write_all(address.as_bytes())\n                .await\n                .map_err(io_error!(e, \"write stdout\"))?;\n            // containerd occasionally read an empty string without flushing the stdout\n            stdout.flush().await.map_err(io_error!(e, \"flush stdout\"))?;\n            Ok(())\n        }\n        \"delete\" => {\n            tokio::spawn(async move {\n                handle_signals(signals).await;\n            });\n            let response = shim.delete_shim().await?;\n            let resp_bytes = response.write_to_bytes()?;\n            tokio::io::stdout()\n                .write_all(resp_bytes.as_slice())\n                .await\n                .map_err(io_error!(e, \"failed to write response\"))?;\n\n            Ok(())\n        }\n        _ => {\n            if flags.socket.is_empty() {\n                return Err(Error::InvalidArgument(String::from(\n                    \"Shim socket cannot be empty\",\n                )));\n            }\n\n            if !config.no_setup_logger {\n                logger::init(\n                    flags.debug,\n                    &config.default_log_level,\n                    &flags.namespace,\n                    &flags.id,\n 
               )?;\n            }\n\n            let publisher = RemotePublisher::new(&ttrpc_address).await?;\n            let task = Box::new(shim.create_task_service(publisher).await)\n                as Box<dyn containerd_shim_protos::shim_async::Task + Send + Sync>;\n            let task_service = create_task(Arc::from(task));\n            let Some(mut server) = create_server_with_retry(&flags).await? else {\n                signal_server_started();\n                return Ok(());\n            };\n            server = server.register_service(task_service);\n            server.start().await?;\n\n            signal_server_started();\n\n            info!(\"Shim successfully started, waiting for exit signal...\");\n            tokio::spawn(async move {\n                handle_signals(signals).await;\n            });\n            shim.wait().await;\n\n            info!(\"Shutting down shim instance\");\n            server.shutdown().await.unwrap_or_default();\n\n            // NOTE: If the shim server is down(like oom killer), the address\n            // socket might be leaking.\n            if let Ok(address) = read_file_to_str(\"address\").await {\n                remove_socket_silently(&address).await;\n            }\n            Ok(())\n        }\n    }\n}\n\n/// Helper structure that wraps atomic bool to signal shim server when to shutdown the TTRPC server.\n///\n/// Shim implementations are responsible for calling [`Self::signal`].\npub struct ExitSignal {\n    notifier: Notify,\n    exited: AtomicBool,\n}\n\nimpl Default for ExitSignal {\n    fn default() -> Self {\n        ExitSignal {\n            notifier: Notify::new(),\n            exited: AtomicBool::new(false),\n        }\n    }\n}\n\nimpl ExitSignal {\n    /// Set exit signal to shutdown shim server.\n    pub fn signal(&self) {\n        self.exited.store(true, Ordering::SeqCst);\n        self.notifier.notify_waiters();\n    }\n\n    /// Wait for the exit signal to be set.\n    pub async fn wait(&self) 
{\n        loop {\n            let notified = self.notifier.notified();\n            if self.exited.load(Ordering::SeqCst) {\n                return;\n            }\n            notified.await;\n        }\n    }\n}\n\n/// Spawn is a helper func to launch shim process asynchronously.\n/// Typically this expected to be called from `StartShim`.\n#[cfg_attr(feature = \"tracing\", tracing::instrument(level = \"info\"))]\npub async fn spawn(opts: StartOpts, grouping: &str, vars: Vec<(&str, &str)>) -> Result<String> {\n    let cmd = env::current_exe().map_err(io_error!(e, \"\"))?;\n    let cwd = env::current_dir().map_err(io_error!(e, \"\"))?;\n    let address = socket_address(&opts.address, &opts.namespace, grouping);\n\n    // Activation pattern comes from the hcsshim: https://github.com/microsoft/hcsshim/blob/v0.10.0-rc.7/cmd/containerd-shim-runhcs-v1/serve.go#L57-L70\n    // another way to do it would to create named pipe and pass it to the child process through handle inheritence but that would require duplicating\n    // the logic in Rust's 'command' for process creation.  
There is an  issue in Rust to make it simplier to specify handle inheritence and this could\n    // be revisited once https://github.com/rust-lang/rust/issues/54760 is implemented.\n\n    let mut command = Command::new(cmd);\n    command\n        .current_dir(cwd)\n        .stdout(Stdio::piped())\n        .stdin(Stdio::null())\n        .stderr(Stdio::null())\n        .envs(vars)\n        .args([\n            \"-namespace\",\n            &opts.namespace,\n            \"-id\",\n            &opts.id,\n            \"-address\",\n            &opts.address,\n            \"-socket\",\n            &address,\n        ]);\n\n    if opts.debug {\n        command.arg(\"-debug\");\n    }\n\n    let mut child = command.spawn().map_err(io_error!(e, \"spawn shim\"))?;\n\n    #[cfg(target_os = \"linux\")]\n    crate::cgroup::set_cgroup_and_oom_score(child.id().unwrap())?;\n\n    let mut reader = child.stdout.take().unwrap();\n    tokio::io::copy(&mut reader, &mut tokio::io::stderr())\n        .await\n        .unwrap();\n\n    Ok(address)\n}\n\n#[cfg_attr(feature = \"tracing\", tracing::instrument(skip_all, level = \"info\"))]\nasync fn create_server(flags: &args::Flags) -> Result<Server> {\n    use containerd_shim_protos::ttrpc::r#async::transport::Listener;\n    let listener = start_listener(&flags.socket).await?;\n    let listener = Listener::try_from(listener).map_err(io_error!(e, \"creating ttrpc listener\"))?;\n    let server = Server::new().add_listener(listener);\n    Ok(server)\n}\n\nasync fn create_server_with_retry(flags: &args::Flags) -> Result<Option<Server>> {\n    // Really try to create a server.\n    let server = match create_server(flags).await {\n        Ok(server) => server,\n        Err(Error::IoError { err, .. 
}) if err.kind() == std::io::ErrorKind::AddrInUse => {\n            // If the address is already in use then make sure it is up and running and return the address\n            // This allows for running a single shim per container scenarios\n            if let Ok(()) = wait_socket_working(&flags.socket, 5, 200).await {\n                write_str_to_file(\"address\", &flags.socket).await?;\n                return Ok(None);\n            }\n            remove_socket(&flags.socket).await?;\n            create_server(flags).await?\n        }\n        Err(e) => return Err(e),\n    };\n\n    Ok(Some(server))\n}\n\nfn signal_server_started() {\n    use libc::{dup2, STDERR_FILENO, STDOUT_FILENO};\n\n    unsafe {\n        if dup2(STDERR_FILENO, STDOUT_FILENO) < 0 {\n            panic!(\"Error closing pipe: {}\", std::io::Error::last_os_error())\n        }\n    }\n}\n\n#[cfg(unix)]\nfn signal_stream(kind: i32) -> std::io::Result<BoxStream<'static, i32>> {\n    use tokio::signal::unix::{signal, SignalKind};\n    let kind = SignalKind::from_raw(kind);\n    signal(kind).map(|mut sig| {\n        // The object returned by `signal` is not a `Stream`.\n        // The `poll_fn` function constructs a `Stream` based on a polling function.\n        // We need to create a `Stream` so that we can use the `SelectAll` stream \"merge\"\n        // all the signal streams.\n        poll_fn(move |cx| {\n            ready!(sig.poll_recv(cx));\n            Poll::Ready(Some(kind.as_raw_value()))\n        })\n        .boxed()\n    })\n}\n\n#[cfg(windows)]\nfn signal_stream(kind: i32) -> std::io::Result<BoxStream<'static, i32>> {\n    use tokio::signal::windows::ctrl_c;\n\n    // Windows doesn't have similar signal like SIGCHLD\n    // We could implement something if required but for now\n    // just implement support for SIGINT\n    if kind != SIGINT {\n        return Err(std::io::Error::new(\n            std::io::ErrorKind::Other,\n            format!(\"Invalid signal {kind}\"),\n        ));\n    
}\n\n    ctrl_c().map(|mut sig| {\n        // The object returned by `signal` is not a `Stream`.\n        // The `poll_fn` function constructs a `Stream` based on a polling function.\n        // We need to create a `Stream` so that we can use the `SelectAll` stream \"merge\"\n        // all the signal streams.\n        poll_fn(move |cx| {\n            ready!(sig.poll_recv(cx));\n            Poll::Ready(Some(kind))\n        })\n        .boxed()\n    })\n}\n\ntype Signals = SelectAll<BoxStream<'static, i32>>;\n\n#[cfg_attr(feature = \"tracing\", tracing::instrument(skip_all, level = \"info\"))]\nfn setup_signals_tokio(config: &Config) -> Signals {\n    #[cfg(unix)]\n    let signals: &[i32] = if config.no_reaper {\n        &[SIGTERM, SIGINT, SIGPIPE]\n    } else {\n        &[SIGTERM, SIGINT, SIGPIPE, SIGCHLD]\n    };\n\n    // Windows doesn't have similar signal like SIGCHLD\n    // We could implement something if required but for now\n    // just listen for SIGINT\n    // Note: see comment at the counterpart in synchronous/mod.rs for details.\n    #[cfg(windows)]\n    let signals: &[i32] = &[SIGINT];\n\n    let signals: Vec<_> = signals\n        .iter()\n        .copied()\n        .map(signal_stream)\n        .collect::<std::io::Result<_>>()\n        .expect(\"signal setup failed\");\n\n    SelectAll::from_iter(signals)\n}\n\n#[cfg_attr(feature = \"tracing\", tracing::instrument(skip_all, level = \"info\"))]\nasync fn handle_signals(signals: Signals) {\n    let mut signals = signals.fuse();\n    while let Some(sig) = signals.next().await {\n        match sig {\n            SIGPIPE => {}\n            SIGTERM | SIGINT => {\n                debug!(\"received {}\", sig);\n            }\n            SIGCHLD => loop {\n                // Note: see comment at the counterpart in synchronous/mod.rs for details.\n                match wait::waitpid(Some(Pid::from_raw(-1)), Some(WaitPidFlag::WNOHANG)) {\n                    Ok(WaitStatus::Exited(pid, status)) => {\n             
           monitor_notify_by_pid(pid.as_raw(), status)\n                            .await\n                            .unwrap_or_else(|e| error!(\"failed to send exit event {}\", e))\n                    }\n                    Ok(WaitStatus::Signaled(pid, sig, _)) => {\n                        debug!(\"child {} terminated({})\", pid, sig);\n                        let exit_code = 128 + sig as i32;\n                        monitor_notify_by_pid(pid.as_raw(), exit_code)\n                            .await\n                            .unwrap_or_else(|e| error!(\"failed to send signal event {}\", e))\n                    }\n                    Ok(WaitStatus::StillAlive) => {\n                        break;\n                    }\n                    Err(Errno::ECHILD) => {\n                        break;\n                    }\n                    Err(e) => {\n                        warn!(\"error occurred in signal handler: {}\", e);\n                    }\n                    _ => {}\n                }\n            },\n            _ => {\n                if let Ok(sig) = Signal::try_from(sig) {\n                    debug!(\"received {}\", sig);\n                } else {\n                    warn!(\"received invalid signal {}\", sig);\n                }\n            }\n        }\n    }\n}\n\n#[cfg_attr(feature = \"tracing\", tracing::instrument(level = \"info\"))]\nasync fn remove_socket_silently(address: &str) {\n    remove_socket(address)\n        .await\n        .unwrap_or_else(|e| warn!(\"failed to remove socket: {}\", e))\n}\n\n#[cfg_attr(feature = \"tracing\", tracing::instrument(level = \"info\"))]\nasync fn remove_socket(address: &str) -> Result<()> {\n    let path = parse_sockaddr(address);\n    if let Ok(md) = Path::new(path).metadata() {\n        if md.file_type().is_socket() {\n            tokio::fs::remove_file(path).await.map_err(io_error!(\n                e,\n                \"failed to remove socket {}\",\n                address\n            
))?;\n        }\n    }\n    Ok(())\n}\n\n#[cfg_attr(feature = \"tracing\", tracing::instrument(skip_all, level = \"info\"))]\nasync fn start_listener(address: &str) -> Result<UnixListener> {\n    let addr = address.to_string();\n    asyncify(move || -> Result<UnixListener> {\n        crate::start_listener(&addr).map_err(|e| Error::IoError {\n            context: format!(\"failed to start listener {}\", addr),\n            err: e,\n        })\n    })\n    .await\n}\n\n#[cfg_attr(feature = \"tracing\", tracing::instrument(level = \"info\"))]\nasync fn wait_socket_working(address: &str, interval_in_ms: u64, count: u32) -> Result<()> {\n    for _i in 0..count {\n        match Client::connect(address).await {\n            Ok(_) => {\n                return Ok(());\n            }\n            Err(_) => {\n                tokio::time::sleep(std::time::Duration::from_millis(interval_in_ms)).await;\n            }\n        }\n    }\n    Err(other!(\"time out waiting for socket {}\", address))\n}\n\n#[cfg(test)]\nmod tests {\n    use std::sync::Arc;\n\n    use crate::asynchronous::{start_listener, ExitSignal};\n\n    #[tokio::test]\n    async fn test_exit_signal() {\n        let signal = Arc::new(ExitSignal::default());\n\n        let cloned = signal.clone();\n        let handle = tokio::spawn(async move {\n            cloned.wait().await;\n        });\n\n        signal.signal();\n\n        if let Err(err) = handle.await {\n            panic!(\"{:?}\", err);\n        }\n    }\n\n    #[tokio::test]\n    async fn test_start_listener() {\n        let tmpdir = tempfile::tempdir().unwrap();\n        let path = tmpdir.path().to_str().unwrap().to_owned();\n\n        let socket = path + \"/ns1/id1/socket\";\n        let _listener = start_listener(&socket).await.unwrap();\n        let _listener2 = start_listener(&socket)\n            .await\n            .expect_err(\"socket should already in use\");\n\n        let socket2 = socket + \"/socket\";\n        
assert!(start_listener(&socket2).await.is_err());\n\n        let path = tmpdir.path().to_str().unwrap().to_owned();\n        let txt_file = path + \"/demo.txt\";\n        tokio::fs::write(&txt_file, \"test\").await.unwrap();\n        assert!(start_listener(&txt_file).await.is_err());\n        let context = tokio::fs::read_to_string(&txt_file).await.unwrap();\n        assert_eq!(context, \"test\");\n    }\n}\n"
  },
  {
    "path": "crates/shim/src/asynchronous/monitor.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::{collections::HashMap, sync::LazyLock};\n\nuse log::error;\nuse tokio::sync::{\n    mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender},\n    Mutex,\n};\n\nuse crate::{\n    error::{Error, Result},\n    monitor::{ExitEvent, Subject, Topic},\n};\n\npub static MONITOR: LazyLock<Mutex<Monitor>> = LazyLock::new(|| {\n    Mutex::new(Monitor {\n        seq_id: 0,\n        subscribers: HashMap::new(),\n        topic_subs: HashMap::new(),\n    })\n});\n\npub async fn monitor_subscribe(topic: Topic) -> Result<Subscription> {\n    let mut monitor = MONITOR.lock().await;\n    let s = monitor.subscribe(topic)?;\n    Ok(s)\n}\n\npub async fn monitor_unsubscribe(sub_id: i64) -> Result<()> {\n    let mut monitor = MONITOR.lock().await;\n    monitor.unsubscribe(sub_id)\n}\n\npub async fn monitor_notify_by_pid(pid: i32, exit_code: i32) -> Result<()> {\n    let monitor = MONITOR.lock().await;\n    monitor.notify_by_pid(pid, exit_code).await\n}\n\npub async fn monitor_notify_by_exec(id: &str, exec_id: &str, exit_code: i32) -> Result<()> {\n    let monitor = MONITOR.lock().await;\n    monitor.notify_by_exec(id, exec_id, exit_code).await\n}\n\npub struct Monitor {\n    pub(crate) seq_id: i64,\n    pub(crate) subscribers: HashMap<i64, Subscriber>,\n    pub(crate) topic_subs: HashMap<Topic, Vec<i64>>,\n}\n\npub(crate) struct Subscriber {\n    
pub(crate) topic: Topic,\n    pub(crate) tx: UnboundedSender<ExitEvent>,\n}\n\npub struct Subscription {\n    pub id: i64,\n    pub rx: UnboundedReceiver<ExitEvent>,\n}\n\nimpl Monitor {\n    pub fn subscribe(&mut self, topic: Topic) -> Result<Subscription> {\n        let (tx, rx) = unbounded_channel::<ExitEvent>();\n        let id = self.seq_id;\n        self.seq_id += 1;\n        let subscriber = Subscriber {\n            tx,\n            topic: topic.clone(),\n        };\n\n        self.subscribers.insert(id, subscriber);\n        self.topic_subs.entry(topic).or_default().push(id);\n        Ok(Subscription { id, rx })\n    }\n\n    pub async fn notify_by_pid(&self, pid: i32, exit_code: i32) -> Result<()> {\n        let subject = Subject::Pid(pid);\n        self.notify_topic(&Topic::Pid, &subject, exit_code).await;\n        self.notify_topic(&Topic::All, &subject, exit_code).await;\n        Ok(())\n    }\n\n    pub async fn notify_by_exec(&self, cid: &str, exec_id: &str, exit_code: i32) -> Result<()> {\n        let subject = Subject::Exec(cid.into(), exec_id.into());\n        self.notify_topic(&Topic::Exec, &subject, exit_code).await;\n        self.notify_topic(&Topic::All, &subject, exit_code).await;\n        Ok(())\n    }\n\n    // notify_topic try best to notify exit codes to all subscribers and log errors.\n    async fn notify_topic(&self, topic: &Topic, subject: &Subject, exit_code: i32) {\n        let mut results = Vec::new();\n        if let Some(subs) = self.topic_subs.get(topic) {\n            let subscribers = subs.iter().filter_map(|x| self.subscribers.get(x));\n            for sub in subscribers {\n                let res = sub\n                    .tx\n                    .send(ExitEvent {\n                        subject: subject.clone(),\n                        exit_code,\n                    })\n                    .map_err(other_error!(\"failed to send exit code\"));\n                results.push(res);\n            }\n        }\n        let mut 
result_iter = results.iter();\n        while let Some(Err(e)) = result_iter.next() {\n            error!(\"failed to send exit code to subscriber {:?}\", e)\n        }\n    }\n\n    pub fn unsubscribe(&mut self, id: i64) -> Result<()> {\n        let sub = self.subscribers.remove(&id);\n        if let Some(s) = sub {\n            self.topic_subs.get_mut(&s.topic).map(|v| {\n                v.iter().position(|&x| x == id).map(|i| {\n                    v.remove(i);\n                })\n            });\n        }\n        Ok(())\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::{\n        asynchronous::monitor::{\n            monitor_notify_by_exec, monitor_notify_by_pid, monitor_subscribe, monitor_unsubscribe,\n        },\n        monitor::{ExitEvent, Subject, Topic},\n    };\n\n    #[tokio::test]\n    async fn test_monitor() {\n        let mut s = monitor_subscribe(Topic::Pid).await.unwrap();\n        let mut s1 = monitor_subscribe(Topic::All).await.unwrap();\n        let mut s2 = monitor_subscribe(Topic::Exec).await.unwrap();\n        monitor_notify_by_pid(13, 128).await.unwrap();\n        monitor_notify_by_exec(\"test-container\", \"test-exec\", 139)\n            .await\n            .unwrap();\n        // pid subscription receive only pid event\n        if let Some(ExitEvent {\n            subject: Subject::Pid(p),\n            exit_code: ec,\n        }) = s.rx.recv().await\n        {\n            assert_eq!(ec, 128);\n            assert_eq!(p, 13);\n        } else {\n            panic!(\"can not receive the notified event\");\n        }\n\n        // topic all receive all events\n        if let Some(ExitEvent {\n            subject: Subject::Pid(p),\n            exit_code: ec,\n        }) = s1.rx.recv().await\n        {\n            assert_eq!(ec, 128);\n            assert_eq!(p, 13);\n        } else {\n            panic!(\"can not receive the notified event\");\n        }\n        if let Some(ExitEvent {\n            subject: Subject::Exec(cid, eid),\n      
      exit_code: ec,\n        }) = s1.rx.recv().await\n        {\n            assert_eq!(cid, \"test-container\");\n            assert_eq!(eid, \"test-exec\");\n            assert_eq!(ec, 139);\n        } else {\n            panic!(\"can not receive the notified event\");\n        }\n\n        // exec topic only receive exec exit event\n        if let Some(ExitEvent {\n            subject: Subject::Exec(cid, eid),\n            exit_code: ec,\n        }) = s2.rx.recv().await\n        {\n            assert_eq!(cid, \"test-container\");\n            assert_eq!(eid, \"test-exec\");\n            assert_eq!(ec, 139);\n        } else {\n            panic!(\"can not receive the notified event\");\n        }\n        monitor_unsubscribe(s.id).await.unwrap();\n        monitor_unsubscribe(s1.id).await.unwrap();\n        monitor_unsubscribe(s2.id).await.unwrap();\n    }\n}\n"
  },
  {
    "path": "crates/shim/src/asynchronous/publisher.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::os::unix::io::RawFd;\n\nuse async_trait::async_trait;\nuse containerd_shim_protos::{\n    api::Empty,\n    protobuf::MessageDyn,\n    shim::{event::Envelope, events},\n    shim_async::{Client, Events, EventsClient},\n    ttrpc,\n    ttrpc::{context::Context, r#async::TtrpcContext},\n};\nuse log::{debug, error, warn};\nuse tokio::sync::mpsc;\n\nuse crate::{\n    error::{self, Result},\n    util::{asyncify, connect, convert_to_any, timestamp},\n};\n\n/// The publisher reports events and uses a queue to retry the event reporting.\n/// The maximum number of attempts to report is 5 times.\n/// When the ttrpc client fails to report, it attempts to reconnect to the client and report.\n///\n/// Max queue size\nconst QUEUE_SIZE: i64 = 1024;\n/// Max try five times\nconst MAX_REQUEUE: i64 = 5;\n\n/// Async Remote publisher connects to containerd's TTRPC endpoint to publish events from shim.\npub struct RemotePublisher {\n    pub address: String,\n    sender: mpsc::Sender<Item>,\n}\n\n#[derive(Clone, Debug)]\npub struct Item {\n    ev: Envelope,\n    ctx: Context,\n    count: i64,\n}\n\nimpl RemotePublisher {\n    /// Connect to containerd's TTRPC endpoint asynchronously.\n    ///\n    /// containerd uses `/run/containerd/containerd.sock.ttrpc` by default\n    pub async fn new(address: impl AsRef<str>) -> Result<RemotePublisher> {\n        let 
client = Self::connect(address.as_ref()).await?;\n        // Init the queue channel\n        let (sender, receiver) = mpsc::channel::<Item>(QUEUE_SIZE as usize);\n        let rt = RemotePublisher {\n            address: address.as_ref().to_string(),\n            sender,\n        };\n        rt.process_queue(client, receiver).await;\n        Ok(rt)\n    }\n\n    /// Process_queue for push events\n    ///\n    /// This is a loop task for dealing event tasks\n    pub async fn process_queue(&self, ttrpc_client: Client, mut receiver: mpsc::Receiver<Item>) {\n        let mut client = EventsClient::new(ttrpc_client);\n        let sender = self.sender.clone();\n        let address = self.address.clone();\n        tokio::spawn(async move {\n            // only this use receiver\n            while let Some(item) = receiver.recv().await {\n                // drop this event after MAX_REQUEUE try\n                if item.count > MAX_REQUEUE {\n                    debug!(\"drop event {:?}\", item);\n                    continue;\n                }\n                let mut req = events::ForwardRequest::new();\n                req.set_envelope(item.ev.clone());\n                let new_item = Item {\n                    ev: item.ev.clone(),\n                    ctx: item.ctx.clone(),\n                    count: item.count + 1,\n                };\n                if let Err(e) = client.forward(new_item.ctx.clone(), &req).await {\n                    match e {\n                        ttrpc::error::Error::RemoteClosed | ttrpc::error::Error::LocalClosed => {\n                            warn!(\"publish fail because the server or client close {:?}\", e);\n                            // reconnect client\n                            if let Ok(c) = Self::connect(address.as_str()).await.map_err(|e| {\n                                debug!(\"reconnect the ttrpc client {:?} fail\", e);\n                            }) {\n                                client = EventsClient::new(c);\n     
                       }\n                        }\n                        _ => {\n                            // TODO! if it is other error , May we should deal with socket file\n                            error!(\"the client forward err is {:?}\", e);\n                        }\n                    }\n                    let sender_ref = sender.clone();\n                    // Take a another task requeue , for no blocking the recv task\n                    tokio::spawn(async move {\n                        // wait for few time and send for imporving the success ratio\n                        tokio::time::sleep(tokio::time::Duration::from_secs(new_item.count as u64))\n                            .await;\n                        // if channel is full and send fail ,release it after 3 seconds\n                        let _ = sender_ref\n                            .send_timeout(new_item, tokio::time::Duration::from_secs(3))\n                            .await;\n                    });\n                }\n            }\n            debug!(\"publisher 'process_queue' quit complete\");\n        });\n    }\n\n    async fn connect(address: impl AsRef<str>) -> Result<Client> {\n        let addr = address.as_ref().to_string();\n        let fd = asyncify(move || -> Result<RawFd> {\n            let fd = connect(addr)?;\n            Ok(fd)\n        })\n        .await?;\n\n        // Safety: `fd` is a unix socket returned by `connect()`.\n        // `from_raw_unix_socket_fd` takes ownership of the RawFd.\n        Ok(unsafe { Client::from_raw_unix_socket_fd(fd) })\n    }\n\n    /// Publish a new event.\n    ///\n    /// Event object can be anything that Protobuf able serialize (e.g. 
implement `Message` trait).\n    pub async fn publish(\n        &self,\n        ctx: Context,\n        topic: &str,\n        namespace: &str,\n        event: Box<dyn MessageDyn>,\n    ) -> Result<()> {\n        let mut envelope = Envelope::new();\n        envelope.set_topic(topic.to_owned());\n        envelope.set_namespace(namespace.to_owned());\n        envelope.set_timestamp(timestamp()?);\n        envelope.set_event(convert_to_any(event)?);\n\n        let item = Item {\n            ev: envelope.clone(),\n            ctx: ctx.clone(),\n            count: 0,\n        };\n\n        //if channel is full and send fail ,release it after 3 seconds\n        self.sender\n            .send_timeout(item, tokio::time::Duration::from_secs(3))\n            .await\n            .map_err(|e| error::Error::Ttrpc(ttrpc::error::Error::Others(e.to_string())))?;\n\n        Ok(())\n    }\n}\n\n#[async_trait]\nimpl Events for RemotePublisher {\n    async fn forward(\n        &self,\n        _ctx: &TtrpcContext,\n        req: events::ForwardRequest,\n    ) -> ttrpc::Result<Empty> {\n        let item = Item {\n            ev: req.envelope().clone(),\n            ctx: Context::default(),\n            count: 0,\n        };\n\n        //if channel is full and send fail ,release it after 3 seconds\n        self.sender\n            .send_timeout(item, tokio::time::Duration::from_secs(3))\n            .await\n            .map_err(|e| error::Error::Ttrpc(ttrpc::error::Error::Others(e.to_string())))?;\n\n        Ok(Empty::default())\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::{os::unix::net::UnixListener, sync::Arc};\n\n    use async_trait::async_trait;\n    use containerd_shim_protos::{\n        api::{Empty, ForwardRequest},\n        events::task::TaskOOM,\n        shim_async::{create_events, Events},\n        ttrpc::asynchronous::{transport::Listener, Server},\n    };\n    use tokio::sync::{\n        mpsc::{channel, Sender},\n        Barrier,\n    };\n\n    use super::*;\n    use 
crate::publisher::ttrpc::r#async::TtrpcContext;\n\n    struct FakeServer {\n        tx: Sender<i32>,\n    }\n\n    #[async_trait]\n    impl Events for FakeServer {\n        async fn forward(&self, _ctx: &TtrpcContext, req: ForwardRequest) -> ttrpc::Result<Empty> {\n            let env = req.envelope();\n            if env.topic() == \"/tasks/oom\" {\n                self.tx.send(0).await.unwrap();\n            } else {\n                self.tx.send(-1).await.unwrap();\n            }\n            Ok(Empty::default())\n        }\n    }\n\n    #[tokio::test]\n    async fn test_connect() {\n        let tmpdir = tempfile::tempdir().unwrap();\n        let path = format!(\"{}/socket\", tmpdir.as_ref().to_str().unwrap());\n        let path1 = path.clone();\n\n        assert!(RemotePublisher::connect(\"a\".repeat(16384)).await.is_err());\n        assert!(RemotePublisher::connect(&path).await.is_err());\n\n        let (tx, mut rx) = channel(1);\n        let server = FakeServer { tx };\n        let barrier = Arc::new(Barrier::new(2));\n        let barrier2 = barrier.clone();\n        let server_thread = tokio::spawn(async move {\n            let listener = UnixListener::bind(&path1).unwrap();\n            let listener = Listener::try_from(listener).unwrap();\n            let service = create_events(Arc::new(server));\n            let mut server = Server::new()\n                .add_listener(listener)\n                .register_service(service);\n            server.start().await.unwrap();\n            barrier2.wait().await;\n\n            barrier2.wait().await;\n            server.shutdown().await.unwrap();\n        });\n\n        barrier.wait().await;\n        let client = RemotePublisher::new(&path).await.unwrap();\n        let mut msg = TaskOOM::new();\n        msg.set_container_id(\"test\".to_string());\n        client\n            .publish(Context::default(), \"/tasks/oom\", \"ns1\", Box::new(msg))\n            .await\n            .unwrap();\n        match rx.recv().await 
{\n            Some(0) => {}\n            _ => {\n                panic!(\"the received event is not same as published\")\n            }\n        }\n        barrier.wait().await;\n        server_thread.await.unwrap();\n    }\n}\n"
  },
  {
    "path": "crates/shim/src/asynchronous/util.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::path::Path;\n\nuse containerd_shim_protos::{api::Mount, shim::oci::Options};\nuse libc::mode_t;\nuse nix::sys::stat::Mode;\nuse oci_spec::runtime::Spec;\nuse tokio::{\n    fs::OpenOptions,\n    io::{AsyncReadExt, AsyncWriteExt},\n    task::spawn_blocking,\n};\n\nuse crate::{\n    error::{Error, Result},\n    util::{AsOption, JsonOptions, CONFIG_FILE_NAME, OPTIONS_FILE_NAME, RUNTIME_FILE_NAME},\n};\n\npub async fn asyncify<F, T>(f: F) -> Result<T>\nwhere\n    F: FnOnce() -> Result<T> + Send + 'static,\n    T: Send + 'static,\n{\n    spawn_blocking(f)\n        .await\n        .map_err(other_error!(\"failed to spawn blocking task\"))?\n}\n\npub async fn read_file_to_str(path: impl AsRef<Path>) -> Result<String> {\n    let mut file = tokio::fs::File::open(&path).await.map_err(io_error!(\n        e,\n        \"failed to open file {}\",\n        path.as_ref().display()\n    ))?;\n\n    let mut content = String::new();\n    file.read_to_string(&mut content).await.map_err(io_error!(\n        e,\n        \"failed to read {}\",\n        path.as_ref().display()\n    ))?;\n    Ok(content)\n}\n\npub async fn write_str_to_file(filename: impl AsRef<Path>, s: impl AsRef<str>) -> Result<()> {\n    let file = filename.as_ref().file_name().ok_or_else(|| {\n        Error::InvalidArgument(format!(\"pid path illegal {}\", filename.as_ref().display()))\n    
})?;\n    let tmp_path = filename\n        .as_ref()\n        .parent()\n        .map(|x| x.join(format!(\".{}\", file.to_str().unwrap_or(\"\"))))\n        .ok_or_else(|| Error::InvalidArgument(String::from(\"failed to create tmp path\")))?;\n    let mut f = OpenOptions::new()\n        .write(true)\n        .create_new(true)\n        .open(&tmp_path)\n        .await\n        .map_err(io_error!(e, \"open {}\", tmp_path.display()))?;\n    f.write_all(s.as_ref().as_bytes()).await.map_err(io_error!(\n        e,\n        \"write tmp file {}\",\n        tmp_path.display()\n    ))?;\n    tokio::fs::rename(&tmp_path, &filename)\n        .await\n        .map_err(io_error!(\n            e,\n            \"rename tmp file to {}\",\n            filename.as_ref().display()\n        ))?;\n    Ok(())\n}\n\npub async fn read_pid_from_file(pid_path: &Path) -> Result<i32> {\n    let pid_str = read_file_to_str(pid_path).await?;\n    let pid = pid_str.parse::<i32>()?;\n    Ok(pid)\n}\n\npub async fn read_spec(bundle: impl AsRef<Path>) -> Result<Spec> {\n    let path = bundle.as_ref().join(CONFIG_FILE_NAME);\n    let content = read_file_to_str(&path).await?;\n    serde_json::from_str::<Spec>(content.as_str()).map_err(other_error!(\"read spec\"))\n}\n\n// read_options reads the option information from the path.\n// When the file does not exist, read_options returns nil without an error.\npub async fn read_options(bundle: impl AsRef<Path>) -> Result<Options> {\n    let path = bundle.as_ref().join(OPTIONS_FILE_NAME);\n    if !path.exists() {\n        return Ok(Options::default());\n    }\n    let opts_str = read_file_to_str(path).await?;\n    let opts =\n        serde_json::from_str::<JsonOptions>(&opts_str).map_err(other_error!(\"read options\"))?;\n    Ok(opts.into())\n}\n\npub async fn read_runtime(bundle: impl AsRef<Path>) -> Result<String> {\n    read_file_to_str(bundle.as_ref().join(RUNTIME_FILE_NAME)).await\n}\n\npub async fn write_options(bundle: impl AsRef<Path>, opt: &Options) -> 
Result<()> {\n    let json_opt = JsonOptions::from(opt.to_owned());\n    let opts_str = serde_json::to_string(&json_opt)?;\n    let path = bundle.as_ref().join(OPTIONS_FILE_NAME);\n    write_str_to_file(path.as_path(), opts_str.as_str()).await\n}\n\npub async fn write_runtime(bundle: impl AsRef<Path>, binary_name: &str) -> Result<()> {\n    write_str_to_file(bundle.as_ref().join(RUNTIME_FILE_NAME), binary_name).await\n}\n\npub async fn mount_rootfs(m: &Mount, target: impl AsRef<Path>) -> Result<()> {\n    let mount_type = m.type_.to_string();\n    let source = m.source.to_string();\n    let options = m.options.to_vec();\n    let rootfs = target.as_ref().to_owned();\n    asyncify(move || -> Result<()> {\n        let mount_type = mount_type.as_option();\n        let source = source.as_option();\n        crate::mount::mount_rootfs(mount_type, source, options.as_slice(), &rootfs)\n    })\n    .await\n}\n\npub async fn mkdir(path: impl AsRef<Path>, mode: mode_t) -> Result<()> {\n    let path_buf = path.as_ref().to_path_buf();\n    asyncify(move || -> Result<()> {\n        if !path_buf.as_path().exists() {\n            let mode = Mode::from_bits(mode).ok_or_else(|| other!(\"invalid dir mode {}\", mode))?;\n            nix::unistd::mkdir(path_buf.as_path(), mode)?;\n        }\n        Ok(())\n    })\n    .await\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::util::{read_file_to_str, write_str_to_file};\n\n    #[tokio::test]\n    async fn test_read_write_str() {\n        let tmpdir = tempfile::tempdir().unwrap();\n        let tmp_file = tmpdir.path().join(\"test\");\n        let test_str = \"this is a test\";\n        write_str_to_file(&tmp_file, test_str).await.unwrap();\n        let read_str = read_file_to_str(&tmp_file).await.unwrap();\n        assert_eq!(read_str, test_str);\n    }\n}\n"
  },
  {
    "path": "crates/shim/src/cgroup.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\n#![cfg(target_os = \"linux\")]\n\nuse std::{\n    error::Error as StdError,\n    fs,\n    io::Read,\n    path::{Path, PathBuf},\n};\n\nuse cgroups_rs::{\n    fs::{\n        cgroup::get_cgroups_relative_paths_by_pid, error::Result as CgResult, hierarchies, Cgroup,\n        MaxValue, Subsystem,\n    },\n    CgroupPid,\n};\nuse containerd_shim_protos::{\n    cgroups::metrics::{CPUStat, CPUUsage, MemoryEntry, MemoryStat, Metrics, PidsStat, Throttle},\n    protobuf::{well_known_types::any::Any, Message},\n    shim::oci::Options,\n};\nuse oci_spec::runtime::LinuxResources;\n\nuse crate::error::{Error, Result};\n\n// OOM_SCORE_ADJ_MAX is from https://github.com/torvalds/linux/blob/master/include/uapi/linux/oom.h#L10\nconst OOM_SCORE_ADJ_MAX: i64 = 1000;\n\n#[cfg_attr(feature = \"tracing\", tracing::instrument(level = \"Info\"))]\npub fn set_cgroup_and_oom_score(pid: u32) -> Result<()> {\n    if pid == 0 {\n        return Ok(());\n    }\n\n    // set cgroup\n    let mut data: Vec<u8> = Vec::new();\n    std::io::stdin()\n        .read_to_end(&mut data)\n        .map_err(io_error!(e, \"read stdin\"))?;\n\n    if !data.is_empty() {\n        let opts =\n            Any::parse_from_bytes(&data).and_then(|any| Options::parse_from_bytes(&any.value))?;\n\n        if !opts.shim_cgroup.is_empty() {\n            add_task_to_cgroup(opts.shim_cgroup.as_str(), 
pid)?;\n        }\n    }\n\n    // set oom score\n    adjust_oom_score(pid)\n}\n\n/// Add a process to the given relative cgroup path\n#[cfg_attr(feature = \"tracing\", tracing::instrument(level = \"Info\"))]\npub fn add_task_to_cgroup(path: &str, pid: u32) -> Result<()> {\n    let h = hierarchies::auto();\n    // use relative path here, need to trim prefix '/'\n    let path = path.trim_start_matches('/');\n\n    Cgroup::load(h, path)\n        .add_task_by_tgid(CgroupPid::from(pid as u64))\n        .map_err(other_error!(\"add task to cgroup\"))\n}\n\n/// Sets the OOM score for the process to the parents OOM score + 1\n/// to ensure that they parent has a lower score than the shim\n#[cfg_attr(feature = \"tracing\", tracing::instrument(level = \"Info\"))]\npub fn adjust_oom_score(pid: u32) -> Result<()> {\n    let score = read_process_oom_score(std::os::unix::process::parent_id())?;\n    if score < OOM_SCORE_ADJ_MAX {\n        write_process_oom_score(pid, score + 1)?;\n    }\n    Ok(())\n}\n\n#[cfg_attr(feature = \"tracing\", tracing::instrument(level = \"info\"))]\nfn read_process_oom_score(pid: u32) -> Result<i64> {\n    let content = fs::read_to_string(format!(\"/proc/{}/oom_score_adj\", pid))\n        .map_err(io_error!(e, \"read oom score\"))?;\n    let score = content\n        .trim()\n        .parse::<i64>()\n        .map_err(other_error!(\"parse oom score\"))?;\n    Ok(score)\n}\n\n#[cfg_attr(feature = \"tracing\", tracing::instrument(level = \"info\"))]\nfn write_process_oom_score(pid: u32, score: i64) -> Result<()> {\n    fs::write(format!(\"/proc/{}/oom_score_adj\", pid), score.to_string())\n        .map_err(io_error!(e, \"write oom score\"))\n}\n\n/// Collect process cgroup stats, return only necessary parts of it\n#[cfg_attr(feature = \"tracing\", tracing::instrument(level = \"info\"))]\npub fn collect_metrics(cgroup: &Cgroup) -> Result<Metrics> {\n    let mut metrics = Metrics::new();\n\n    // to make it easy, fill the necessary metrics only.\n    for 
sub_system in Cgroup::subsystems(cgroup) {\n        match sub_system {\n            Subsystem::Cpu(cpu_ctr) => {\n                let mut cpu_usage = CPUUsage::new();\n                let mut throttle = Throttle::new();\n                let stat = cpu_ctr.cpu().stat;\n                for line in stat.lines() {\n                    let parts = line.split(' ').collect::<Vec<&str>>();\n                    if parts.len() != 2 {\n                        Err(Error::Other(format!(\"invalid cpu stat line: {}\", line)))?;\n                    }\n\n                    // https://github.com/opencontainers/runc/blob/dbe8434359ca35af1c1e10df42b1f4391c1e1010/libcontainer/cgroups/fs2/cpu.go#L70\n                    match parts[0] {\n                        \"usage_usec\" => {\n                            cpu_usage.set_total(parts[1].parse::<u64>().unwrap());\n                        }\n                        \"user_usec\" => {\n                            cpu_usage.set_user(parts[1].parse::<u64>().unwrap());\n                        }\n                        \"system_usec\" => {\n                            cpu_usage.set_kernel(parts[1].parse::<u64>().unwrap());\n                        }\n                        \"nr_periods\" => {\n                            throttle.set_periods(parts[1].parse::<u64>().unwrap());\n                        }\n                        \"nr_throttled\" => {\n                            throttle.set_throttled_periods(parts[1].parse::<u64>().unwrap());\n                        }\n                        \"throttled_usec\" => {\n                            throttle.set_throttled_time(parts[1].parse::<u64>().unwrap());\n                        }\n                        _ => {}\n                    }\n                }\n                let mut cpu_stats = CPUStat::new();\n                cpu_stats.set_throttling(throttle);\n                cpu_stats.set_usage(cpu_usage);\n                metrics.set_cpu(cpu_stats);\n            }\n            
Subsystem::Mem(mem_ctr) => {\n                let mem = mem_ctr.memory_stat();\n                let mut mem_entry = MemoryEntry::new();\n                mem_entry.set_usage(mem.usage_in_bytes);\n                let mut mem_stat = MemoryStat::new();\n                mem_stat.set_usage(mem_entry);\n                mem_stat.set_total_inactive_file(mem.stat.total_inactive_file);\n                metrics.set_memory(mem_stat);\n            }\n            Subsystem::Pid(pid_ctr) => {\n                // ignore cgroup NotFound error\n                let ignore_err = |cr: CgResult<u64>| -> CgResult<u64> {\n                    cr.or_else(|e| {\n                        if e.source()\n                            .and_then(<dyn StdError>::downcast_ref::<std::io::Error>)\n                            .map(std::io::Error::kind)\n                            == Some(std::io::ErrorKind::NotFound)\n                        {\n                            Ok(0)\n                        } else {\n                            Err(e)\n                        }\n                    })\n                };\n\n                let mut pid_stats = PidsStat::new();\n                pid_stats.set_current(\n                    ignore_err(pid_ctr.get_pid_current())\n                        .map_err(other_error!(\"get current pid\"))?,\n                );\n\n                pid_stats.set_limit(\n                    ignore_err(pid_ctr.get_pid_max().map(|val| match val {\n                        // See https://github.com/opencontainers/runc/blob/dbe8434359ca35af1c1e10df42b1f4391c1e1010/libcontainer/cgroups/fs/pids.go#L55\n                        MaxValue::Max => 0,\n                        MaxValue::Value(val) => val as u64,\n                    }))\n                    .map_err(other_error!(\"get pid limit\"))?,\n                );\n                metrics.set_pids(pid_stats)\n            }\n            _ => {}\n        }\n    }\n    Ok(metrics)\n}\n\n// get_cgroup will return either cgroup v1 or v2 
depending on system configuration\n#[cfg_attr(feature = \"tracing\", tracing::instrument(level = \"info\"))]\npub fn get_cgroup(pid: u32) -> Result<Cgroup> {\n    let hierarchies = hierarchies::auto();\n    let cgroup = if hierarchies.v2() {\n        let path = get_cgroups_v2_path_by_pid(pid)?;\n        Cgroup::load(hierarchies, path)\n    } else {\n        // get container main process cgroup\n        let path =\n            get_cgroups_relative_paths_by_pid(pid).map_err(other_error!(\"get process cgroup\"))?;\n        Cgroup::load_with_relative_paths(hierarchies::auto(), Path::new(\".\"), path)\n    };\n    Ok(cgroup)\n}\n\n/// Get the cgroups v2 path given a PID\n#[cfg_attr(feature = \"tracing\", tracing::instrument(level = \"info\"))]\npub fn get_cgroups_v2_path_by_pid(pid: u32) -> Result<PathBuf> {\n    // todo: should upstream to cgroups-rs\n    let path = format!(\"/proc/{}/cgroup\", pid);\n    let content = fs::read_to_string(path).map_err(io_error!(e, \"read cgroup\"))?;\n    let content = content.lines().next().unwrap_or(\"\");\n\n    let Ok(path) = parse_cgroups_v2_path(content)?.canonicalize() else {\n        return Err(Error::Other(\"cgroup path not found\".to_string()));\n    };\n    Ok(path)\n}\n\n// https://github.com/opencontainers/runc/blob/1950892f69597aa844cbf000fbdf77610dda3a44/libcontainer/cgroups/fs2/defaultpath.go#L83\n#[cfg_attr(feature = \"tracing\", tracing::instrument(level = \"info\"))]\nfn parse_cgroups_v2_path(content: &str) -> Result<PathBuf> {\n    // the entry for cgroup v2 is always in the format like `0::$PATH`\n    // where 0 is the hierarchy ID, the controller name is omitted in cgroup v2\n    // and $PATH is the cgroup path\n    // see https://docs.kernel.org/admin-guide/cgroup-v2.html\n    let Some(path) = content.strip_prefix(\"0::\") else {\n        return Err(Error::Other(format!(\"invalid cgroup path: {}\", content)));\n    };\n\n    let path = path.trim_start_matches('/');\n\n    
Ok(PathBuf::from(format!(\"/sys/fs/cgroup/{}\", path)))\n}\n\n/// Update process cgroup limits\n#[cfg_attr(feature = \"tracing\", tracing::instrument(level = \"info\"))]\npub fn update_resources(cgroup: &Cgroup, resources: &LinuxResources) -> Result<()> {\n    for sub_system in Cgroup::subsystems(cgroup) {\n        match sub_system {\n            Subsystem::Pid(pid_ctr) => {\n                // set maximum number of PIDs\n                if let Some(pids) = resources.pids() {\n                    pid_ctr\n                        .set_pid_max(MaxValue::Value(pids.limit()))\n                        .map_err(other_error!(\"set pid max\"))?;\n                }\n            }\n            Subsystem::Mem(mem_ctr) => {\n                if let Some(memory) = resources.memory() {\n                    //if swap and limit setting have\n                    if let (Some(limit), Some(swap)) = (memory.limit(), memory.swap()) {\n                        //get current memory_limit\n                        let current = mem_ctr.memory_stat().limit_in_bytes;\n                        // if the updated swap value is larger than the current memory limit set the swap changes first\n                        // then set the memory limit as swap must always be larger than the current limit\n                        if current < swap {\n                            mem_ctr\n                                .set_memswap_limit(swap)\n                                .map_err(other_error!(\"set memsw limit\"))?;\n                            mem_ctr\n                                .set_limit(limit)\n                                .map_err(other_error!(\"set mem limit\"))?;\n                        }\n                    }\n                    // set memory limit in bytes\n                    if let Some(limit) = memory.limit() {\n                        mem_ctr\n                            .set_limit(limit)\n                            .map_err(other_error!(\"set mem limit\"))?;\n                    
}\n\n                    // set memory swap limit in bytes\n                    if let Some(swap) = memory.swap() {\n                        mem_ctr\n                            .set_memswap_limit(swap)\n                            .map_err(other_error!(\"set memsw limit\"))?;\n                    }\n                }\n            }\n            Subsystem::CpuSet(cpuset_ctr) => {\n                if let Some(cpu) = resources.cpu() {\n                    // set CPUs to use within the cpuset\n                    if let Some(cpus) = cpu.cpus() {\n                        cpuset_ctr\n                            .set_cpus(cpus)\n                            .map_err(other_error!(\"set CPU sets\"))?;\n                    }\n\n                    // set list of memory nodes in the cpuset\n                    if let Some(mems) = cpu.mems() {\n                        cpuset_ctr\n                            .set_mems(mems)\n                            .map_err(other_error!(\"set CPU memes\"))?;\n                    }\n                }\n            }\n            Subsystem::Cpu(cpu_ctr) => {\n                if let Some(cpu) = resources.cpu() {\n                    // set CPU shares\n                    if let Some(shares) = cpu.shares() {\n                        cpu_ctr\n                            .set_shares(shares)\n                            .map_err(other_error!(\"set CPU share\"))?;\n                    }\n\n                    // set CPU hardcap limit\n                    if let Some(quota) = cpu.quota() {\n                        cpu_ctr\n                            .set_cfs_quota(quota)\n                            .map_err(other_error!(\"set CPU quota\"))?;\n                    }\n\n                    // set CPU hardcap period\n                    if let Some(period) = cpu.period() {\n                        cpu_ctr\n                            .set_cfs_period(period)\n                            .map_err(other_error!(\"set CPU period\"))?;\n                    
}\n                }\n            }\n            Subsystem::HugeTlb(ht_ctr) => {\n                // set the limit if \"pagesize\" hugetlb usage\n                if let Some(hp_limits) = resources.hugepage_limits() {\n                    for limit in hp_limits {\n                        ht_ctr\n                            .set_limit_in_bytes(limit.page_size().as_str(), limit.limit() as u64)\n                            .map_err(other_error!(\"set huge page limit\"))?;\n                    }\n                }\n            }\n            _ => {}\n        }\n    }\n    Ok(())\n}\n\n#[cfg(test)]\nmod tests {\n    use std::path::PathBuf;\n\n    use cgroups_rs::{\n        fs::{hierarchies, Cgroup},\n        CgroupPid,\n    };\n\n    use super::parse_cgroups_v2_path;\n    use crate::cgroup::{\n        add_task_to_cgroup, adjust_oom_score, read_process_oom_score, OOM_SCORE_ADJ_MAX,\n    };\n\n    #[test]\n    fn test_add_cgroup() {\n        let path = \"runc_shim_test_cgroup\";\n        let h = hierarchies::auto();\n\n        // create cgroup path first\n        let cg = Cgroup::new(h, path).unwrap();\n\n        let pid = std::process::id();\n        add_task_to_cgroup(path, pid).unwrap();\n        let cg_id = CgroupPid::from(pid as u64);\n        assert!(cg.tasks().contains(&cg_id));\n\n        // remove cgroup as possible\n        cg.remove_task_by_tgid(cg_id).unwrap();\n        cg.delete().unwrap()\n    }\n\n    #[test]\n    fn test_adjust_oom_score() {\n        let pid = std::process::id();\n        let score = read_process_oom_score(pid).unwrap();\n\n        adjust_oom_score(pid).unwrap();\n        let new = read_process_oom_score(pid).unwrap();\n        if score < OOM_SCORE_ADJ_MAX {\n            assert_eq!(new, score + 1)\n        } else {\n            assert_eq!(new, OOM_SCORE_ADJ_MAX)\n        }\n    }\n\n    #[test]\n    fn test_parse_cgroups_v2_path() {\n        let path = \"0::/user.slice/user-1000.slice/session-2.scope\";\n        assert_eq!(\n            
parse_cgroups_v2_path(path).unwrap(),\n            PathBuf::from(\"/sys/fs/cgroup/user.slice/user-1000.slice/session-2.scope\")\n        );\n    }\n\n    #[test]\n    fn test_parse_cgroups_v2_path_empty() {\n        let path = \"0::\";\n        assert_eq!(\n            parse_cgroups_v2_path(path).unwrap(),\n            PathBuf::from(\"/sys/fs/cgroup/\")\n        );\n    }\n\n    #[test]\n    fn test_parse_cgroups_v2_path_kube() {\n        let path = \"0::/kubepods-besteffort-pod8.slice:cri-containerd:8\";\n        assert_eq!(\n            parse_cgroups_v2_path(path).unwrap(),\n            PathBuf::from(\"/sys/fs/cgroup/kubepods-besteffort-pod8.slice:cri-containerd:8\")\n        );\n    }\n}\n"
  },
  {
    "path": "crates/shim/src/error.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse thiserror::Error;\n\nuse crate::{\n    monitor::ExitEvent,\n    protos::{protobuf, ttrpc},\n};\n\npub type Result<T> = std::result::Result<T, Error>;\n\n#[derive(Debug, Error)]\npub enum Error {\n    /// Invalid command line arguments.\n    #[error(\"Failed to parse command line: {0}\")]\n    InvalidArgument(String),\n\n    /// TTRPC specific error.\n    #[error(\"TTRPC error: {0}\")]\n    Ttrpc(#[from] ttrpc::Error),\n\n    #[error(\"Protobuf error: {0}\")]\n    Protobuf(#[from] protobuf::Error),\n\n    #[error(\"{context} error: {err}\")]\n    IoError {\n        context: String,\n        #[source]\n        err: std::io::Error,\n    },\n\n    #[error(\"Env error: {0}\")]\n    Env(#[from] std::env::VarError),\n\n    #[error(\"Failed to setup logger: {0}\")]\n    Setup(#[from] log::SetLoggerError),\n\n    #[cfg(unix)]\n    #[error(\"Nix error: {0}\")]\n    Nix(#[from] nix::Error),\n\n    #[error(\"Failed to get envelope timestamp: {0}\")]\n    Timestamp(#[from] std::time::SystemTimeError),\n\n    #[error(\"Not Found: {0}\")]\n    NotFoundError(String),\n\n    #[error(\"Failed pre condition: {0}\")]\n    FailedPreconditionError(String),\n\n    #[cfg(unix)]\n    #[error(\"{context} error: {err}\")]\n    MountError {\n        context: String,\n        #[source]\n        err: nix::Error,\n    },\n\n    #[error(\"Failed to convert json object: 
{0}\")]\n    JSON(#[from] serde_json::Error),\n\n    #[error(\"Failed to parse integer: {0}\")]\n    ParseInt(#[from] std::num::ParseIntError),\n\n    #[error(\"Failed to send exit event: {0}\")]\n    Send(#[from] std::sync::mpsc::SendError<ExitEvent>),\n\n    #[error(\"Deadline exceeded: {0}\")]\n    DeadlineExceeded(String),\n\n    #[error(\"Other: {0}\")]\n    Other(String),\n\n    #[error(\"Unimplemented method: {0}\")]\n    Unimplemented(String),\n}\n\nimpl From<Error> for ttrpc::Error {\n    fn from(e: Error) -> Self {\n        match e {\n            Error::InvalidArgument(ref s) => {\n                ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::INVALID_ARGUMENT, s))\n            }\n            Error::NotFoundError(ref s) => {\n                ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::NOT_FOUND, s))\n            }\n            Error::FailedPreconditionError(ref s) => {\n                ttrpc::Error::RpcStatus(ttrpc::get_status(ttrpc::Code::FAILED_PRECONDITION, s))\n            }\n            Error::Ttrpc(e) => e,\n            _ => ttrpc::Error::Others(e.to_string()),\n        }\n    }\n}\n\n#[macro_export]\nmacro_rules! io_error {\n    ($e:ident, $($args:tt)+) => {\n        |$e| Error::IoError {\n            context: format_args!($($args)+).to_string(),\n            err: $e,\n        }\n    };\n}\n\n#[macro_export]\nmacro_rules! mount_error {\n    ($e:ident, $($args:tt)+) => {\n        |$e| Error::MountError {\n            context: format_args!($($args)+).to_string(),\n            err: $e,\n        }\n    };\n}\n\n#[macro_export]\nmacro_rules! other {\n    ($($args:tt)*) => {\n        Error::Other(format_args!($($args)*).to_string())\n    };\n}\n\n#[macro_export]\nmacro_rules! other_error {\n    ($s:expr) => {\n        |e| Error::Other(format!(\"{}: {}\", $s, e))\n    };\n}\n"
  },
  {
    "path": "crates/shim/src/event.rs",
    "content": "use containerd_shim_protos::{events::task::*, protobuf::MessageDyn};\n\npub trait Event: MessageDyn {\n    fn topic(&self) -> String;\n}\n\nimpl Event for TaskCreate {\n    fn topic(&self) -> String {\n        \"/tasks/create\".to_string()\n    }\n}\n\nimpl Event for TaskStart {\n    fn topic(&self) -> String {\n        \"/tasks/start\".to_string()\n    }\n}\n\nimpl Event for TaskExecAdded {\n    fn topic(&self) -> String {\n        \"/tasks/exec-added\".to_string()\n    }\n}\n\nimpl Event for TaskExecStarted {\n    fn topic(&self) -> String {\n        \"/tasks/exec-started\".to_string()\n    }\n}\n\nimpl Event for TaskPaused {\n    fn topic(&self) -> String {\n        \"/tasks/paused\".to_string()\n    }\n}\n\nimpl Event for TaskResumed {\n    fn topic(&self) -> String {\n        \"/tasks/resumed\".to_string()\n    }\n}\n\nimpl Event for TaskExit {\n    fn topic(&self) -> String {\n        \"/tasks/exit\".to_string()\n    }\n}\n\nimpl Event for TaskDelete {\n    fn topic(&self) -> String {\n        \"/tasks/delete\".to_string()\n    }\n}\n\nimpl Event for TaskOOM {\n    fn topic(&self) -> String {\n        \"/tasks/oom\".to_string()\n    }\n}\n\nimpl Event for TaskCheckpointed {\n    fn topic(&self) -> String {\n        \"/tasks/checkpointed\".to_string()\n    }\n}\n"
  },
  {
    "path": "crates/shim/src/lib.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\n#![cfg_attr(feature = \"docs\", doc = include_str!(\"../README.md\"))]\n\nuse std::{fs::File, path::PathBuf};\n#[cfg(windows)]\nuse std::{fs::OpenOptions, os::windows::prelude::OpenOptionsExt};\n#[cfg(unix)]\nuse std::{os::unix::net::UnixListener, path::Path};\n\npub use containerd_shim_protos as protos;\npub use protos::{\n    shim::shim::DeleteResponse,\n    ttrpc::{context::Context, Result as TtrpcResult},\n};\nuse sha2::{Digest, Sha256};\n#[cfg(windows)]\nuse windows_sys::Win32::Storage::FileSystem::FILE_FLAG_OVERLAPPED;\n\n#[cfg(feature = \"async\")]\npub use crate::asynchronous::*;\npub use crate::error::{Error, Result};\n#[cfg(not(feature = \"async\"))]\npub use crate::synchronous::*;\n\n#[macro_use]\npub mod error;\n\nmod args;\npub use args::{parse, Flags};\n#[cfg(feature = \"async\")]\npub mod asynchronous;\npub mod cgroup;\npub mod event;\npub mod logger;\npub mod monitor;\n#[cfg(target_os = \"linux\")]\npub mod mount_linux;\n#[cfg(not(target_os = \"linux\"))]\npub mod mount_other;\n#[cfg(target_os = \"linux\")]\npub use mount_linux as mount;\n#[cfg(not(target_os = \"linux\"))]\npub use mount_other as mount;\nmod reap;\n#[cfg(not(feature = \"async\"))]\npub mod synchronous;\npub mod util;\n\n/// Generated request/response structures.\npub mod api {\n    pub use super::protos::{\n        api::Status,\n        shim::{oci::Options, 
shim::*},\n        types::empty::Empty,\n    };\n}\n\nmacro_rules! cfg_not_async {\n    ($($item:item)*) => {\n        $(\n            #[cfg(not(feature = \"async\"))]\n            #[cfg_attr(docsrs, doc(cfg(not(feature = \"async\"))))]\n            $item\n        )*\n    }\n}\n\nmacro_rules! cfg_async {\n    ($($item:item)*) => {\n        $(\n            #[cfg(feature = \"async\")]\n            #[cfg_attr(docsrs, doc(cfg(feature = \"async\")))]\n            $item\n        )*\n    }\n}\n\ncfg_not_async! {\n    pub use crate::synchronous::publisher;\n    pub use protos::shim::shim_ttrpc::Task;\n    pub use protos::ttrpc::TtrpcContext;\n}\n\ncfg_async! {\n    pub use crate::asynchronous::publisher;\n    pub use protos::shim_async::Task;\n    pub use protos::ttrpc::r#async::TtrpcContext;\n}\n\nconst TTRPC_ADDRESS: &str = \"TTRPC_ADDRESS\";\n\n/// Config of shim binary options provided by shim implementations\n#[derive(Debug)]\npub struct Config {\n    /// Disables automatic configuration of logrus to use the shim FIFO\n    pub no_setup_logger: bool,\n    /// Sets the default log level. 
Default is info\n    pub default_log_level: String,\n    /// Disables the shim binary from reaping any child process implicitly\n    pub no_reaper: bool,\n    /// Disables setting the shim as a child subreaper.\n    pub no_sub_reaper: bool,\n}\n\nimpl Default for Config {\n    fn default() -> Self {\n        Self {\n            no_setup_logger: false,\n            default_log_level: \"info\".to_string(),\n            no_reaper: false,\n            no_sub_reaper: false,\n        }\n    }\n}\n\n/// Startup options received from containerd to start new shim instance.\n///\n/// These will be passed via [`Shim::start_shim`] to shim.\n#[derive(Debug, Default)]\npub struct StartOpts {\n    /// ID of the container.\n    pub id: String,\n    /// Binary path to publish events back to containerd.\n    pub publish_binary: String,\n    /// Address of the containerd's main socket.\n    pub address: String,\n    /// TTRPC socket address.\n    pub ttrpc_address: String,\n    /// Namespace for the container.\n    pub namespace: String,\n\n    pub debug: bool,\n}\n\n#[cfg(target_os = \"linux\")]\npub const SOCKET_ROOT: &str = \"/run/containerd\";\n\n#[cfg(target_os = \"macos\")]\npub const SOCKET_ROOT: &str = \"/var/run/containerd\";\n\n#[cfg(target_os = \"windows\")]\npub const SOCKET_ROOT: &str = r\"\\\\.\\pipe\\containerd-containerd\";\n\n/// Make socket path from containerd socket path, namespace and id.\n#[cfg_attr(feature = \"tracing\", tracing::instrument(level = \"Info\"))]\npub fn socket_address(socket_path: &str, namespace: &str, id: &str) -> String {\n    let path = PathBuf::from(socket_path)\n        .join(namespace)\n        .join(id)\n        .display()\n        .to_string();\n    let hash = {\n        let mut hasher = Sha256::new();\n        hasher.update(path);\n        hasher.finalize()\n    };\n    if cfg!(unix) {\n        format!(\"unix://{}/s/{:x}\", SOCKET_ROOT, hash)\n    } else if cfg!(windows) {\n        format!(r\"\\\\.\\pipe\\containerd-shim-{:x}-pipe\", 
hash)\n    } else {\n        panic!(\"unsupported platform\")\n    }\n}\n\n#[cfg(unix)]\nfn parse_sockaddr(addr: &str) -> &str {\n    if let Some(addr) = addr.strip_prefix(\"unix://\") {\n        return addr;\n    }\n\n    if let Some(addr) = addr.strip_prefix(\"vsock://\") {\n        return addr;\n    }\n\n    addr\n}\n\n#[cfg(windows)]\nfn start_listener(address: &str) -> std::io::Result<()> {\n    let mut opts = OpenOptions::new();\n    opts.read(true)\n        .write(true)\n        .custom_flags(FILE_FLAG_OVERLAPPED);\n    if let Ok(f) = opts.open(address) {\n        info!(\"found existing named pipe: {}\", address);\n        drop(f);\n        return Err(std::io::Error::new(\n            std::io::ErrorKind::AddrInUse,\n            \"address already exists\",\n        ));\n    }\n\n    // windows starts the listener on the second invocation of the shim\n    Ok(())\n}\n\n#[cfg(unix)]\nfn start_listener(address: &str) -> std::io::Result<UnixListener> {\n    let path = parse_sockaddr(address);\n    // Try to create the needed directory hierarchy.\n    if let Some(parent) = Path::new(path).parent() {\n        std::fs::create_dir_all(parent)?;\n    }\n    UnixListener::bind(path)\n}\n\npub struct Console {\n    pub file: File,\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::start_listener;\n\n    #[test]\n    #[cfg(unix)]\n    fn test_start_listener() {\n        let tmpdir = tempfile::tempdir().unwrap();\n        let path = tmpdir.path().to_str().unwrap().to_owned();\n\n        // A little dangerous, may be turned on under controlled environment.\n        //assert!(start_listener(\"/\").is_err());\n        //assert!(start_listener(\"/tmp\").is_err());\n\n        let socket = path + \"/ns1/id1/socket\";\n        let _listener = start_listener(&socket).unwrap();\n        let _listener2 = start_listener(&socket).expect_err(\"socket should already in use\");\n\n        let socket2 = socket + \"/socket\";\n        assert!(start_listener(&socket2).is_err());\n\n        let 
path = tmpdir.path().to_str().unwrap().to_owned();\n        let txt_file = path + \"demo.txt\";\n        std::fs::write(&txt_file, \"test\").unwrap();\n        assert!(start_listener(&txt_file).is_err());\n        let context = std::fs::read_to_string(&txt_file).unwrap();\n        assert_eq!(context, \"test\");\n    }\n\n    #[test]\n    #[cfg(windows)]\n    fn test_start_listener_windows() {\n        use mio::windows::NamedPipe;\n\n        let named_pipe = \"\\\\\\\\.\\\\pipe\\\\test-pipe-duplicate\".to_string();\n\n        start_listener(&named_pipe).unwrap();\n        let _pipe_server = NamedPipe::new(named_pipe.clone()).unwrap();\n        start_listener(&named_pipe).expect_err(\"address already exists\");\n    }\n}\n"
  },
  {
    "path": "crates/shim/src/logger.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::{\n    borrow::BorrowMut,\n    fmt::Write as fmtwrite,\n    fs::{File, OpenOptions},\n    io::{self, Write},\n    path::Path,\n    str::FromStr,\n    sync::Mutex,\n};\n\nuse log::{\n    kv::{self, Visitor},\n    Metadata, Record,\n};\nuse time::{format_description::well_known::Rfc3339, OffsetDateTime};\n\nuse crate::error::Error;\n\npub const LOG_ENV: &str = \"RUST_LOG\";\n\npub struct FifoLogger {\n    file: Mutex<File>,\n}\n\nimpl FifoLogger {\n    pub fn new(_namespace: &str, _id: &str) -> io::Result<FifoLogger> {\n        #[cfg(unix)]\n        let logger = Self::with_path(\"log\")?;\n\n        #[cfg(windows)]\n        let logger = {\n            let pipe_name = format!(r\"\\\\.\\pipe\\containerd-shim-{_namespace}-{_id}-log\");\n            Self::with_named_pipe(&pipe_name)?\n        };\n\n        Ok(logger)\n    }\n\n    #[allow(dead_code)]\n    pub fn with_path(path: impl AsRef<Path>) -> io::Result<FifoLogger> {\n        let f = OpenOptions::new()\n            .write(true)\n            .read(false)\n            .create(false)\n            .open(path)?;\n\n        Ok(FifoLogger::with_file(f))\n    }\n\n    pub fn with_file(file: File) -> FifoLogger {\n        let file = Mutex::new(file);\n        FifoLogger { file }\n    }\n\n    #[cfg(windows)]\n    pub fn with_named_pipe(name: &str) -> io::Result<FifoLogger> {\n        // 
Containerd on windows expects the log to be a named pipe in the format of \\\\.\\pipe\\containerd-<namespace>-<id>-log\n        // There is an assumption that there is always only one client connected which is containerd.\n        // If there is a restart of containerd then logs during that time period will be lost.\n        //\n        // https://github.com/containerd/containerd/blob/v1.7.0/runtime/v2/shim_windows.go#L77\n        // https://github.com/microsoft/hcsshim/blob/5871d0c4436f131c377655a3eb09fc9b5065f11d/cmd/containerd-shim-runhcs-v1/serve.go#L132-L137\n\n        use std::os::windows::io::{AsRawHandle, BorrowedHandle};\n\n        use mio::{windows::NamedPipe, Events, Interest, Poll, Token};\n\n        let mut pipe_server = NamedPipe::new(name)?;\n\n        let file = unsafe { BorrowedHandle::borrow_raw(pipe_server.as_raw_handle()) }\n            .try_clone_to_owned()?;\n        let file = File::from(file);\n\n        let poll = Poll::new()?;\n        poll.registry().register(\n            &mut pipe_server,\n            Token(0),\n            Interest::READABLE | Interest::WRITABLE,\n        )?;\n\n        std::thread::spawn(move || {\n            let pipe_server = pipe_server;\n            let mut poll = poll;\n            let mut events = Events::with_capacity(128);\n            let _ = pipe_server.connect();\n            loop {\n                poll.poll(&mut events, None).unwrap();\n\n                for event in events.iter() {\n                    if event.is_writable() {\n                        match pipe_server.connect() {\n                            Ok(()) => {}\n                            Err(e) if e.kind() == io::ErrorKind::Interrupted => {\n                                // this would block just keep processing\n                            }\n                            Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {\n                                // this would block just keep processing\n                            }\n         
                   Err(e) => {\n                                panic!(\"Error connecting to client: {}\", e);\n                            }\n                        };\n                    }\n                    if event.is_readable() {\n                        pipe_server.disconnect().unwrap();\n                    }\n                }\n            }\n        });\n\n        Ok(FifoLogger::with_file(file))\n    }\n}\n\npub(crate) struct SimpleWriteVistor {\n    key_values: String,\n}\n\nimpl<'kvs> Visitor<'kvs> for SimpleWriteVistor {\n    fn visit_pair(&mut self, k: kv::Key<'kvs>, v: kv::Value<'kvs>) -> Result<(), kv::Error> {\n        write!(&mut self.key_values, \" {}=\\\"{}\\\"\", k, v)?;\n        Ok(())\n    }\n}\n\nimpl SimpleWriteVistor {\n    pub(crate) fn new() -> SimpleWriteVistor {\n        SimpleWriteVistor {\n            key_values: String::new(),\n        }\n    }\n\n    pub(crate) fn as_str(&self) -> &str {\n        &self.key_values\n    }\n}\n\nimpl log::Log for FifoLogger {\n    fn enabled(&self, metadata: &Metadata) -> bool {\n        metadata.level() <= log::max_level()\n    }\n\n    fn log(&self, record: &Record) {\n        if self.enabled(record.metadata()) {\n            let mut guard = self.file.lock().unwrap();\n\n            // collect key_values but don't fail if error parsing\n            let mut writer = SimpleWriteVistor::new();\n            let _ = record.key_values().visit(&mut writer);\n\n            // The logger server may have temporarily shutdown, ignore the error instead of panic.\n            //\n            // Manual for pipe/FIFO: https://man7.org/linux/man-pages/man7/pipe.7.html\n            // If all file descriptors referring to the read end of a pipe have been closed, then\n            // a write(2) will cause a SIGPIPE signal to be generated for the calling process.\n            // If the calling process is ignoring this signal, then write(2) fails with the error\n            // EPIPE.\n            let _ = writeln!(\n  
              guard.borrow_mut(),\n                \"time=\\\"{}\\\" level={}{} msg=\\\"{}\\\"\\n\",\n                rfc3339_formated(),\n                record.level().as_str().to_lowercase(),\n                writer.as_str(),\n                record.args()\n            );\n        }\n    }\n\n    fn flush(&self) {\n        // The logger server may have temporarily shutdown, ignore the error instead of panic.\n        let _ = self.file.lock().unwrap().flush();\n    }\n}\n\npub fn init(debug: bool, default_log_level: &str, namespace: &str, id: &str) -> Result<(), Error> {\n    let logger = FifoLogger::new(namespace, id).map_err(io_error!(e, \"failed to init logger\"))?;\n    configure_logging_level(debug, default_log_level);\n    log::set_boxed_logger(Box::new(logger))?;\n    Ok(())\n}\n\nfn configure_logging_level(debug: bool, default_log_level: &str) {\n    let debug_level = std::env::var(LOG_ENV).unwrap_or(default_log_level.to_string());\n    let debug_level = log::LevelFilter::from_str(&debug_level).unwrap_or(log::LevelFilter::Info);\n    let level = if debug && log::LevelFilter::Debug > debug_level {\n        log::LevelFilter::Debug\n    } else {\n        debug_level\n    };\n    log::set_max_level(level);\n}\n\npub(crate) fn rfc3339_formated() -> String {\n    OffsetDateTime::now_utc()\n        .format(&Rfc3339)\n        .unwrap_or(OffsetDateTime::now_utc().to_string())\n}\n\n#[cfg(test)]\nmod tests {\n    use std::fs;\n\n    use log::{Log, Record};\n\n    use super::*;\n    use crate::Config;\n\n    #[test]\n    fn test_init_log_level() -> Result<(), Error> {\n        let config = Config::default();\n\n        configure_logging_level(false, &config.default_log_level);\n        assert_eq!(log::LevelFilter::Info, log::max_level());\n\n        // Default for debug flag from containerd\n        configure_logging_level(true, &config.default_log_level);\n        assert_eq!(log::LevelFilter::Debug, log::max_level());\n\n        // ENV different than default\n      
  std::env::set_var(LOG_ENV, \"error\");\n        configure_logging_level(false, &config.default_log_level);\n        assert_eq!(log::LevelFilter::Error, log::max_level());\n\n        std::env::set_var(LOG_ENV, \"warn\");\n        configure_logging_level(false, &config.default_log_level);\n        assert_eq!(log::LevelFilter::Warn, log::max_level());\n\n        std::env::set_var(LOG_ENV, \"off\");\n        configure_logging_level(false, &config.default_log_level);\n        assert_eq!(log::LevelFilter::Off, log::max_level());\n\n        std::env::set_var(LOG_ENV, \"trace\");\n        configure_logging_level(false, &config.default_log_level);\n        assert_eq!(log::LevelFilter::Trace, log::max_level());\n\n        std::env::set_var(LOG_ENV, \"debug\");\n        configure_logging_level(false, &config.default_log_level);\n\n        // ENV Different than default from debug flag\n        configure_logging_level(true, &config.default_log_level);\n        assert_eq!(log::LevelFilter::Debug, log::max_level());\n\n        std::env::set_var(LOG_ENV, \"trace\");\n        configure_logging_level(true, &config.default_log_level);\n        assert_eq!(log::LevelFilter::Trace, log::max_level());\n\n        std::env::set_var(LOG_ENV, \"info\");\n        configure_logging_level(true, &config.default_log_level);\n        assert_eq!(log::LevelFilter::Debug, log::max_level());\n\n        std::env::set_var(LOG_ENV, \"off\");\n        configure_logging_level(true, &config.default_log_level);\n        assert_eq!(log::LevelFilter::Debug, log::max_level());\n        Ok(())\n    }\n\n    #[test]\n    fn test_fifo_log() {\n        #[cfg(unix)]\n        use nix::{sys::stat, unistd};\n\n        let tmpdir = tempfile::tempdir().unwrap();\n        let path = tmpdir.path().to_str().unwrap().to_owned() + \"/log\";\n\n        #[cfg(unix)]\n        unistd::mkfifo(Path::new(&path), stat::Mode::S_IRWXU).unwrap();\n\n        #[cfg(windows)]\n        File::create(path.clone()).unwrap();\n\n        let 
path1 = path.clone();\n        let thread = std::thread::spawn(move || {\n            let _fifo = OpenOptions::new()\n                .write(false)\n                .read(true)\n                .create(false)\n                .open(path1)\n                .unwrap();\n        });\n\n        let logger = FifoLogger::with_path(&path).unwrap();\n        //log::set_boxed_logger(Box::new(logger)).map_err(Error::Setup)?;\n        log::set_max_level(log::LevelFilter::Info);\n        thread.join().unwrap();\n\n        let kvs: &[(&str, i32)] = &[(\"a\", 1), (\"b\", 2)];\n        let record = Record::builder()\n            .level(log::Level::Error)\n            .line(Some(1))\n            .file(Some(\"sample file\"))\n            .key_values(&kvs)\n            .build();\n        logger.log(&record);\n        logger.flush();\n    }\n\n    #[test]\n    fn test_supports_structured_logging() {\n        let tmpdir = tempfile::tempdir().unwrap();\n        let path = tmpdir.path().to_str().unwrap().to_owned() + \"/log\";\n        File::create(path.clone()).unwrap();\n\n        let logger = FifoLogger::with_path(&path).unwrap();\n        log::set_max_level(log::LevelFilter::Info);\n\n        let record = Record::builder()\n            .level(log::Level::Info)\n            .args(format_args!(\"no keys\"))\n            .build();\n        logger.log(&record);\n        logger.flush();\n\n        let contents = fs::read_to_string(path.clone()).unwrap();\n        assert!(contents.contains(\"level=info msg=\\\"no keys\\\"\"));\n\n        let kvs: &[(&str, i32)] = &[(\"key\", 1), (\"b\", 2)];\n        let record = Record::builder()\n            .level(log::Level::Error)\n            .key_values(&kvs)\n            .args(format_args!(\"structured!\"))\n            .build();\n        logger.log(&record);\n        logger.flush();\n\n        let contents = fs::read_to_string(path).unwrap();\n        assert!(contents.contains(\"level=error key=\\\"1\\\" b=\\\"2\\\" msg=\\\"structured!\\\"\"));\n  
  }\n}\n\n#[cfg(all(windows, test))]\nmod windows_tests {\n    use std::{\n        fs::OpenOptions,\n        io::Read,\n        os::windows::{\n            fs::OpenOptionsExt,\n            io::{FromRawHandle, IntoRawHandle},\n            prelude::AsRawHandle,\n        },\n        time::Duration,\n    };\n\n    use log::{Log, Record};\n    use mio::{windows::NamedPipe, Events, Interest, Poll, Token};\n    use windows_sys::Win32::{\n        Foundation::ERROR_PIPE_NOT_CONNECTED, Storage::FileSystem::FILE_FLAG_OVERLAPPED,\n    };\n\n    use super::*;\n\n    #[test]\n    fn test_namedpipe_log_can_write_before_client_connected() {\n        let ns = \"test\".to_string();\n        let id = \"notconnected\".to_string();\n        let logger = FifoLogger::new(&ns, &id).unwrap();\n\n        // test can write before a reader is connected (should succeed but the messages will be dropped)\n        log::set_max_level(log::LevelFilter::Info);\n        let record = Record::builder()\n            .level(log::Level::Info)\n            .line(Some(1))\n            .file(Some(\"sample file\"))\n            .args(format_args!(\"hello\"))\n            .build();\n        logger.log(&record);\n        logger.flush();\n    }\n\n    #[test]\n    fn test_namedpipe_log() {\n        use std::fs::File;\n\n        let ns = \"test\".to_string();\n        let id = \"clients\".to_string();\n        let pipe_name = format!(\"\\\\\\\\.\\\\pipe\\\\containerd-shim-{}-{}-log\", ns, id);\n\n        let logger = FifoLogger::new(&ns, &id).unwrap();\n        let mut client = create_client(pipe_name.as_str());\n\n        log::set_max_level(log::LevelFilter::Info);\n        let kvs: &[(&str, i32)] = &[(\"key\", 1), (\"b\", 2)];\n        let record = Record::builder()\n            .level(log::Level::Info)\n            .line(Some(1))\n            .key_values(&kvs)\n            .args(format_args!(\"hello\"))\n            .build();\n        logger.log(&record);\n        logger.flush();\n\n        let buf = 
read_message(&mut client, 73);\n        let message = std::str::from_utf8(&buf).unwrap();\n        assert!(message.starts_with(\"time=\\\"\"), \"message was: {:?}\", message);\n        assert!(\n            message.contains(\"level=info key=\\\"1\\\" b=\\\"2\\\" msg=\\\"hello\\\"\\n\"),\n            \"message was: {:?}\",\n            message\n        );\n\n        // test that we can reconnect after a reader disconnects\n        // we need to get the raw handle and drop that as well to force full disconnect\n        // and give a few milliseconds for the disconnect to happen\n        println!(\"dropping client\");\n        let handle = client.as_raw_handle();\n        drop(client);\n        let f = unsafe { File::from_raw_handle(handle) };\n        drop(f);\n        std::thread::sleep(Duration::from_millis(100));\n\n        let mut client2 = create_client(pipe_name.as_str());\n        logger.log(&record);\n        logger.flush();\n\n        read_message(&mut client2, 51);\n    }\n\n    fn read_message(client: &mut NamedPipe, length: usize) -> Vec<u8> {\n        let mut poll = Poll::new().unwrap();\n        poll.registry()\n            .register(client, Token(1), Interest::READABLE)\n            .unwrap();\n        let mut events = Events::with_capacity(128);\n        let mut buf = vec![0; length];\n        loop {\n            poll.poll(&mut events, Some(Duration::from_millis(10)))\n                .unwrap();\n            match client.read(&mut buf) {\n                Ok(0) => {\n                    panic!(\"Read no bytes from pipe\")\n                }\n                Ok(_) => {\n                    break;\n                }\n                Err(e) if e.raw_os_error() == Some(ERROR_PIPE_NOT_CONNECTED as i32) => {\n                    panic!(\"not connected to the pipe\");\n                }\n                Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {\n                    continue;\n                }\n                Err(e) => panic!(\"Error reading 
from pipe: {}\", e),\n            }\n        }\n        buf.to_vec()\n    }\n\n    fn create_client(pipe_name: &str) -> mio::windows::NamedPipe {\n        let mut opts = OpenOptions::new();\n        opts.read(true)\n            .write(true)\n            .custom_flags(FILE_FLAG_OVERLAPPED);\n        let file = opts.open(pipe_name).unwrap();\n\n        unsafe { NamedPipe::from_raw_handle(file.into_raw_handle()) }\n    }\n}\n"
  },
  {
    "path": "crates/shim/src/monitor.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\nuse std::fmt;\n\n#[cfg(feature = \"async\")]\npub use crate::asynchronous::monitor::*;\n#[cfg(not(feature = \"async\"))]\npub use crate::synchronous::monitor::*;\n\n#[derive(Clone, Eq, Hash, PartialEq)]\npub enum Topic {\n    Pid,\n    Exec,\n    All,\n}\n\n#[derive(Debug)]\npub struct ExitEvent {\n    // what kind of a thing exit\n    pub subject: Subject,\n    pub exit_code: i32,\n}\n\nimpl fmt::Display for ExitEvent {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        match &self.subject {\n            Subject::Pid(pid) => {\n                write!(f, \"PID {} exit with code {}\", pid, self.exit_code)\n            }\n            Subject::Exec(cid, eid) => {\n                write!(\n                    f,\n                    \"EXEC process {} inside {} exit with code {}\",\n                    eid, cid, self.exit_code\n                )\n            }\n        }\n    }\n}\n\n#[derive(Clone, Debug)]\npub enum Subject {\n    // process pid\n    Pid(i32),\n    // exec with containerd id and exec id for vm container,\n    // if exec is empty, then the event is for the container\n    Exec(String, String),\n}\n"
  },
  {
    "path": "crates/shim/src/mount_linux.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\nuse std::{\n    collections::HashMap,\n    env,\n    fs::File,\n    io::BufRead,\n    ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, Not},\n    os::fd::AsRawFd,\n    path::Path,\n    sync::LazyLock,\n};\n\n#[cfg(not(feature = \"async\"))]\nuse log::error;\nuse nix::mount::{mount, MntFlags, MsFlags};\n#[cfg(feature = \"async\")]\nuse nix::sched::{unshare, CloneFlags};\n#[cfg(not(feature = \"async\"))]\nuse nix::unistd::{fork, ForkResult};\n\nuse crate::error::{Error, Result};\n#[cfg(not(feature = \"async\"))]\nuse crate::monitor::{monitor_subscribe, wait_pid, Topic};\n\nstruct Flag {\n    clear: bool,\n    flags: MsFlags,\n}\n\n#[cfg(target_os = \"linux\")]\n#[derive(Debug, Default)]\npub struct LoopParams {\n    readonly: bool,\n    auto_clear: bool,\n    direct: bool,\n}\n\n#[repr(C)]\n#[derive(Debug)]\npub struct LoopInfo {\n    device: u64,\n    inode: u64,\n    rdevice: u64,\n    offset: u64,\n    size_limit: u64,\n    number: u32,\n    encrypt_type: u32,\n    encrypt_key_size: u32,\n    flags: u32,\n    file_name: [u8; 64],\n    crypt_name: [u8; 64],\n    encrypt_key: [u8; 32],\n    init: [u64; 2],\n}\n\nimpl Default for LoopInfo {\n    fn default() -> Self {\n        LoopInfo {\n            device: 0,\n            inode: 0,\n            rdevice: 0,\n            offset: 0,\n            size_limit: 0,\n            number: 0,\n            
encrypt_type: 0,\n            encrypt_key_size: 0,\n            flags: 0,\n            file_name: [0; 64],\n            crypt_name: [0; 64],\n            encrypt_key: [0; 32],\n            init: [0; 2],\n        }\n    }\n}\n\nconst LOOP_CONTROL_PATH: &str = \"/dev/loop-control\";\n#[cfg(feature = \"async\")]\nconst LOOP_DEV_FORMAT: &str = \"/dev/loop\";\n#[cfg(feature = \"async\")]\nconst EBUSY_STRING: &str = \"device or resource busy\";\nconst OVERLAY_LOWERDIR_PREFIX: &str = \"lowerdir=\";\n\n#[allow(dead_code)]\n#[derive(Debug, Default, Clone)]\nstruct MountInfo {\n    /// id is a unique identifier of the mount (may be reused after umount).\n    pub id: u32,\n    /// parent is the ID of the parent mount (or of self for the root\n    /// of this mount namespace's mount tree).\n    pub parent: u32,\n    /// major and minor are the major and the minor components of the Dev\n    /// field of unix.Stat_t structure returned by unix.*Stat calls for\n    /// files on this filesystem.\n    pub major: u32,\n    pub minor: u32,\n    /// root is the pathname of the directory in the filesystem which forms\n    /// the root of this mount.\n    pub root: String,\n    /// mountpoint is the pathname of the mount point relative to the\n    /// process's root directory.\n    pub mountpoint: String,\n    /// options is a comma-separated list of mount options.\n    pub options: String,\n    /// optional are zero or more fields of the form \"tag[:value]\",\n    /// separated by a space.  Currently, the possible optional fields are\n    /// \"shared\", \"master\", \"propagate_from\", and \"unbindable\". 
For more\n    /// information, see mount_namespaces(7) Linux man page.\n    pub optional: String,\n    /// fs_type is the filesystem type in the form \"type[.subtype]\".\n    pub fs_type: String,\n    /// source is filesystem-specific information, or \"none\".\n    pub source: String,\n    /// vfs_options is a comma-separated list of superblock options.\n    pub vfs_options: String,\n}\n\nstatic MOUNT_FLAGS: LazyLock<HashMap<&'static str, Flag>> = LazyLock::new(|| {\n    let mut mf = HashMap::new();\n    let zero: MsFlags = MsFlags::empty();\n    mf.insert(\n        \"async\",\n        Flag {\n            clear: true,\n            flags: MsFlags::MS_SYNCHRONOUS,\n        },\n    );\n    mf.insert(\n        \"atime\",\n        Flag {\n            clear: true,\n            flags: MsFlags::MS_NOATIME,\n        },\n    );\n    mf.insert(\n        \"bind\",\n        Flag {\n            clear: false,\n            flags: MsFlags::MS_BIND,\n        },\n    );\n    mf.insert(\n        \"defaults\",\n        Flag {\n            clear: false,\n            flags: zero,\n        },\n    );\n    mf.insert(\n        \"dev\",\n        Flag {\n            clear: true,\n            flags: MsFlags::MS_NODEV,\n        },\n    );\n    mf.insert(\n        \"diratime\",\n        Flag {\n            clear: true,\n            flags: MsFlags::MS_NODIRATIME,\n        },\n    );\n    mf.insert(\n        \"dirsync\",\n        Flag {\n            clear: false,\n            flags: MsFlags::MS_DIRSYNC,\n        },\n    );\n    mf.insert(\n        \"exec\",\n        Flag {\n            clear: true,\n            flags: MsFlags::MS_NOEXEC,\n        },\n    );\n    mf.insert(\n        \"mand\",\n        Flag {\n            clear: false,\n            flags: MsFlags::MS_MANDLOCK,\n        },\n    );\n    mf.insert(\n        \"noatime\",\n        Flag {\n            clear: false,\n            flags: MsFlags::MS_NOATIME,\n        },\n    );\n    mf.insert(\n        \"nodev\",\n        Flag {\n            
clear: false,\n            flags: MsFlags::MS_NODEV,\n        },\n    );\n    mf.insert(\n        \"nodiratime\",\n        Flag {\n            clear: false,\n            flags: MsFlags::MS_NODIRATIME,\n        },\n    );\n    mf.insert(\n        \"noexec\",\n        Flag {\n            clear: false,\n            flags: MsFlags::MS_NOEXEC,\n        },\n    );\n    mf.insert(\n        \"nomand\",\n        Flag {\n            clear: true,\n            flags: MsFlags::MS_MANDLOCK,\n        },\n    );\n    mf.insert(\n        \"norelatime\",\n        Flag {\n            clear: true,\n            flags: MsFlags::MS_RELATIME,\n        },\n    );\n    mf.insert(\n        \"nostrictatime\",\n        Flag {\n            clear: true,\n            flags: MsFlags::MS_STRICTATIME,\n        },\n    );\n    mf.insert(\n        \"nosuid\",\n        Flag {\n            clear: false,\n            flags: MsFlags::MS_NOSUID,\n        },\n    );\n    mf.insert(\n        \"rbind\",\n        Flag {\n            clear: false,\n            flags: MsFlags::MS_BIND.union(MsFlags::MS_REC),\n        },\n    );\n    mf.insert(\n        \"relatime\",\n        Flag {\n            clear: false,\n            flags: MsFlags::MS_RELATIME,\n        },\n    );\n    mf.insert(\n        \"remount\",\n        Flag {\n            clear: false,\n            flags: MsFlags::MS_REMOUNT,\n        },\n    );\n    mf.insert(\n        \"ro\",\n        Flag {\n            clear: false,\n            flags: MsFlags::MS_RDONLY,\n        },\n    );\n    mf.insert(\n        \"rw\",\n        Flag {\n            clear: true,\n            flags: MsFlags::MS_RDONLY,\n        },\n    );\n    mf.insert(\n        \"strictatime\",\n        Flag {\n            clear: false,\n            flags: MsFlags::MS_STRICTATIME,\n        },\n    );\n    mf.insert(\n        \"suid\",\n        Flag {\n            clear: true,\n            flags: MsFlags::MS_NOSUID,\n        },\n    );\n    mf.insert(\n        \"sync\",\n        Flag {\n      
      clear: false,\n            flags: MsFlags::MS_SYNCHRONOUS,\n        },\n    );\n    mf\n});\n\nconst PROPAGATION_TYPES: MsFlags = MsFlags::MS_SHARED\n    .union(MsFlags::MS_PRIVATE)\n    .union(MsFlags::MS_SLAVE)\n    .union(MsFlags::MS_UNBINDABLE);\n\nconst MS_PROPAGATION: MsFlags = PROPAGATION_TYPES\n    .union(MsFlags::MS_REC)\n    .union(MsFlags::MS_SILENT);\n\nconst MS_BIND_RO: MsFlags = MsFlags::MS_BIND.union(MsFlags::MS_RDONLY);\n\nfn page_size() -> usize {\n    let ret = unsafe { libc::sysconf(libc::_SC_PAGESIZE) };\n    assert!(ret > 0, \"sysconf(_SC_PAGESIZE) failed\");\n    ret as usize\n}\n\nfn options_size(options: &[String]) -> usize {\n    options.iter().fold(0, |sum, x| sum + x.len())\n}\n\nfn longest_common_prefix(dirs: &[String]) -> &str {\n    if dirs.is_empty() {\n        return \"\";\n    }\n\n    let first_dir = &dirs[0];\n\n    for (i, byte) in first_dir.as_bytes().iter().enumerate() {\n        for dir in dirs {\n            if dir.as_bytes().get(i) != Some(byte) {\n                let mut end = i;\n                // guaranteed not to underflow since is_char_boundary(0) is always true\n                while !first_dir.is_char_boundary(end) {\n                    end -= 1;\n                }\n\n                return &first_dir[0..end];\n            }\n        }\n    }\n\n    first_dir\n}\n\n// NOTE: the snapshot id is based on digits.\n// in order to avoid to get snapshots/x, should be back to parent dir.\n// however, there is assumption that the common dir is ${root}/io.containerd.v1.overlayfs/snapshots.\nfn trim_flawed_dir(s: &str) -> String {\n    s[0..s.rfind('/').unwrap_or(0) + 1].to_owned()\n}\n\n#[derive(Default)]\nstruct LowerdirCompactor {\n    options: Vec<String>,\n    lowerdirs: Option<Vec<String>>,\n    lowerdir_prefix: Option<String>,\n}\n\nimpl LowerdirCompactor {\n    fn new(options: &[String]) -> Self {\n        Self {\n            options: options.to_vec(),\n            ..Self::default()\n        }\n    }\n\n    fn 
lowerdirs(&mut self) -> &mut Self {\n        self.lowerdirs = Some(\n            self.options\n                .iter()\n                .filter(|x| x.starts_with(OVERLAY_LOWERDIR_PREFIX))\n                .map(|x| x.strip_prefix(OVERLAY_LOWERDIR_PREFIX).unwrap_or(x))\n                .flat_map(|x| x.split(':'))\n                .map(str::to_string)\n                .collect(),\n        );\n        self\n    }\n\n    fn lowerdir_prefix(&mut self) -> &mut Self {\n        self.lowerdir_prefix = self\n            .lowerdirs\n            .as_ref()\n            .filter(|x| x.len() > 1)\n            .map(|x| longest_common_prefix(x))\n            .map(trim_flawed_dir)\n            .filter(|x| !x.is_empty() && x != \"/\");\n        self\n    }\n\n    fn compact(&mut self) -> (Option<String>, Vec<String>) {\n        self.lowerdirs().lowerdir_prefix();\n        if let Some(chdir) = &self.lowerdir_prefix {\n            let lowerdir_str = self\n                .lowerdirs\n                .as_ref()\n                .unwrap_or(&Vec::new())\n                .iter()\n                .map(|x| x.strip_prefix(chdir).unwrap_or(x))\n                .collect::<Vec<&str>>()\n                .join(\":\");\n            let replace = |x: &str| -> String {\n                if x.starts_with(OVERLAY_LOWERDIR_PREFIX) {\n                    format!(\"{}{}\", OVERLAY_LOWERDIR_PREFIX, lowerdir_str)\n                } else {\n                    x.to_string()\n                }\n            };\n            (\n                self.lowerdir_prefix.clone(),\n                self.options\n                    .iter()\n                    .map(|x| replace(x))\n                    .collect::<Vec<String>>(),\n            )\n        } else {\n            (None, self.options.to_vec())\n        }\n    }\n}\n\nenum MountExitCode {\n    NixUnknownErr,\n    ChdirErr,\n    Success,\n    NixOtherErr(i32),\n}\n\nimpl From<i32> for MountExitCode {\n    fn from(code: i32) -> Self {\n        match code {\n            
-2 => MountExitCode::NixUnknownErr,\n            -1 => MountExitCode::ChdirErr,\n            0 => MountExitCode::Success,\n            _ => MountExitCode::NixOtherErr(code),\n        }\n    }\n}\n\nimpl From<MountExitCode> for i32 {\n    fn from(code: MountExitCode) -> Self {\n        match code {\n            MountExitCode::NixUnknownErr => -2,\n            MountExitCode::ChdirErr => -1,\n            MountExitCode::Success => 0,\n            MountExitCode::NixOtherErr(errno) => errno,\n        }\n    }\n}\n\nimpl From<nix::errno::Errno> for MountExitCode {\n    fn from(err: nix::errno::Errno) -> Self {\n        match err {\n            nix::errno::Errno::UnknownErrno => MountExitCode::NixUnknownErr,\n            _ => MountExitCode::NixOtherErr(err as i32),\n        }\n    }\n}\n\nimpl From<MountExitCode> for nix::errno::Errno {\n    fn from(code: MountExitCode) -> Self {\n        match code {\n            MountExitCode::NixOtherErr(errno) => nix::errno::Errno::from_raw(errno),\n            _ => nix::errno::Errno::UnknownErrno,\n        }\n    }\n}\n\nimpl From<MountExitCode> for Result<()> {\n    fn from(code: MountExitCode) -> Self {\n        match code {\n            MountExitCode::NixUnknownErr => Err(other!(\n                \"mount process exit unexpectedly, exit code: {}\",\n                nix::errno::Errno::from(code)\n            )),\n            MountExitCode::ChdirErr => Err(other!(\"mount process exit unexpectedly: chdir failed\")),\n            MountExitCode::Success => Ok(()),\n            MountExitCode::NixOtherErr(errno) => Err(other!(\n                \"mount process exit unexpectedly, exit code: {}\",\n                nix::errno::Errno::from_raw(errno)\n            )),\n        }\n    }\n}\n\n#[cfg(not(feature = \"async\"))]\npub fn mount_rootfs(\n    fs_type: Option<&str>,\n    source: Option<&str>,\n    options: &[String],\n    target: impl AsRef<Path>,\n) -> Result<()> {\n    //TODO add helper to mount fuse\n    let max_size = page_size();\n   
 // avoid hitting one page limit of mount argument buffer\n    //\n    // NOTE: 512 is a buffer during pagesize check.\n    let (chdir, options) =\n        if fs_type.unwrap_or(\"\") == \"overlay\" && options_size(options) >= max_size - 512 {\n            LowerdirCompactor::new(options).compact()\n        } else {\n            (None, options.to_vec())\n        };\n\n    let mut flags: MsFlags = MsFlags::empty();\n    let mut data = Vec::new();\n    options.iter().for_each(|x| {\n        if let Some(f) = MOUNT_FLAGS.get(x.as_str()) {\n            if f.clear {\n                flags.bitand_assign(f.flags.not());\n            } else {\n                flags.bitor_assign(f.flags)\n            }\n        } else {\n            data.push(x.as_str())\n        }\n    });\n\n    let opt = data.join(\",\");\n    if opt.len() > max_size {\n        return Err(other!(\"mount option is too long\"));\n    }\n\n    let data = if !data.is_empty() {\n        Some(opt.as_str())\n    } else {\n        None\n    };\n\n    let s = monitor_subscribe(Topic::All)?;\n    match unsafe { fork() } {\n        Ok(ForkResult::Parent { child, .. 
}) => {\n            let code: MountExitCode = wait_pid(i32::from(child), s).into();\n            code.into()\n        }\n        Ok(ForkResult::Child) => {\n            if let Some(workdir) = chdir {\n                env::set_current_dir(Path::new(&workdir)).unwrap_or_else(|_| {\n                    unsafe { libc::_exit(i32::from(MountExitCode::ChdirErr)) };\n                });\n            }\n            // mount with non-propagation first, or remount with changed data\n            let oflags = flags.bitand(PROPAGATION_TYPES.not());\n            let zero: MsFlags = MsFlags::empty();\n            if flags.bitand(MsFlags::MS_REMOUNT).eq(&zero) || data.is_some() {\n                mount(source, target.as_ref(), fs_type, oflags, data).unwrap_or_else(|err| {\n                    error!(\n                        \"Mount {:?} to {} failed: {}\",\n                        source,\n                        target.as_ref().display(),\n                        err\n                    );\n                    let code: MountExitCode = err.into();\n                    unsafe { libc::_exit(code.into()) };\n                });\n            }\n            // change the propagation type\n            if flags.bitand(PROPAGATION_TYPES).ne(&zero) {\n                mount::<str, Path, str, str>(\n                    None,\n                    target.as_ref(),\n                    None,\n                    flags.bitand(MS_PROPAGATION),\n                    None,\n                )\n                .unwrap_or_else(|err| {\n                    error!(\n                        \"Change {} mount propagation failed: {}\",\n                        target.as_ref().display(),\n                        err\n                    );\n                    let code: MountExitCode = err.into();\n                    unsafe { libc::_exit(code.into()) };\n                });\n            }\n            if oflags.bitand(MS_BIND_RO).eq(&MS_BIND_RO) {\n                mount::<str, Path, str, str>(\n           
         None,\n                    target.as_ref(),\n                    None,\n                    oflags.bitor(MsFlags::MS_REMOUNT),\n                    None,\n                )\n                .unwrap_or_else(|err| {\n                    error!(\n                        \"Change {} read-only failed: {}\",\n                        target.as_ref().display(),\n                        err\n                    );\n                    let code: MountExitCode = err.into();\n                    unsafe { libc::_exit(code.into()) };\n                });\n            }\n            unsafe { libc::_exit(i32::from(MountExitCode::Success)) };\n        }\n        Err(_) => Err(other!(\"fork mount process failed\")),\n    }\n}\n\n#[cfg(feature = \"async\")]\npub fn mount_rootfs(\n    fs_type: Option<&str>,\n    source: Option<&str>,\n    options: &[String],\n    target: impl AsRef<Path>,\n) -> Result<()> {\n    //TODO add helper to mount fuse\n    let max_size = page_size();\n    // NOTE: 512 is a buffer during pagesize check.\n    let (chdir, options) =\n        if fs_type.unwrap_or(\"\") == \"overlay\" && options_size(options) >= max_size - 512 {\n            LowerdirCompactor::new(options).compact()\n        } else {\n            (None, options.to_vec())\n        };\n\n    let mut flags: MsFlags = MsFlags::empty();\n    let mut data = Vec::new();\n    let mut lo_setup = false;\n    let mut loop_params = LoopParams::default();\n    options.iter().for_each(|x| {\n        if let Some(f) = MOUNT_FLAGS.get(x.as_str()) {\n            if f.clear {\n                flags.bitand_assign(f.flags.not());\n            } else {\n                flags.bitor_assign(f.flags)\n            }\n        } else if x.as_str() == \"loop\" {\n            lo_setup = true;\n        } else {\n            data.push(x.as_str())\n        }\n    });\n    let opt = data.join(\",\");\n\n    let data = if !data.is_empty() {\n        Some(opt.as_str())\n    } else {\n        None\n    };\n\n    if let 
Some(workdir) = chdir {\n        unshare(CloneFlags::CLONE_FS)?;\n        env::set_current_dir(Path::new(&workdir)).unwrap_or_else(|_| {\n            unsafe { libc::_exit(i32::from(MountExitCode::ChdirErr)) };\n        });\n    }\n    // mount with non-propagation first, or remount with changed data\n    let oflags = flags.bitand(PROPAGATION_TYPES.not());\n    if lo_setup {\n        loop_params = LoopParams {\n            readonly: oflags.bitand(MsFlags::MS_RDONLY) == MsFlags::MS_RDONLY,\n            auto_clear: true,\n            direct: false,\n        };\n    }\n    let zero: MsFlags = MsFlags::empty();\n    if flags.bitand(MsFlags::MS_REMOUNT).eq(&zero) || data.is_some() {\n        let lo_file: String;\n        let s = if lo_setup {\n            lo_file = setup_loop(source, loop_params)?;\n            Some(lo_file.as_str())\n        } else {\n            source\n        };\n        mount(s, target.as_ref(), fs_type, oflags, data).map_err(mount_error!(\n            e,\n            \"Mount {:?} to {}\",\n            source,\n            target.as_ref().display()\n        ))?;\n    }\n\n    // change the propagation type\n    if flags.bitand(PROPAGATION_TYPES).ne(&zero) {\n        mount::<str, Path, str, str>(None, target.as_ref(), None, MS_PROPAGATION, None).map_err(\n            mount_error!(e, \"Change {} mount propagation\", target.as_ref().display()),\n        )?;\n    }\n\n    if oflags.bitand(MS_BIND_RO).eq(&MS_BIND_RO) {\n        mount::<str, Path, str, str>(\n            None,\n            target.as_ref(),\n            None,\n            oflags.bitor(MsFlags::MS_REMOUNT),\n            None,\n        )\n        .map_err(mount_error!(\n            e,\n            \"Change {} read-only\",\n            target.as_ref().display()\n        ))?;\n    }\n\n    Ok(())\n}\n\n#[cfg(feature = \"async\")]\nfn setup_loop(source: Option<&str>, params: LoopParams) -> Result<String> {\n    let src = source.ok_or(other!(\"loop source is None\"))?;\n    for _ in 0..100 {\n   
     let num = get_free_loop_dev()?;\n        let loop_dev = format!(\"{}{}\", LOOP_DEV_FORMAT, num);\n        match setup_loop_dev(src, loop_dev.as_str(), &params) {\n            Ok(_) => return Ok(loop_dev),\n            Err(e) => {\n                if e.to_string().contains(EBUSY_STRING) {\n                    continue;\n                } else {\n                    return Err(e);\n                }\n            }\n        }\n    }\n    Err(Error::Other(\n        \"creating new loopback device after 100 times\".to_string(),\n    ))\n}\n\npub fn get_free_loop_dev() -> Result<i32> {\n    const LOOP_CTL_GET_FREE: i32 = 0x4c82;\n    let loop_control = File::options()\n        .read(true)\n        .write(true)\n        .open(LOOP_CONTROL_PATH)\n        .map_err(|e| Error::IoError {\n            context: format!(\"open {} error: \", LOOP_CONTROL_PATH),\n            err: e,\n        })?;\n    unsafe {\n        #[cfg(target_env = \"gnu\")]\n        let ret = libc::ioctl(\n            loop_control.as_raw_fd() as libc::c_int,\n            LOOP_CTL_GET_FREE as libc::c_ulong,\n        ) as i32;\n        #[cfg(target_env = \"musl\")]\n        let ret = libc::ioctl(\n            loop_control.as_raw_fd() as libc::c_int,\n            LOOP_CTL_GET_FREE as libc::c_int,\n        ) as i32;\n        match nix::errno::Errno::result(ret) {\n            Ok(ret) => Ok(ret),\n            Err(e) => Err(Error::Nix(e)),\n        }\n    }\n}\n\npub fn setup_loop_dev(backing_file: &str, loop_dev: &str, params: &LoopParams) -> Result<File> {\n    const LOOP_SET_FD: u32 = 0x4c00;\n    const LOOP_CLR_FD: u32 = 0x4c01;\n    const LOOP_SET_STATUS64: u32 = 0x4c04;\n    const LOOP_SET_DIRECT_IO: u32 = 0x4c08;\n    const LO_FLAGS_READ_ONLY: u32 = 0x1;\n    const LO_FLAGS_AUTOCLEAR: u32 = 0x4;\n    let mut open_options = File::options();\n    open_options.read(true);\n    if !params.readonly {\n        open_options.write(true);\n    }\n    // 1. 
open backing file\n    let back = open_options\n        .open(backing_file)\n        .map_err(|e| Error::IoError {\n            context: format!(\"open {} error: \", backing_file),\n            err: e,\n        })?;\n    let loop_dev = open_options.open(loop_dev).map_err(|e| Error::IoError {\n        context: format!(\"open {} error: \", loop_dev),\n        err: e,\n    })?;\n    // 2. set FD\n    unsafe {\n        #[cfg(target_env = \"gnu\")]\n        let ret = libc::ioctl(\n            loop_dev.as_raw_fd() as libc::c_int,\n            LOOP_SET_FD as libc::c_ulong,\n            back.as_raw_fd() as libc::c_int,\n        );\n        #[cfg(target_env = \"musl\")]\n        let ret = libc::ioctl(\n            loop_dev.as_raw_fd() as libc::c_int,\n            LOOP_SET_FD as libc::c_int,\n            back.as_raw_fd() as libc::c_int,\n        );\n        if let Err(e) = nix::errno::Errno::result(ret) {\n            return Err(Error::Nix(e));\n        }\n    }\n    // 3. set info\n    let mut info = LoopInfo::default();\n    let backing_file_truncated = if backing_file.len() > info.file_name.len() {\n        &backing_file[0..info.file_name.len()]\n    } else {\n        backing_file\n    };\n    info.file_name[..backing_file_truncated.len()]\n        .copy_from_slice(backing_file_truncated.as_bytes());\n    if params.readonly {\n        info.flags |= LO_FLAGS_READ_ONLY;\n    }\n\n    if params.auto_clear {\n        info.flags |= LO_FLAGS_AUTOCLEAR;\n    }\n    unsafe {\n        #[cfg(target_env = \"gnu\")]\n        let ret = libc::ioctl(\n            loop_dev.as_raw_fd() as libc::c_int,\n            LOOP_SET_STATUS64 as libc::c_ulong,\n            &info,\n        );\n        #[cfg(target_env = \"musl\")]\n        let ret = libc::ioctl(\n            loop_dev.as_raw_fd() as libc::c_int,\n            LOOP_SET_STATUS64 as libc::c_int,\n            &info,\n        );\n        #[cfg(target_env = \"gnu\")]\n        if let Err(e) = nix::errno::Errno::result(ret) {\n            
libc::ioctl(\n                loop_dev.as_raw_fd() as libc::c_int,\n                LOOP_CLR_FD as libc::c_ulong,\n                0,\n            );\n            return Err(Error::Nix(e));\n        }\n        #[cfg(target_env = \"musl\")]\n        if let Err(e) = nix::errno::Errno::result(ret) {\n            libc::ioctl(\n                loop_dev.as_raw_fd() as libc::c_int,\n                LOOP_CLR_FD as libc::c_int,\n                0,\n            );\n            return Err(Error::Nix(e));\n        }\n    }\n\n    // 4. Set Direct IO\n    if params.direct {\n        unsafe {\n            #[cfg(target_env = \"gnu\")]\n            let ret = libc::ioctl(\n                loop_dev.as_raw_fd() as libc::c_int,\n                LOOP_SET_DIRECT_IO as libc::c_ulong,\n                1,\n            );\n            #[cfg(target_env = \"musl\")]\n            let ret = libc::ioctl(\n                loop_dev.as_raw_fd() as libc::c_int,\n                LOOP_SET_DIRECT_IO as libc::c_int,\n                1,\n            );\n            if let Err(e) = nix::errno::Errno::result(ret) {\n                #[cfg(target_env = \"gnu\")]\n                libc::ioctl(\n                    loop_dev.as_raw_fd() as libc::c_int,\n                    LOOP_CLR_FD as libc::c_ulong,\n                    0,\n                );\n                #[cfg(target_env = \"musl\")]\n                libc::ioctl(\n                    loop_dev.as_raw_fd() as libc::c_int,\n                    LOOP_CLR_FD as libc::c_int,\n                    0,\n                );\n                return Err(Error::Nix(e));\n            }\n        }\n    }\n    Ok(loop_dev)\n}\n\npub fn umount_recursive(target: Option<&str>, flags: i32) -> Result<()> {\n    if let Some(target) = target {\n        let mut mounts = get_mounts(Some(prefix_filter(target.to_string())))?;\n        mounts.sort_by(|a, b| b.mountpoint.len().cmp(&a.mountpoint.len()));\n        for target in &mounts {\n            
umount_all(Some(target.mountpoint.clone()), flags)?;\n        }\n    };\n    Ok(())\n}\n\nfn umount_all(target: Option<String>, flags: i32) -> Result<()> {\n    if let Some(target) = target {\n        if let Err(e) = std::fs::metadata(target.clone()) {\n            if e.kind() == std::io::ErrorKind::NotFound {\n                return Ok(());\n            }\n        }\n        loop {\n            if let Err(e) = nix::mount::umount2(\n                &std::path::PathBuf::from(&target),\n                MntFlags::from_bits(flags).unwrap_or(MntFlags::empty()),\n            ) {\n                if e == nix::errno::Errno::EINVAL {\n                    return Ok(());\n                }\n                return Err(Error::from(e));\n            }\n        }\n    };\n    Ok(())\n}\n\nfn prefix_filter(prefix: String) -> impl Fn(MountInfo) -> bool {\n    move |m: MountInfo| !(m.mountpoint.clone() + \"/\").starts_with(&(prefix.clone() + \"/\"))\n}\n\nfn get_mounts<F>(f: Option<F>) -> Result<Vec<MountInfo>>\nwhere\n    F: Fn(MountInfo) -> bool,\n{\n    let mountinfo_path = \"/proc/self/mountinfo\";\n    let file = std::fs::File::open(mountinfo_path).map_err(io_error!(e, \"io_error\"))?;\n    let reader = std::io::BufReader::new(file);\n    let lines: Vec<String> = reader.lines().map_while(|line| line.ok()).collect();\n    let mount_points = lines\n        .into_iter()\n        .filter_map(|line| {\n            /*\n            See http://man7.org/linux/man-pages/man5/proc.5.html\n            36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue\n            (1)(2)(3)   (4)   (5)      (6)      (7)   (8) (9)   (10)         (11)\n            (1) mount ID:  unique identifier of the mount (may be reused after umount)\n            (2) parent ID:  ID of parent (or of self for the top of the mount tree)\n            (3) major:minor:  value of st_dev for files on filesystem\n            (4) root:  root of the mount within the filesystem\n            (5) mount 
point:  mount point relative to the process's root\n            (6) mount options:  per mount options\n            (7) optional fields:  zero or more fields of the form \"tag[:value]\"\n            (8) separator:  marks the end of the optional fields\n            (9) filesystem type:  name of filesystem of the form \"type[.subtype]\"\n            (10) mount source:  filesystem specific information or \"none\"\n            (11) super options:  per super block options\n            In other words, we have:\n             * 6 mandatory fields\t(1)..(6)\n             * 0 or more optional fields\t(7)\n             * a separator field\t\t(8)\n             * 3 mandatory fields\t(9)..(11)\n             */\n            let parts: Vec<&str> = line.split_whitespace().collect();\n            if parts.len() < 10 {\n                // mountpoint parse error.\n                return None;\n            }\n            // separator field\n            let mut sep_idx = parts.len() - 4;\n            // In Linux <= 3.9 mounting a cifs with spaces in a share\n            // name (like \"//srv/My Docs\") _may_ end up having a space\n            // in the last field of mountinfo (like \"unc=//serv/My Docs\").\n            // Since kernel 3.10-rc1, cifs option \"unc=\" is ignored,\n            // so spaces should not appear.\n            //\n            // Check for a separator, and work around the spaces bug\n            for i in (0..sep_idx).rev() {\n                if parts[i] == \"-\" {\n                    sep_idx = i;\n                    break;\n                }\n                if sep_idx == 5 {\n                    // mountpoint parse error\n                    return None;\n                }\n            }\n\n            let mut mount_info = MountInfo {\n                id: str::parse::<u32>(parts[0]).ok()?,\n                parent: str::parse::<u32>(parts[1]).ok()?,\n                major: 0,\n                minor: 0,\n                root: parts[3].to_string(),\n                
mountpoint: parts[4].to_string(),\n                options: parts[5].to_string(),\n                optional: parts[6..sep_idx].join(\" \"),\n                fs_type: parts[sep_idx + 1].to_string(),\n                source: parts[sep_idx + 2].to_string(),\n                vfs_options: parts[sep_idx + 3].to_string(),\n            };\n            let major_minor = parts[2].splitn(3, ':').collect::<Vec<&str>>();\n            if major_minor.len() != 2 {\n                // mountpoint parse error.\n                return None;\n            }\n            mount_info.major = str::parse::<u32>(major_minor[0]).ok()?;\n            mount_info.minor = str::parse::<u32>(major_minor[1]).ok()?;\n            if let Some(f) = &f {\n                if f(mount_info.clone()) {\n                    // skip this mountpoint. This mountpoint is not the container's mountpoint\n                    return None;\n                }\n            }\n            Some(mount_info)\n        })\n        .collect::<Vec<MountInfo>>();\n    Ok(mount_points)\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn test_trim_flawed_dir() {\n        let mut tcases: Vec<(&str, String)> = Vec::new();\n        tcases.push((\"/\", \"/\".to_string()));\n        tcases.push((\"/foo\", \"/\".to_string()));\n        tcases.push((\"/.foo-_bar/foo\", \"/.foo-_bar/\".to_string()));\n        tcases.push((\"/.foo-_bar/foo/\", \"/.foo-_bar/foo/\".to_string()));\n        tcases.push((\"/.foo-_bar/foo/bar\", \"/.foo-_bar/foo/\".to_string()));\n        tcases.push((\"/.foo-_bar/foo/bar/\", \"/.foo-_bar/foo/bar/\".to_string()));\n        for (case, expected) in tcases {\n            let res = trim_flawed_dir(case);\n            assert_eq!(res, expected);\n        }\n    }\n\n    #[test]\n    fn test_longest_common_prefix() {\n        let mut tcases: Vec<(Vec<String>, String)> = Vec::new();\n        tcases.push((vec![], \"\".to_string()));\n        tcases.push((vec![\"foo\".to_string()], 
\"foo\".to_string()));\n        tcases.push((vec![\"foo\".to_string(), \"bar\".to_string()], \"\".to_string()));\n        tcases.push((\n            vec![\"foo\".to_string(), \"foo\".to_string()],\n            \"foo\".to_string(),\n        ));\n        tcases.push((\n            vec![\"foo\".to_string(), \"foobar\".to_string()],\n            \"foo\".to_string(),\n        ));\n        tcases.push((\n            vec![\"foo\".to_string(), \"\".to_string(), \"foobar\".to_string()],\n            \"\".to_string(),\n        ));\n        for (case, expected) in tcases {\n            let res = longest_common_prefix(&case);\n            assert_eq!(res, expected);\n        }\n    }\n\n    #[test]\n    fn test_compact_lowerdir_option() {\n        let mut tcases: Vec<(Vec<String>, Option<String>, Vec<String>)> = Vec::new();\n        tcases.push((\n            vec![\"workdir=a\".to_string()],\n            None,\n            vec![\"workdir=a\".to_string()],\n        ));\n        tcases.push((\n            vec![\"workdir=a\".to_string(), \"lowerdir=b\".to_string()],\n            None,\n            vec![\"workdir=a\".to_string(), \"lowerdir=b\".to_string()],\n        ));\n        tcases.push((\n            vec![\"lowerdir=/snapshots/1/fs:/snapshots/10/fs\".to_string()],\n            Some(\"/snapshots/\".to_string()),\n            vec![\"lowerdir=1/fs:10/fs\".to_string()],\n        ));\n        tcases.push((\n            vec![\n                \"workdir=a\".to_string(),\n                \"lowerdir=/snapshots/1/fs:/snapshots/10/fs\".to_string(),\n            ],\n            Some(\"/snapshots/\".to_string()),\n            vec![\"workdir=a\".to_string(), \"lowerdir=1/fs:10/fs\".to_string()],\n        ));\n        tcases.push((\n            vec![\"lowerdir=/snapshots/1/fs:/snapshots/10/fs:/snapshots/2/fs\".to_string()],\n            Some(\"/snapshots/\".to_string()),\n            vec![\"lowerdir=1/fs:10/fs:2/fs\".to_string()],\n        ));\n        tcases.push((\n            vec![\n     
           \"workdir=a\".to_string(),\n                \"lowerdir=/snapshots/1/fs:/snapshots/10/fs:/snapshots/2/fs\".to_string(),\n            ],\n            Some(\"/snapshots/\".to_string()),\n            vec![\n                \"workdir=a\".to_string(),\n                \"lowerdir=1/fs:10/fs:2/fs\".to_string(),\n            ],\n        ));\n        tcases.push((\n            vec![\"lowerdir=/snapshots/1/fs:/other_snapshots/1/fs\".to_string()],\n            None,\n            vec![\"lowerdir=/snapshots/1/fs:/other_snapshots/1/fs\".to_string()],\n        ));\n        tcases.push((\n            vec![\n                \"workdir=a\".to_string(),\n                \"lowerdir=/snapshots/1/fs:/other_snapshots/1/fs\".to_string(),\n            ],\n            None,\n            vec![\n                \"workdir=a\".to_string(),\n                \"lowerdir=/snapshots/1/fs:/other_snapshots/1/fs\".to_string(),\n            ],\n        ));\n        for (case, expected_chdir, expected_options) in tcases {\n            let (chdir, options) = LowerdirCompactor::new(&case).compact();\n            assert_eq!(chdir, expected_chdir);\n            assert_eq!(options, expected_options);\n        }\n    }\n\n    #[cfg(feature = \"async\")]\n    #[test]\n    fn test_mount_rootfs_umount_recursive() {\n        let target = tempfile::tempdir().expect(\"create target dir error\");\n        let lower1 = tempfile::tempdir().expect(\"create lower1 dir error\");\n        let lower2 = tempfile::tempdir().expect(\"create lower2 dir error\");\n        let upperdir = tempfile::tempdir().expect(\"create upperdir dir error\");\n        let workdir = tempfile::tempdir().expect(\"create workdir dir error\");\n        let options = vec![\n            \"lowerdir=\".to_string()\n                + lower1.path().to_str().expect(\"lower1 path to str error\")\n                + \":\"\n                + lower2.path().to_str().expect(\"lower2 path to str error\"),\n            \"upperdir=\".to_string()\n          
      + upperdir\n                    .path()\n                    .to_str()\n                    .expect(\"upperdir path to str error\"),\n            \"workdir=\".to_string() + workdir.path().to_str().expect(\"workdir path to str error\"),\n        ];\n        // mount target.\n        let result = mount_rootfs(Some(\"overlay\"), Some(\"overlay\"), &options, &target);\n        assert!(result.is_ok());\n        let mut mountinfo = get_mounts(Some(prefix_filter(\n            target\n                .path()\n                .to_str()\n                .expect(\"target path to str error\")\n                .to_string(),\n        )))\n        .expect(\"get_mounts error\");\n        // make sure the target has been mounted.\n        assert_ne!(0, mountinfo.len());\n        // umount target.\n        let result = umount_recursive(target.path().to_str(), 0);\n        assert!(result.is_ok());\n        mountinfo = get_mounts(Some(prefix_filter(\n            target\n                .path()\n                .to_str()\n                .expect(\"target path to str error\")\n                .to_string(),\n        )))\n        .expect(\"get_mounts error\");\n        // make sure the target has been unmounted.\n        assert_eq!(0, mountinfo.len());\n    }\n\n    #[cfg(feature = \"async\")]\n    #[test]\n    fn test_setup_loop_dev() {\n        let path = tempfile::NamedTempFile::new().expect(\"cannot create tempfile\");\n        let backing_file = path.path().to_str();\n        let params = LoopParams {\n            readonly: false,\n            auto_clear: true,\n            direct: true,\n        };\n        let result = setup_loop(backing_file, params);\n        assert!(result.is_ok());\n    }\n}\n"
  },
  {
    "path": "crates/shim/src/mount_other.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n#![allow(unused)]\n\nuse std::path::Path;\n\nuse crate::error::{Error, Result};\n\npub fn mount_rootfs(\n    fs_type: Option<&str>,\n    source: Option<&str>,\n    options: &[String],\n    target: impl AsRef<Path>,\n) -> Result<()> {\n    // On on-Linux systems, we should return OK\n    // instead of exiting with an error.\n    Ok(())\n}\n\npub fn umount_recursive(target: Option<&str>, flags: i32) -> Result<()> {\n    Ok(())\n}\n"
  },
  {
    "path": "crates/shim/src/reap.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse crate::error::Result;\n\n#[cfg(target_os = \"linux\")]\n/// Set current process as subreaper for child processes.\n///\n/// A subreaper fulfills the role of `init` for its descendant processes.  When a process becomes\n/// orphaned (i.e., its immediate parent terminates), then that process will be reparented to the\n/// nearest still living ancestor subreaper. Subsequently, calls to `getppid()` in the orphaned\n/// process will now return the PID of the subreaper process, and when the orphan terminates,\n/// it is the subreaper process that will receive a SIGCHLD signal and will be able to `wait()`\n/// on the process to discover its termination status.\npub fn set_subreaper() -> Result<()> {\n    use crate::error::Error;\n    let ret = unsafe { libc::prctl(libc::PR_SET_CHILD_SUBREAPER, 1, 0, 0, 0) };\n    if ret < 0 {\n        return Err(other!(\n            \"linux prctl returned: {}\",\n            std::io::Error::last_os_error()\n        ));\n    }\n    Ok(())\n}\n\n#[cfg(not(target_os = \"linux\"))]\npub fn set_subreaper() -> Result<()> {\n    Ok(())\n}\n\n#[cfg(test)]\n#[cfg(target_os = \"linux\")]\nmod tests {\n    use crate::reap::set_subreaper;\n\n    #[test]\n    fn test_set_subreaper() {\n        set_subreaper().unwrap();\n        let mut val: libc::c_int = 0;\n        let ret = unsafe {\n            libc::prctl(\n                
libc::PR_GET_CHILD_SUBREAPER,\n                &mut val as *mut libc::c_int as libc::c_ulong,\n                0,\n                0,\n                0,\n            )\n        };\n        assert!(ret >= 0);\n        assert!(val != 0);\n    }\n}\n"
  },
  {
    "path": "crates/shim/src/synchronous/mod.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\n//! A library to implement custom runtime v2 shims for containerd.\n//!\n//! # Runtime\n//! Runtime v2 introduces a first class shim API for runtime authors to integrate with containerd.\n//! The shim API is minimal and scoped to the execution lifecycle of a container.\n//!\n//! This crate simplifies shim v2 runtime development for containerd. It handles common tasks such\n//! as command line parsing, setting up shim's TTRPC server, logging, events, etc.\n//!\n//! Clients are expected to implement [Shim] and [Task] traits with task handling routines.\n//! This generally replicates same API as in Go [version](https://github.com/containerd/containerd/blob/main/runtime/v2/example/cmd/main.go).\n//!\n//! Once implemented, shim's bootstrap code is as easy as:\n//! ```text\n//! shim::run::<Service>(\"io.containerd.empty.v1\")\n//! ```\n//!\n\nmacro_rules! cfg_unix {\n    ($($item:item)*) => {\n        $(\n            #[cfg(unix)]\n            $item\n        )*\n    }\n}\n\nmacro_rules! 
cfg_windows {\n    ($($item:item)*) => {\n        $(\n            #[cfg(windows)]\n            $item\n        )*\n    }\n}\n\nuse std::{\n    env,\n    io::Write,\n    process::{self, Command, Stdio},\n    sync::{Arc, Condvar, Mutex},\n};\n\npub use log::{debug, error, info, warn};\nuse util::{read_address, write_address};\n\nuse crate::{\n    api::DeleteResponse,\n    args::{self, Flags},\n    logger,\n    protos::{\n        protobuf::Message,\n        shim::shim_ttrpc::{create_task, Task},\n        ttrpc::{Client, Server},\n    },\n    reap, socket_address, start_listener,\n    synchronous::publisher::RemotePublisher,\n    Config, Error, Result, StartOpts, TTRPC_ADDRESS,\n};\n\ncfg_unix! {\n    use crate::parse_sockaddr;\n    use libc::{SIGCHLD, SIGINT, SIGPIPE, SIGTERM};\n    use nix::{\n        errno::Errno,\n        sys::{\n            signal::Signal,\n            wait::{self, WaitPidFlag, WaitStatus},\n        },\n        unistd::Pid,\n    };\n    use signal_hook::iterator::Signals;\n    use std::os::unix::fs::FileTypeExt;\n    use std::{convert::TryFrom, fs, path::Path};\n}\n\ncfg_windows! 
{\n    use std::{\n        io, ptr,\n        fs::OpenOptions,\n        os::windows::prelude::{AsRawHandle, OpenOptionsExt},\n    };\n\n    use windows_sys::Win32::{\n        Foundation::{CloseHandle, HANDLE},\n        System::{\n            Console::SetConsoleCtrlHandler,\n            Threading::{CreateSemaphoreA, ReleaseSemaphore, WaitForSingleObject, INFINITE},\n        },\n        Storage::FileSystem::FILE_FLAG_OVERLAPPED\n    };\n\n    static mut SEMAPHORE: HANDLE = 0 as HANDLE;\n    const MAX_SEM_COUNT: i32 = 255;\n}\n\npub mod monitor;\npub mod publisher;\npub mod util;\n\n/// Helper structure that wraps atomic bool to signal shim server when to shutdown the TTRPC server.\n///\n/// Shim implementations are responsible for calling [`Self::signal`].\n#[allow(clippy::mutex_atomic)] // Condvar expected to be used with Mutex, not AtomicBool.\n#[derive(Default)]\npub struct ExitSignal(Mutex<bool>, Condvar);\n\n// Wrapper type to help hide platform specific signal handling.\nstruct AppSignals {\n    #[cfg(unix)]\n    signals: Signals,\n}\n\n#[allow(clippy::mutex_atomic)]\nimpl ExitSignal {\n    /// Set exit signal to shutdown shim server.\n    pub fn signal(&self) {\n        let (lock, cvar) = (&self.0, &self.1);\n        let mut exit = lock.lock().unwrap();\n        *exit = true;\n        cvar.notify_all();\n    }\n\n    /// Wait for the exit signal to be set.\n    pub fn wait(&self) {\n        let (lock, cvar) = (&self.0, &self.1);\n        let mut started = lock.lock().unwrap();\n        while !*started {\n            started = cvar.wait(started).unwrap();\n        }\n    }\n}\n\n/// Main shim interface that must be implemented by all shims.\n///\n/// Start and delete routines will be called to handle containerd's shim lifecycle requests.\npub trait Shim {\n    /// Type to provide task service for the shim.\n    type T: Task + Send + Sync;\n\n    /// Create a new instance of Shim.\n    ///\n    /// # Arguments\n    /// - `runtime_id`: identifier of the container 
runtime.\n    /// - `args`: command line arguments passed to the shim which includes namespace and id\n    /// - `config`: for the shim to pass back configuration information\n    fn new(runtime_id: &str, args: &Flags, config: &mut Config) -> Self;\n\n    /// Start shim will be called by containerd when launching new shim instance.\n    ///\n    /// It expected to return TTRPC address containerd daemon can use to communicate with\n    /// the given shim instance.\n    ///\n    /// See <https://github.com/containerd/containerd/tree/master/runtime/v2#start>\n    fn start_shim(&mut self, opts: StartOpts) -> Result<String>;\n\n    /// Delete shim will be called by containerd after shim shutdown to cleanup any leftovers.\n    fn delete_shim(&mut self) -> Result<DeleteResponse>;\n\n    /// Wait for the shim to exit.\n    fn wait(&mut self);\n\n    /// Create the task service object.\n    fn create_task_service(&self, publisher: RemotePublisher) -> Self::T;\n}\n\n/// Shim entry point that must be invoked from `main`.\n#[cfg_attr(feature = \"tracing\", tracing::instrument(level = \"info\"))]\npub fn run<T>(runtime_id: &str, opts: Option<Config>)\nwhere\n    T: Shim + Send + Sync + 'static,\n{\n    if let Some(err) = bootstrap::<T>(runtime_id, opts).err() {\n        eprintln!(\"{}: {:?}\", runtime_id, err);\n        process::exit(1);\n    }\n}\n\n#[cfg_attr(feature = \"tracing\", tracing::instrument(level = \"info\"))]\nfn bootstrap<T>(runtime_id: &str, opts: Option<Config>) -> Result<()>\nwhere\n    T: Shim + Send + Sync + 'static,\n{\n    // Parse command line\n    let os_args: Vec<_> = env::args_os().collect();\n    let flags = args::parse(&os_args[1..])?;\n\n    if flags.namespace.is_empty() {\n        return Err(Error::InvalidArgument(String::from(\n            \"Shim namespace cannot be empty\",\n        )));\n    }\n\n    let ttrpc_address = env::var(TTRPC_ADDRESS)?;\n\n    // Create shim instance\n    let mut config = opts.unwrap_or_default();\n\n    // Setup 
signals (On Linux need register signals before start main app according to signal_hook docs)\n    let signals = setup_signals(&config);\n\n    if !config.no_sub_reaper {\n        reap::set_subreaper()?;\n    }\n\n    let mut shim = T::new(runtime_id, &flags, &mut config);\n\n    match flags.action.as_str() {\n        \"start\" => {\n            let args = StartOpts {\n                id: flags.id,\n                publish_binary: flags.publish_binary,\n                address: flags.address,\n                ttrpc_address,\n                namespace: flags.namespace,\n                debug: flags.debug,\n            };\n\n            let address = shim.start_shim(args)?;\n\n            std::io::stdout()\n                .lock()\n                .write_fmt(format_args!(\"{}\", address))\n                .map_err(io_error!(e, \"write stdout\"))?;\n\n            Ok(())\n        }\n        \"delete\" => {\n            std::thread::spawn(move || handle_signals(signals));\n            let response = shim.delete_shim()?;\n            let stdout = std::io::stdout();\n            let mut locked = stdout.lock();\n            response.write_to_writer(&mut locked)?;\n\n            Ok(())\n        }\n        _ => {\n            if flags.socket.is_empty() {\n                return Err(Error::InvalidArgument(String::from(\n                    \"Shim socket cannot be empty\",\n                )));\n            }\n\n            #[cfg(windows)]\n            util::setup_debugger_event();\n\n            if !config.no_setup_logger {\n                logger::init(\n                    flags.debug,\n                    &config.default_log_level,\n                    &flags.namespace,\n                    &flags.id,\n                )?;\n            }\n\n            let publisher = publisher::RemotePublisher::new(&ttrpc_address)?;\n            let task = Box::new(shim.create_task_service(publisher))\n                as Box<dyn containerd_shim_protos::Task + Send + Sync + 'static>;\n       
     let task_service = create_task(Arc::from(task));\n            let Some(mut server) = create_server_with_retry(&flags)? else {\n                signal_server_started();\n                return Ok(());\n            };\n            server = server.register_service(task_service);\n            server.start()?;\n\n            signal_server_started();\n\n            info!(\"Shim successfully started, waiting for exit signal...\");\n            #[cfg(unix)]\n            std::thread::spawn(move || handle_signals(signals));\n            shim.wait();\n\n            info!(\"Shutting down shim instance\");\n            server.shutdown();\n\n            // NOTE: If the shim server is down(like oom killer), the address\n            // socket might be leaking.\n            let address = read_address()?;\n            remove_socket_silently(&address);\n            Ok(())\n        }\n    }\n}\n\n#[cfg(windows)]\n#[cfg_attr(feature = \"tracing\", tracing::instrument(skip_all, level = \"info\"))]\nfn create_server(flags: &args::Flags) -> Result<Server> {\n    start_listener(&flags.socket).map_err(io_error!(e, \"starting listener\"))?;\n    let mut server = Server::new();\n    server = server.bind(&flags.socket)?;\n    Ok(server)\n}\n\n#[cfg(unix)]\n#[cfg_attr(feature = \"tracing\", tracing::instrument(skip_all, level = \"info\"))]\nfn create_server(flags: &args::Flags) -> Result<Server> {\n    use std::os::fd::IntoRawFd;\n    let listener = start_listener(&flags.socket).map_err(io_error!(e, \"starting listener\"))?;\n    let mut server = Server::new();\n    server = server.add_listener(listener.into_raw_fd())?;\n    Ok(server)\n}\n\n#[cfg_attr(feature = \"tracing\", tracing::instrument(skip_all, level = \"info\"))]\nfn create_server_with_retry(flags: &args::Flags) -> Result<Option<Server>> {\n    // Really try to create a server.\n    let server = match create_server(flags) {\n        Ok(server) => server,\n        Err(Error::IoError { err, .. 
}) if err.kind() == std::io::ErrorKind::AddrInUse => {\n            // If the address is already in use then make sure it is up and running and return the address\n            // This allows for running a single shim per container scenarios\n            if let Ok(()) = wait_socket_working(&flags.socket, 5, 200) {\n                write_address(&flags.socket)?;\n                return Ok(None);\n            }\n            remove_socket(&flags.socket)?;\n            create_server(flags)?\n        }\n        Err(e) => return Err(e),\n    };\n\n    Ok(Some(server))\n}\n\n#[cfg_attr(feature = \"tracing\", tracing::instrument(skip_all, level = \"info\"))]\nfn setup_signals(_config: &Config) -> Option<AppSignals> {\n    #[cfg(unix)]\n    {\n        let signals = Signals::new([SIGTERM, SIGINT, SIGPIPE]).expect(\"new signal failed\");\n        if !_config.no_reaper {\n            signals.add_signal(SIGCHLD).expect(\"add signal failed\");\n        }\n        Some(AppSignals { signals })\n    }\n\n    #[cfg(windows)]\n    {\n        unsafe {\n            SEMAPHORE = CreateSemaphoreA(ptr::null_mut(), 0, MAX_SEM_COUNT, ptr::null());\n            if SEMAPHORE == 0 {\n                panic!(\"Failed to create semaphore: {}\", io::Error::last_os_error());\n            }\n\n            if SetConsoleCtrlHandler(Some(signal_handler), 1) == 0 {\n                let e = io::Error::last_os_error();\n                CloseHandle(SEMAPHORE);\n                SEMAPHORE = 0 as HANDLE;\n                panic!(\"Failed to set console handler: {}\", e);\n            }\n        }\n        None\n    }\n}\n\n#[cfg(windows)]\nunsafe extern \"system\" fn signal_handler(_: u32) -> i32 {\n    ReleaseSemaphore(SEMAPHORE, 1, ptr::null_mut());\n    1\n}\n\n#[cfg_attr(feature = \"tracing\", tracing::instrument(skip_all, level = \"info\"))]\nfn handle_signals(mut _signals: Option<AppSignals>) {\n    #[cfg(unix)]\n    {\n        let mut app_signals = _signals.take().unwrap();\n        loop {\n            
for sig in app_signals.signals.wait() {\n                match sig {\n                    SIGTERM | SIGINT => {\n                        debug!(\"received {}\", sig);\n                    }\n                    SIGCHLD => loop {\n                        // Note that this thread sticks to child even it is suspended.\n                        match wait::waitpid(Some(Pid::from_raw(-1)), Some(WaitPidFlag::WNOHANG)) {\n                            Ok(WaitStatus::Exited(pid, status)) => {\n                                monitor::monitor_notify_by_pid(pid.as_raw(), status)\n                                    .unwrap_or_else(|e| error!(\"failed to send exit event {}\", e))\n                            }\n                            Ok(WaitStatus::Signaled(pid, sig, _)) => {\n                                debug!(\"child {} terminated({})\", pid, sig);\n                                let exit_code = 128 + sig as i32;\n                                monitor::monitor_notify_by_pid(pid.as_raw(), exit_code)\n                                    .unwrap_or_else(|e| error!(\"failed to send signal event {}\", e))\n                            }\n                            Ok(WaitStatus::StillAlive) => {\n                                break;\n                            }\n                            Err(Errno::ECHILD) => {\n                                break;\n                            }\n                            Err(e) => {\n                                // stick until all children will be successfully waited, even some unexpected error occurs.\n                                warn!(\"error occurred in signal handler: {}\", e);\n                            }\n                            _ => {} // stick until exit\n                        }\n                    },\n                    _ => {\n                        if let Ok(sig) = Signal::try_from(sig) {\n                            debug!(\"received {}\", sig);\n                        } else {\n                 
           warn!(\"received invalid signal {}\", sig);\n                        }\n                    }\n                }\n            }\n        }\n    }\n\n    #[cfg(windows)]\n    {\n        // must start on thread as waitforSingleObject puts the current thread to sleep\n        loop {\n            unsafe {\n                WaitForSingleObject(SEMAPHORE, INFINITE);\n                //Windows doesn't have similar signal like SIGCHLD\n                // We could implement something if required but for now\n            }\n        }\n    }\n}\n\n#[cfg_attr(feature = \"tracing\", tracing::instrument(level = \"info\"))]\nfn wait_socket_working(address: &str, interval_in_ms: u64, count: u32) -> Result<()> {\n    for _i in 0..count {\n        match Client::connect(address) {\n            Ok(_) => {\n                return Ok(());\n            }\n            Err(_) => {\n                std::thread::sleep(std::time::Duration::from_millis(interval_in_ms));\n            }\n        }\n    }\n    Err(other!(\"time out waiting for socket {}\", address))\n}\n\n#[cfg_attr(feature = \"tracing\", tracing::instrument(level = \"info\"))]\nfn remove_socket_silently(address: &str) {\n    remove_socket(address).unwrap_or_else(|e| warn!(\"failed to remove file {} {:?}\", address, e))\n}\n\n#[cfg_attr(feature = \"tracing\", tracing::instrument(level = \"info\"))]\nfn remove_socket(address: &str) -> Result<()> {\n    #[cfg(unix)]\n    {\n        let path = parse_sockaddr(address);\n        if let Ok(md) = Path::new(path).metadata() {\n            if md.file_type().is_socket() {\n                fs::remove_file(path).map_err(io_error!(e, \"remove socket\"))?;\n            }\n        }\n    }\n\n    #[cfg(windows)]\n    {\n        let mut opts = OpenOptions::new();\n        opts.read(true)\n            .write(true)\n            .custom_flags(FILE_FLAG_OVERLAPPED);\n        if let Ok(f) = opts.open(address) {\n            info!(\"attempting to remove existing named pipe: {}\", address);\n 
           unsafe { CloseHandle(f.as_raw_handle() as isize) };\n        }\n    }\n\n    Ok(())\n}\n\n/// Spawn is a helper func to launch shim process.\n/// Typically this expected to be called from `StartShim`.\n#[cfg_attr(feature = \"tracing\", tracing::instrument(level = \"info\"))]\npub fn spawn(opts: StartOpts, grouping: &str, vars: Vec<(&str, &str)>) -> Result<(u32, String)> {\n    let cmd = env::current_exe().map_err(io_error!(e, \"\"))?;\n    let cwd = env::current_dir().map_err(io_error!(e, \"\"))?;\n    let address = socket_address(&opts.address, &opts.namespace, grouping);\n\n    // Activation pattern comes from the hcsshim: https://github.com/microsoft/hcsshim/blob/v0.10.0-rc.7/cmd/containerd-shim-runhcs-v1/serve.go#L57-L70\n    // another way to do it would to create named pipe and pass it to the child process through handle inheritence but that would require duplicating\n    // the logic in Rust's 'command' for process creation.  There is an  issue in Rust to make it simplier to specify handle inheritence and this could\n    // be revisited once https://github.com/rust-lang/rust/issues/54760 is implemented.\n\n    let mut command = Command::new(cmd);\n    command\n        .current_dir(cwd)\n        .stdout(Stdio::piped())\n        .stdin(Stdio::null())\n        .stderr(Stdio::null())\n        .envs(vars)\n        .args([\n            \"-namespace\",\n            &opts.namespace,\n            \"-id\",\n            &opts.id,\n            \"-address\",\n            &opts.address,\n            \"-socket\",\n            &address,\n        ]);\n\n    if opts.debug {\n        command.arg(\"-debug\");\n    }\n\n    // On Windows Rust currently sets the `HANDLE_FLAG_INHERIT` flag to true when using Command::spawn.\n    // When a child process is spawned by another process (containerd) the child process inherits the parent's stdin, stdout, and stderr handles.\n    // Due to the HANDLE_FLAG_INHERIT flag being set to true this will cause containerd to hand until 
the child process closes the handles.\n    // As a workaround we can Disables inheritance on the io pipe handles.\n    // This workaround comes from https://github.com/rust-lang/rust/issues/54760#issuecomment-1045940560\n    #[cfg(windows)]\n    disable_handle_inheritance();\n\n    let mut child = command.spawn().map_err(io_error!(e, \"spawn shim\"))?;\n\n    let mut reader = child.stdout.take().unwrap();\n    std::io::copy(&mut reader, &mut std::io::stderr()).unwrap();\n\n    Ok((child.id(), address))\n}\n\n#[cfg(windows)]\nfn disable_handle_inheritance() {\n    use windows_sys::Win32::{\n        Foundation::{SetHandleInformation, HANDLE_FLAG_INHERIT},\n        System::Console::{GetStdHandle, STD_ERROR_HANDLE, STD_INPUT_HANDLE, STD_OUTPUT_HANDLE},\n    };\n\n    unsafe {\n        let std_err = GetStdHandle(STD_ERROR_HANDLE);\n        let std_in = GetStdHandle(STD_INPUT_HANDLE);\n        let std_out = GetStdHandle(STD_OUTPUT_HANDLE);\n\n        for handle in [std_err, std_in, std_out] {\n            SetHandleInformation(handle, HANDLE_FLAG_INHERIT, 0);\n            //info!(\" handle for... 
{:?}\", handle);\n            //CloseHandle(handle);\n        }\n    }\n}\n\n// This closes the stdout handle which was mapped to the stderr on the first invocation of the shim.\n// This releases first process which will give containerd the address of the namedpipe.\n#[cfg(windows)]\nfn signal_server_started() {\n    use windows_sys::Win32::System::Console::{GetStdHandle, STD_OUTPUT_HANDLE};\n\n    unsafe {\n        let std_out = GetStdHandle(STD_OUTPUT_HANDLE);\n\n        {\n            let handle = std_out;\n            CloseHandle(handle);\n        }\n    }\n}\n\n// This closes the stdout handle which was mapped to the stderr on the first invocation of the shim.\n// This releases first process which will give containerd the address of the namedpipe.\n#[cfg(unix)]\nfn signal_server_started() {\n    use libc::{dup2, STDERR_FILENO, STDOUT_FILENO};\n\n    unsafe {\n        if dup2(STDERR_FILENO, STDOUT_FILENO) < 0 {\n            panic!(\"Error closing pipe: {}\", std::io::Error::last_os_error())\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::thread;\n\n    use super::*;\n\n    #[test]\n    fn exit_signal() {\n        let signal = Arc::new(ExitSignal::default());\n\n        let cloned = Arc::clone(&signal);\n        let handle = thread::spawn(move || {\n            cloned.signal();\n        });\n\n        signal.wait();\n\n        if let Err(err) = handle.join() {\n            panic!(\"{:?}\", err);\n        }\n    }\n\n    struct Nop {}\n\n    struct NopTask {}\n    impl Task for NopTask {}\n\n    impl Shim for Nop {\n        type T = NopTask;\n\n        fn new(_runtime_id: &str, _args: &Flags, _config: &mut Config) -> Self {\n            Nop {}\n        }\n\n        fn start_shim(&mut self, _opts: StartOpts) -> Result<String> {\n            Ok(\"\".to_string())\n        }\n\n        fn delete_shim(&mut self) -> Result<DeleteResponse> {\n            Ok(DeleteResponse::default())\n        }\n\n        fn wait(&mut self) {}\n\n        fn 
create_task_service(&self, _publisher: RemotePublisher) -> Self::T {\n            NopTask {}\n        }\n    }\n\n    #[test]\n    fn no_namespace() {\n        let runtime_id = \"test\";\n        let res = bootstrap::<Nop>(runtime_id, None);\n        assert!(res.is_err());\n        assert!(res\n            .unwrap_err()\n            .to_string()\n            .contains(\"Shim namespace cannot be empty\"));\n    }\n}\n"
  },
  {
    "path": "crates/shim/src/synchronous/monitor.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::{\n    collections::HashMap,\n    sync::{\n        mpsc::{channel, Receiver, Sender},\n        LazyLock, Mutex,\n    },\n};\n\nuse log::{error, warn};\n\nuse crate::{\n    monitor::{ExitEvent, Subject, Topic},\n    Result,\n};\n\npub static MONITOR: LazyLock<Mutex<Monitor>> = LazyLock::new(|| {\n    Mutex::new(Monitor {\n        seq_id: 0,\n        subscribers: HashMap::new(),\n        topic_subs: HashMap::new(),\n    })\n});\n\npub fn monitor_subscribe(topic: Topic) -> Result<Subscription> {\n    let mut monitor = MONITOR.lock().unwrap();\n    let s = monitor.subscribe(topic)?;\n    Ok(s)\n}\n\npub fn monitor_notify_by_pid(pid: i32, exit_code: i32) -> Result<()> {\n    let monitor = MONITOR.lock().unwrap();\n    monitor.notify_by_pid(pid, exit_code)\n}\n\npub fn monitor_notify_by_exec(id: &str, exec_id: &str, exit_code: i32) -> Result<()> {\n    let monitor = MONITOR.lock().unwrap();\n    monitor.notify_by_exec(id, exec_id, exit_code)\n}\n\npub struct Monitor {\n    pub(crate) seq_id: i64,\n    pub(crate) subscribers: HashMap<i64, Subscriber>,\n    pub(crate) topic_subs: HashMap<Topic, Vec<i64>>,\n}\n\npub(crate) struct Subscriber {\n    pub(crate) topic: Topic,\n    pub(crate) tx: Sender<ExitEvent>,\n}\n\npub struct Subscription {\n    pub id: i64,\n    pub rx: Receiver<ExitEvent>,\n}\n\nimpl Monitor {\n    pub fn subscribe(&mut self, 
topic: Topic) -> Result<Subscription> {\n        let (tx, rx) = channel::<ExitEvent>();\n        let id = self.seq_id;\n        self.seq_id += 1;\n        let subscriber = Subscriber {\n            tx,\n            topic: topic.clone(),\n        };\n        self.subscribers.insert(id, subscriber);\n        self.topic_subs.entry(topic).or_default().push(id);\n        Ok(Subscription { id, rx })\n    }\n\n    pub fn notify_by_pid(&self, pid: i32, exit_code: i32) -> Result<()> {\n        let subject = Subject::Pid(pid);\n        self.notify_topic(&Topic::Pid, &subject, exit_code);\n        self.notify_topic(&Topic::All, &subject, exit_code);\n        Ok(())\n    }\n\n    pub fn notify_by_exec(&self, cid: &str, exec_id: &str, exit_code: i32) -> Result<()> {\n        let subject = Subject::Exec(cid.into(), exec_id.into());\n        self.notify_topic(&Topic::Exec, &subject, exit_code);\n        self.notify_topic(&Topic::All, &subject, exit_code);\n        Ok(())\n    }\n\n    fn notify_topic(&self, topic: &Topic, subject: &Subject, exit_code: i32) {\n        self.topic_subs.get(topic).map_or((), |subs| {\n            for i in subs {\n                self.subscribers.get(i).and_then(|sub| {\n                    sub.tx\n                        .send(ExitEvent {\n                            subject: subject.clone(),\n                            exit_code,\n                        })\n                        .map_err(|e| warn!(\"failed to send {}\", e))\n                        .ok()\n                });\n            }\n        })\n    }\n\n    pub fn unsubscribe(&mut self, id: i64) -> Result<()> {\n        let sub = self.subscribers.remove(&id);\n        if let Some(s) = sub {\n            self.topic_subs.get_mut(&s.topic).map(|v| {\n                v.iter().position(|&x| x == id).map(|i| {\n                    v.remove(i);\n                })\n            });\n        }\n        Ok(())\n    }\n}\n\nimpl Drop for Subscription {\n    fn drop(&mut self) {\n        let mut 
monitor = MONITOR.lock().unwrap();\n        monitor.unsubscribe(self.id).unwrap_or_else(|e| {\n            error!(\"failed to unsubscribe the subscription {}, {}\", self.id, e);\n        });\n    }\n}\n\npub fn wait_pid(pid: i32, s: Subscription) -> i32 {\n    loop {\n        if let Ok(ExitEvent {\n            subject: Subject::Pid(epid),\n            exit_code: code,\n        }) = s.rx.recv()\n        {\n            if pid == epid {\n                return code;\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "crates/shim/src/synchronous/publisher.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\n//! Implements a client to publish events from the shim back to containerd.\n\nuse client::{\n    protobuf::MessageDyn,\n    shim::{event::Envelope, events},\n    ttrpc::{self, context::Context},\n    types::empty,\n    Client, Events, EventsClient,\n};\nuse containerd_shim_protos as client;\n\n#[cfg(unix)]\nuse crate::util::connect;\n#[cfg(target_os = \"windows\")] // Prevent unused warning.\nuse crate::Error;\nuse crate::{\n    error::Result,\n    util::{convert_to_any, timestamp},\n};\n\n#[cfg(windows)]\nconst RETRY_COUNT: i32 = 3;\n\n/// Remote publisher connects to containerd's TTRPC endpoint to publish events from shim.\npub struct RemotePublisher {\n    client: EventsClient,\n}\n\nimpl RemotePublisher {\n    /// Connect to containerd's TTRPC endpoint.\n    ///\n    /// containerd uses `/run/containerd/containerd.sock.ttrpc` by default\n    pub fn new(address: impl AsRef<str>) -> Result<RemotePublisher> {\n        let client = Self::connect(address)?;\n\n        Ok(RemotePublisher {\n            client: EventsClient::new(client),\n        })\n    }\n\n    fn connect(address: impl AsRef<str>) -> Result<Client> {\n        #[cfg(unix)]\n        {\n            let fd = connect(address)?;\n            // Client::new() takes ownership of the RawFd.\n            Client::new(fd).map_err(|err| err.into())\n        }\n\n        #[cfg(windows)]\n     
   {\n            for i in 0..RETRY_COUNT {\n                match Client::connect(address.as_ref()) {\n                    Ok(client) => return Ok(client),\n                    Err(e) => match e {\n                        ttrpc::Error::Windows(231) => {\n                            // ERROR_PIPE_BUSY\n                            log::debug!(\"pipe busy during connect. try number {}\", i);\n                            std::thread::sleep(std::time::Duration::from_millis(5));\n                        }\n                        _ => return Err(e.into()),\n                    },\n                }\n            }\n            Err(other!(\"failed to connect to {}\", address.as_ref()))\n        }\n    }\n\n    /// Publish a new event.\n    ///\n    /// Event object can be anything that Protobuf able serialize (e.g. implement `Message` trait).\n    pub fn publish(\n        &self,\n        ctx: Context,\n        topic: &str,\n        namespace: &str,\n        event: Box<dyn MessageDyn>,\n    ) -> Result<()> {\n        let mut envelope = Envelope::new();\n        envelope.set_topic(topic.to_owned());\n        envelope.set_namespace(namespace.to_owned());\n        envelope.set_timestamp(timestamp()?);\n        envelope.set_event(convert_to_any(event)?);\n\n        let mut req = events::ForwardRequest::new();\n        req.set_envelope(envelope);\n\n        self.client.forward(ctx, &req)?;\n\n        Ok(())\n    }\n}\n\nimpl Events for RemotePublisher {\n    fn forward(\n        &self,\n        _ctx: &ttrpc::TtrpcContext,\n        req: events::ForwardRequest,\n    ) -> ttrpc::Result<empty::Empty> {\n        self.client.forward(Context::default(), &req)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use std::sync::{Arc, Barrier};\n\n    use client::{\n        api::{Empty, ForwardRequest},\n        events::task::TaskOOM,\n    };\n    use ttrpc::Server;\n\n    use super::*;\n    #[cfg(windows)]\n    use crate::synchronous::wait_socket_working;\n\n    struct FakeServer {}\n\n    impl 
Events for FakeServer {\n        fn forward(&self, _ctx: &ttrpc::TtrpcContext, req: ForwardRequest) -> ttrpc::Result<Empty> {\n            let env = req.envelope();\n            assert_eq!(env.topic(), \"/tasks/oom\");\n            Ok(Empty::default())\n        }\n    }\n\n    #[test]\n    fn test_connect() {\n        #[cfg(unix)]\n        let tmpdir = tempfile::tempdir().unwrap();\n        #[cfg(unix)]\n        let path = format!(\"{}/socket\", tmpdir.as_ref().to_str().unwrap());\n        #[cfg(windows)]\n        let path = \"\\\\\\\\.\\\\pipe\\\\test-pipe\".to_string();\n        let path1 = path.clone();\n\n        assert!(RemotePublisher::connect(\"a\".repeat(16384)).is_err());\n        assert!(RemotePublisher::connect(&path).is_err());\n\n        let barrier = Arc::new(Barrier::new(2));\n        let barrier2 = barrier.clone();\n        let thread = std::thread::spawn(move || {\n            let mut server = create_server(&path1);\n\n            server.start().unwrap();\n\n            #[cfg(windows)]\n            // make sure pipe is ready on windows\n            wait_socket_working(&path1, 5, 5).unwrap();\n\n            barrier2.wait();\n\n            barrier2.wait();\n            server.shutdown();\n        });\n\n        barrier.wait();\n        let client = RemotePublisher::new(&path).unwrap();\n        let mut msg = TaskOOM::new();\n        msg.set_container_id(\"test\".to_string());\n        client\n            .publish(Context::default(), \"/tasks/oom\", \"ns1\", Box::new(msg))\n            .unwrap();\n        barrier.wait();\n\n        thread.join().unwrap();\n    }\n\n    fn create_server(server_address: &str) -> Server {\n        #[cfg(unix)]\n        {\n            use std::os::unix::{io::AsRawFd, net::UnixListener};\n            let listener = UnixListener::bind(server_address).unwrap();\n            listener.set_nonblocking(true).unwrap();\n            let task = Box::new(FakeServer {}) as Box<dyn Events + Send + Sync>;\n            let service = 
client::create_events(task.into());\n            let server = Server::new()\n                .add_listener(listener.as_raw_fd())\n                .unwrap()\n                .register_service(service);\n            std::mem::forget(listener);\n            server\n        }\n\n        #[cfg(windows)]\n        {\n            let service = client::create_events(Arc::new(FakeServer {}));\n\n            Server::new()\n                .bind(server_address)\n                .unwrap()\n                .register_service(service)\n        }\n    }\n}\n"
  },
  {
    "path": "crates/shim/src/synchronous/util.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::{\n    fs::{rename, File, OpenOptions},\n    io::{Read, Write},\n    path::Path,\n};\n\nuse containerd_shim_protos::shim::oci::Options;\n#[cfg(unix)]\nuse libc::mode_t;\nuse log::warn;\n#[cfg(unix)]\nuse nix::sys::stat::Mode;\nuse oci_spec::runtime::Spec;\n\nuse crate::{\n    util::{JsonOptions, OPTIONS_FILE_NAME, RUNTIME_FILE_NAME},\n    Error,\n};\n\npub fn read_file_to_str<P: AsRef<Path>>(filename: P) -> crate::Result<String> {\n    let mut file = File::open(&filename).map_err(io_error!(\n        e,\n        \"open {}\",\n        filename.as_ref().to_string_lossy()\n    ))?;\n    let mut content: String = String::new();\n    file.read_to_string(&mut content).map_err(io_error!(\n        e,\n        \"read {}\",\n        filename.as_ref().to_string_lossy()\n    ))?;\n    Ok(content)\n}\n\npub fn read_options(bundle: impl AsRef<Path>) -> crate::Result<Options> {\n    let path = bundle.as_ref().join(OPTIONS_FILE_NAME);\n    let opts_str = read_file_to_str(path)?;\n    let json_opt: JsonOptions = serde_json::from_str(&opts_str)?;\n    Ok(json_opt.into())\n}\n\npub fn read_runtime(bundle: impl AsRef<Path>) -> crate::Result<String> {\n    let path = bundle.as_ref().join(RUNTIME_FILE_NAME);\n    read_file_to_str(path)\n}\n\npub fn read_address() -> crate::Result<String> {\n    let path = Path::new(\"address\");\n    
read_file_to_str(path)\n}\n\npub fn read_pid_from_file(pid_path: &Path) -> crate::Result<i32> {\n    let pid_str = read_file_to_str(pid_path)?;\n    let pid = pid_str.parse::<i32>()?;\n    Ok(pid)\n}\n\npub fn write_str_to_path(filename: &Path, s: &str) -> crate::Result<()> {\n    let file = filename\n        .file_name()\n        .ok_or_else(|| Error::InvalidArgument(String::from(\"pid path illegal\")))?;\n    let tmp_path = filename\n        .parent()\n        .map(|x| x.join(format!(\".{}\", file.to_str().unwrap_or(\"\"))))\n        .ok_or_else(|| Error::InvalidArgument(String::from(\"failed to create tmp path\")))?;\n    let tmp_path = tmp_path\n        .to_str()\n        .ok_or_else(|| Error::InvalidArgument(String::from(\"failed to get path\")))?;\n    let mut f = OpenOptions::new()\n        .write(true)\n        .create_new(true)\n        .open(tmp_path)\n        .map_err(io_error!(e, \"open {}\", filename.to_str().unwrap()))?;\n    f.write_all(s.as_bytes())\n        .map_err(io_error!(e, \"write tmp file\"))?;\n    rename(tmp_path, filename).map_err(io_error!(\n        e,\n        \"rename tmp file to {}\",\n        filename.to_str().unwrap()\n    ))?;\n    Ok(())\n}\n\npub fn write_options(bundle: &str, opt: &Options) -> crate::Result<()> {\n    let json_opt = JsonOptions::from(opt.to_owned());\n    let opts_str = serde_json::to_string(&json_opt)?;\n    let path = Path::new(bundle).join(OPTIONS_FILE_NAME);\n    write_str_to_path(path.as_path(), opts_str.as_str())\n}\n\npub fn write_runtime(bundle: &str, binary_name: &str) -> crate::Result<()> {\n    let path = Path::new(bundle).join(RUNTIME_FILE_NAME);\n    write_str_to_path(path.as_path(), binary_name)\n}\n\npub fn write_address(address: &str) -> crate::Result<()> {\n    let path = Path::new(\"address\");\n    write_str_to_path(path, address)\n}\n\npub fn read_spec_from_file(bundle: &str) -> crate::Result<Spec> {\n    let path = Path::new(bundle).join(\"config.json\");\n    
Spec::load(path).map_err(other_error!(\"read spec file\"))\n}\n\n#[cfg(unix)]\npub fn mkdir(path: impl AsRef<Path>, mode: mode_t) -> crate::Result<()> {\n    let path_buf = path.as_ref().to_path_buf();\n    if !path_buf.as_path().exists() {\n        let mode = Mode::from_bits(mode).ok_or_else(|| other!(\"invalid dir mode {}\", mode))?;\n        nix::unistd::mkdir(path_buf.as_path(), mode)?;\n    }\n    Ok(())\n}\n\n/// A helper to help remove temperate file or dir when it became useless\npub struct HelperRemoveFile {\n    path: String,\n}\n\nimpl HelperRemoveFile {\n    pub fn new(path: String) -> Self {\n        Self { path }\n    }\n}\n\nimpl Drop for HelperRemoveFile {\n    fn drop(&mut self) {\n        std::fs::remove_file(&self.path)\n            .unwrap_or_else(|e| warn!(\"remove dir {} error: {}\", &self.path, e));\n    }\n}\n\n#[cfg(target_os = \"windows\")]\n// helper to configure pause thread until signaled. Useful in attaching a debugger\n// https://github.com/microsoft/hcsshim/blob/v0.10.0-rc.7/cmd/containerd-shim-runhcs-v1/serve.go#L313-L315\n// use with https://github.com/moby/docker-signal\npub(crate) fn setup_debugger_event() {\n    use std::{env, io, process};\n\n    use log::{debug, error};\n    use windows_sys::Win32::System::Threading::{WaitForSingleObject, INFINITE};\n\n    let debugger = env::var(\"SHIM_DEBUGGER\").unwrap_or_else(|_| \"\".to_string());\n    if debugger.is_empty() {\n        return;\n    }\n    let event_name = format!(\"Global\\\\debugger-{}\", process::id());\n    debug!(\"Halting until signalled: {}\", event_name);\n    let e = match create_event(event_name) {\n        Ok(e) => e,\n        Err(e) => {\n            error!(\"failed to create event for debugger: {}\", e);\n            return;\n        }\n    };\n    match unsafe { WaitForSingleObject(e, INFINITE) } {\n        0 => {}\n        _ => {\n            error!(\n                \"failed to wait for debugger event: {}\",\n                io::Error::last_os_error()\n     
       );\n            return;\n        }\n    }\n    debug!(\"signal received, continuing\");\n}\n\n#[cfg(target_os = \"windows\")]\nfn create_event(name: String) -> crate::Result<isize> {\n    use std::{ffi::OsStr, io, os::windows::prelude::OsStrExt};\n\n    use windows_sys::Win32::System::Threading::CreateEventW;\n\n    let name = OsStr::new(name.as_str())\n        .encode_wide()\n        .chain(Some(0)) // add NULL termination\n        .collect::<Vec<_>>();\n\n    let result = unsafe { CreateEventW(std::ptr::null_mut(), 0, 0, name.as_ptr()) };\n    match result {\n        0 => Err(Error::Other(io::Error::last_os_error().to_string())),\n        _ => Ok(result),\n    }\n}\n"
  },
  {
    "path": "crates/shim/src/util.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\n#[cfg(unix)]\nuse std::os::unix::io::RawFd;\nuse std::time::{SystemTime, UNIX_EPOCH};\n\nuse serde::{Deserialize, Serialize};\nuse time::OffsetDateTime;\n\n#[cfg(feature = \"async\")]\npub use crate::asynchronous::util::*;\n#[cfg(not(feature = \"async\"))]\npub use crate::synchronous::util::*;\nuse crate::{\n    api::Options,\n    error::Result,\n    protos::protobuf::{\n        well_known_types::{any::Any, timestamp::Timestamp},\n        MessageDyn,\n    },\n};\n\npub const CONFIG_FILE_NAME: &str = \"config.json\";\npub const OPTIONS_FILE_NAME: &str = \"options.json\";\npub const RUNTIME_FILE_NAME: &str = \"runtime\";\n\n// Define JsonOptions here for Json serialize and deserialize\n// as rust-protobuf hasn't released serde_derive feature,\n// see https://github.com/stepancheg/rust-protobuf/#serde_derive-support\n#[derive(Debug, Deserialize, Serialize)]\n#[serde(deny_unknown_fields)]\npub struct JsonOptions {\n    #[serde(default)]\n    pub no_pivot_root: bool,\n    #[serde(default)]\n    pub no_new_keyring: bool,\n    pub shim_cgroup: ::std::string::String,\n    #[serde(default)]\n    pub io_uid: u32,\n    #[serde(default)]\n    pub io_gid: u32,\n    pub binary_name: ::std::string::String,\n    pub root: ::std::string::String,\n    #[serde(default)]\n    pub systemd_cgroup: bool,\n    pub criu_image_path: ::std::string::String,\n    pub 
criu_work_path: ::std::string::String,\n    #[serde(default)]\n    pub task_api_address: ::std::string::String,\n    #[serde(default)]\n    pub task_api_version: u32,\n}\n\nimpl From<Options> for JsonOptions {\n    fn from(o: Options) -> Self {\n        Self {\n            no_pivot_root: o.no_pivot_root,\n            no_new_keyring: o.no_new_keyring,\n            shim_cgroup: o.shim_cgroup,\n            io_uid: o.io_uid,\n            io_gid: o.io_gid,\n            binary_name: o.binary_name,\n            root: o.root,\n            systemd_cgroup: o.systemd_cgroup,\n            criu_image_path: o.criu_image_path,\n            criu_work_path: o.criu_work_path,\n            task_api_address: o.task_api_address,\n            task_api_version: o.task_api_version,\n        }\n    }\n}\n\nimpl From<JsonOptions> for Options {\n    fn from(j: JsonOptions) -> Self {\n        Self {\n            no_pivot_root: j.no_pivot_root,\n            no_new_keyring: j.no_new_keyring,\n            shim_cgroup: j.shim_cgroup,\n            io_uid: j.io_uid,\n            io_gid: j.io_gid,\n            binary_name: j.binary_name,\n            root: j.root,\n            systemd_cgroup: j.systemd_cgroup,\n            criu_image_path: j.criu_image_path,\n            criu_work_path: j.criu_work_path,\n            task_api_address: j.task_api_address,\n            task_api_version: j.task_api_version,\n            ..Default::default()\n        }\n    }\n}\n\n#[cfg(unix)]\npub fn connect(address: impl AsRef<str>) -> Result<RawFd> {\n    use std::os::fd::IntoRawFd;\n\n    use nix::{sys::socket::*, unistd::close};\n\n    let unix_addr = UnixAddr::new(address.as_ref())?;\n\n    // SOCK_CLOEXEC flag is Linux specific\n    #[cfg(target_os = \"linux\")]\n    const SOCK_CLOEXEC: SockFlag = SockFlag::SOCK_CLOEXEC;\n\n    #[cfg(not(target_os = \"linux\"))]\n    const SOCK_CLOEXEC: SockFlag = SockFlag::empty();\n\n    let fd = socket(AddressFamily::Unix, SockType::Stream, SOCK_CLOEXEC, 
None)?.into_raw_fd();\n\n    // MacOS doesn't support atomic creation of a socket descriptor with `SOCK_CLOEXEC` flag,\n    // so there is a chance of leak if fork + exec happens in between of these calls.\n    #[cfg(not(target_os = \"linux\"))]\n    {\n        use std::os::fd::BorrowedFd;\n\n        use nix::fcntl::{fcntl, FcntlArg, FdFlag};\n        // SAFETY: fd is a valid file descriptor that we just created\n        let borrowed_fd = unsafe { BorrowedFd::borrow_raw(fd) };\n        fcntl(borrowed_fd, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC)).inspect_err(|_| {\n            let _ = close(fd);\n        })?;\n    }\n\n    connect(fd, &unix_addr).inspect_err(|_| {\n        let _ = close(fd);\n    })?;\n\n    Ok(fd)\n}\n\npub fn timestamp() -> Result<Timestamp> {\n    let now = SystemTime::now().duration_since(UNIX_EPOCH)?;\n\n    let ts = Timestamp {\n        seconds: now.as_secs() as _,\n        nanos: now.subsec_nanos() as _,\n        ..Default::default()\n    };\n\n    Ok(ts)\n}\n\npub fn convert_to_timestamp(exited_at: Option<OffsetDateTime>) -> Timestamp {\n    let mut ts = Timestamp::new();\n    if let Some(ea) = exited_at {\n        ts.seconds = ea.unix_timestamp();\n        ts.nanos = ea.nanosecond() as i32;\n    }\n    ts\n}\n\npub fn convert_to_any(obj: Box<dyn MessageDyn>) -> Result<Any> {\n    let mut data = Vec::new();\n    obj.write_to_vec_dyn(&mut data)?;\n\n    let mut any = Any::new();\n    any.value = data;\n    any.type_url = obj.descriptor_dyn().full_name().to_string();\n\n    Ok(any)\n}\n\npub trait IntoOption\nwhere\n    Self: Sized,\n{\n    fn none_if<F>(self, callback: F) -> Option<Self>\n    where\n        F: Fn(&Self) -> bool,\n    {\n        if callback(&self) {\n            None\n        } else {\n            Some(self)\n        }\n    }\n}\n\nimpl<T> IntoOption for T {}\n\npub trait AsOption {\n    fn as_option(&self) -> Option<&Self>;\n}\n\nimpl AsOption for str {\n    fn as_option(&self) -> Option<&Self> {\n        if self.is_empty() {\n  
          None\n        } else {\n            Some(self)\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn test_timestamp() {\n        let ts = timestamp().unwrap();\n        assert!(ts.seconds > 0);\n    }\n}\n"
  },
  {
    "path": "crates/shim-protos/Cargo.toml",
    "content": "[package]\nname = \"containerd-shim-protos\"\nversion = \"0.11.0\"\nauthors = [\n    \"Maksym Pavlenko <pavlenko.maksym@gmail.com>\",\n    \"The containerd Authors\",\n]\ndescription = \"TTRPC bindings for containerd shim interfaces\"\nkeywords = [\"containerd\", \"shim\", \"containers\", \"ttrpc\", \"client\"]\ncategories = [\"api-bindings\", \"asynchronous\"]\n\nedition.workspace = true\nlicense.workspace = true\nrepository.workspace = true\nhomepage.workspace = true\n\n[features]\ndefault = []\nasync = [\"ttrpc/async\", \"async-trait\"]\nsandbox = []\ndocs = []\n\n[[example]]\nname = \"shim-proto-server\"\npath = \"examples/ttrpc-server.rs\"\n\n[[example]]\nname = \"shim-proto-client\"\npath = \"examples/ttrpc-client.rs\"\n\n[[example]]\nname = \"shim-proto-connect\"\npath = \"examples/connect.rs\"\n\n[[example]]\nname = \"shim-proto-server-async\"\npath = \"examples/ttrpc-server-async.rs\"\nrequired-features = [\"async\"]\n\n[[example]]\nname = \"shim-proto-client-async\"\npath = \"examples/ttrpc-client-async.rs\"\nrequired-features = [\"async\"]\n\n[[example]]\nname = \"shim-proto-connect-async\"\npath = \"examples/connect-async.rs\"\nrequired-features = [\"async\"]\n\n[dependencies]\nasync-trait = { workspace = true, optional = true }\nprotobuf = { version = \"3.7\", default-features = false }\nttrpc = { version = \"0.9\", default-features = false, features = [\"sync\"] }\n\n[build-dependencies]\nttrpc-codegen = \"0.6.0\"\n\n[dev-dependencies]\nctrlc = { version = \"3.5\", default-features = false, features = [\"termination\"] }\nsimple_logger = { workspace = true, features = [\"stderr\"] }\ntokio = { workspace = true, features = [\"macros\", \"rt-multi-thread\"] }\ncrossbeam = { workspace = true, features = [\"crossbeam-channel\"] } # Used by create_ttrpc_context()\n\n[package.metadata.docs.rs]\nfeatures = [\"docs\"]\n"
  },
  {
    "path": "crates/shim-protos/README.md",
    "content": "# Shim protos and client for containerd\n\n[![Crates.io](https://img.shields.io/crates/v/containerd-shim-protos)](https://crates.io/crates/containerd-shim-protos)\n[![docs.rs](https://img.shields.io/docsrs/containerd-shim-protos)](https://docs.rs/containerd-shim-protos/latest/containerd_shim_protos/)\n[![Crates.io](https://img.shields.io/crates/l/containerd-shim-protos)](https://github.com/containerd/rust-extensions/blob/main/LICENSE)\n[![CI](https://github.com/containerd/rust-extensions/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/containerd/rust-extensions/actions/workflows/ci.yml)\n\n`containerd-shim-protos` contains TTRPC bindings and client/server code to interact with containerd's runtime v2 shims.\n\n## Runtime\nThis crate is mainly expected to be useful to interact with containerd's shim runtime.\nRuntime v2 introduces a first class shim API for runtime authors to integrate with containerd.\nThe shim API is minimal and scoped to the execution lifecycle of a container.\n\nTo learn how containerd's shim v2 runtime works in details, please refer to the [documentation](https://github.com/containerd/containerd/blob/main/core/runtime/v2/README.md).\n\n## Design\nThe `containerd-shim-protos` crate provides [Protobuf](https://github.com/protocolbuffers/protobuf.git) message\nand [TTRPC](https://github.com/containerd/ttrpc.git) service definitions for the\n[Containerd shim v2](https://github.com/containerd/containerd/blob/main/api/runtime/task/v2/shim.proto) protocol.\n\nThe message and service definitions are auto-generated from protobuf source files under `vendor/`\nby using [ttrpc-codegen](https://github.com/containerd/ttrpc-rust/tree/master/ttrpc-codegen). 
So please do not\nedit those auto-generated source files.\n\nIf upgrading/modification is needed, please follow the steps:\n - Synchronize the latest protobuf source files from the upstream projects into directory 'vendor/'.\n - Re-generate the source files by `cargo build --features=generate_bindings`.\n - Commit the synchronized protobuf source files and auto-generated source files, keeping them in synchronization.\n\n## Usage\nAdd `containerd-shim-protos` as a dependency in your `Cargo.toml`\n\n```toml\n[dependencies]\ncontainerd-shim-protos = \"0.11\"\n```\n\nBasic client code looks as follows:\n\n```rust,no_run\nuse containerd_shim_protos as client;\n\nlet client = client::Client::connect(\"unix:///containerd-shim/shim.sock\").expect(\"Failed to connect to shim\");\nlet task_client = client::TaskClient::new(client);\n\nlet context = client::ttrpc::context::with_timeout(0);\n\nlet req = client::api::ConnectRequest {\n    id: String::from(\"1\"),\n    ..Default::default()\n};\n\nlet resp = task_client.connect(context, &req).expect(\"Connect request failed\");\n```\n\n## Examples\n\n- [TTRPC shim client](./examples/ttrpc-client.rs)\n- [TTRPC shim server](./examples/ttrpc-server.rs)\n- [TTRPC client connect](./examples/connect.rs).\n\nThe way to build the examples:\n```bash\n# build sync connect, client and server\n$ cargo build --example shim-proto-connect\n$ sudo ./shim-proto-connect unix:///containerd-shim/shim_socket_path.sock\n$ cargo build --example shim-proto-client\n$ cargo build --example shim-proto-server\n\n# build async connect, client and server\n$ cargo build --example shim-proto-connect-async --features async\n$ cargo build --example shim-proto-client-async --features async\n$ cargo build --example shim-proto-server-async --features async\n```\n"
  },
  {
    "path": "crates/shim-protos/build.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::{\n    env, fs,\n    fs::File,\n    io::{BufRead, BufReader},\n    path::PathBuf,\n};\n\nuse ttrpc_codegen::{Codegen, Customize, ProtobufCustomize};\n\nfn main() {\n    genmodule(\n        \"types\",\n        &[\n            \"vendor/gogoproto/gogo.proto\",\n            \"vendor/google/protobuf/empty.proto\",\n            \"vendor/github.com/containerd/containerd/api/types/fieldpath.proto\",\n            \"vendor/github.com/containerd/containerd/api/types/mount.proto\",\n            \"vendor/github.com/containerd/containerd/api/types/task/task.proto\",\n            \"vendor/github.com/containerd/containerd/api/types/introspection.proto\",\n            #[cfg(feature = \"sandbox\")]\n            \"vendor/github.com/containerd/containerd/api/types/platform.proto\",\n        ],\n        false,\n    );\n\n    genmodule(\n        \"cgroups\",\n        &[\"vendor/github.com/containerd/cgroups/stats/v1/metrics.proto\"],\n        false,\n    );\n\n    genmodule(\n        \"cgroups_v2\",\n        &[\"vendor/github.com/containerd/cgroups/cgroup2/stats/metrics.proto\"],\n        false,\n    );\n\n    genmodule(\n        \"stats\",\n        &[\"vendor/microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/stats.proto\"],\n        false,\n    );\n\n    genmodule(\n        \"events\",\n        &[\n            
\"vendor/github.com/containerd/containerd/api/types/mount.proto\",\n            \"vendor/github.com/containerd/containerd/api/events/container.proto\",\n            \"vendor/github.com/containerd/containerd/api/events/content.proto\",\n            \"vendor/github.com/containerd/containerd/api/events/image.proto\",\n            \"vendor/github.com/containerd/containerd/api/events/namespace.proto\",\n            \"vendor/github.com/containerd/containerd/api/events/sandbox.proto\",\n            \"vendor/github.com/containerd/containerd/api/events/snapshot.proto\",\n            \"vendor/github.com/containerd/containerd/api/events/task.proto\",\n        ],\n        false,\n    );\n\n    genmodule(\n        \"shim\",\n        &[\n            \"vendor/github.com/containerd/containerd/api/types/runc/options/oci.proto\",\n            \"vendor/github.com/containerd/containerd/api/runtime/task/v2/shim.proto\",\n            \"vendor/github.com/containerd/containerd/api/types/event.proto\",\n            \"vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto\",\n        ],\n        false,\n    );\n\n    #[cfg(feature = \"async\")]\n    {\n        genmodule(\n            \"shim_async\",\n            &[\n                \"vendor/github.com/containerd/containerd/api/runtime/task/v2/shim.proto\",\n                \"vendor/github.com/containerd/containerd/api/types/event.proto\",\n                \"vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto\",\n            ],\n            true,\n        );\n    }\n\n    #[cfg(feature = \"sandbox\")]\n    {\n        genmodule(\n            \"sandbox\",\n            &[\n                \"vendor/github.com/containerd/containerd/api/types/metrics.proto\",\n                \"vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.proto\",\n            ],\n            false,\n        );\n\n        #[cfg(feature = \"async\")]\n        genmodule(\n            
\"sandbox_async\",\n            &[\n                \"vendor/github.com/containerd/containerd/api/types/metrics.proto\",\n                \"vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.proto\",\n            ],\n            true,\n        );\n    }\n}\n\nfn genmodule(name: &str, inputs: &[&str], async_all: bool) {\n    let mut out_path = PathBuf::from(env::var(\"OUT_DIR\").unwrap());\n    out_path.push(name);\n\n    fs::create_dir_all(&out_path).unwrap();\n\n    Codegen::new()\n        .inputs(inputs)\n        // Order matters: containerd/api first so containerd's internal\n        // imports like `types/fieldpath.proto` (new in v2.3.0) resolve\n        // there. `vendor/` second for third-party imports\n        // (google/, gogoproto/, microsoft/, github.com/containerd/cgroups/).\n        .include(\"vendor/github.com/containerd/containerd/api/\")\n        .include(\"vendor/\")\n        .rust_protobuf()\n        .rust_protobuf_customize(\n            ProtobufCustomize::default()\n                .gen_mod_rs(true)\n                .generate_accessors(true),\n        )\n        .customize(Customize {\n            async_all,\n            ..Default::default()\n        })\n        .out_dir(&out_path)\n        .run()\n        .expect(\"Failed to generate protos\");\n\n    // Find all *.rs files generated by TTRPC codegen\n    let files = fs::read_dir(&out_path)\n        .unwrap()\n        .filter_map(|entry| {\n            let entry = entry.unwrap();\n            if !entry.file_type().unwrap().is_file() {\n                None\n            } else {\n                Some(entry.path())\n            }\n        })\n        .collect::<Vec<_>>();\n\n    // `include!` doesn't handle files with attributes:\n    // - https://github.com/rust-lang/rust/issues/18810\n    // - https://github.com/rust-lang/rfcs/issues/752\n    // Remove all lines that start with:\n    // - #![allow(unknown_lints)]\n    // - #![cfg_attr(rustfmt, rustfmt::skip)]\n    //\n    for 
path in files {\n        let file = File::open(&path).unwrap();\n\n        let joined = BufReader::new(file)\n            .lines()\n            .filter_map(|line| {\n                let line = line.unwrap();\n                if line.starts_with(\"#!\") || line.starts_with(\"//!\") {\n                    None\n                } else {\n                    Some(line)\n                }\n            })\n            .collect::<Vec<_>>()\n            .join(\"\\r\\n\");\n\n        fs::write(&path, joined).unwrap();\n    }\n}\n"
  },
  {
    "path": "crates/shim-protos/examples/connect-async.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::env;\n\nuse client::{api, shim::shim_ttrpc_async::TaskClient};\nuse containerd_shim_protos as client;\nuse ttrpc::{asynchronous::Client, context::Context};\n\n#[tokio::main]\nasync fn main() {\n    let args: Vec<String> = env::args().collect();\n\n    let socket_path = args\n        .get(1)\n        .ok_or(\"First argument must be shim socket path\")\n        .unwrap();\n\n    let pid = args.get(2).map(|str| str.to_owned()).unwrap_or_default();\n\n    println!(\"Connecting to {}...\", socket_path);\n    let client = Client::connect(socket_path)\n        .await\n        .expect(\"Failed to connect to shim\");\n\n    let task_client = TaskClient::new(client);\n\n    let context = Context::default();\n\n    let req = api::ConnectRequest {\n        id: pid,\n        ..Default::default()\n    };\n\n    println!(\"Sending `Connect` request...\");\n    let resp = task_client\n        .connect(context.clone(), &req)\n        .await\n        .expect(\"Connect request failed\");\n    println!(\"Connect response: {:?}\", resp);\n\n    let req = api::ShutdownRequest {\n        id: \"123\".to_string(),\n        now: true,\n        ..Default::default()\n    };\n\n    println!(\"Sending `Shutdown` request...\");\n    let resp = task_client\n        .shutdown(context, &req)\n        .await\n        .expect(\"Failed to send shutdown request\");\n\n    
println!(\"Shutdown response: {:?}\", resp)\n}\n"
  },
  {
    "path": "crates/shim-protos/examples/connect.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::env;\n\nuse client::api;\nuse containerd_shim_protos as client;\nuse ttrpc::context::Context;\n\nfn main() {\n    let args: Vec<String> = env::args().collect();\n\n    let socket_path = args\n        .get(1)\n        .ok_or(\"First argument must be shim socket path\")\n        .unwrap();\n\n    let pid = args.get(2).map(|str| str.to_owned()).unwrap_or_default();\n\n    println!(\"Connecting to {}...\", socket_path);\n    let client = client::Client::connect(socket_path).expect(\"Failed to connect to shim\");\n\n    let task_client = client::TaskClient::new(client);\n\n    let context = Context::default();\n\n    let req = api::ConnectRequest {\n        id: pid,\n        ..Default::default()\n    };\n\n    println!(\"Sending `Connect` request...\");\n    let resp = task_client\n        .connect(context.clone(), &req)\n        .expect(\"Connect request failed\");\n    println!(\"Connect response: {:?}\", resp);\n\n    let req = api::ShutdownRequest {\n        id: \"123\".to_string(),\n        now: true,\n        ..Default::default()\n    };\n\n    println!(\"Sending `Shutdown` request...\");\n    let resp = task_client\n        .shutdown(context, &req)\n        .expect(\"Failed to send shutdown request\");\n\n    println!(\"Shutdown response: {:?}\", resp)\n}\n"
  },
  {
    "path": "crates/shim-protos/examples/ttrpc-client-async.rs",
    "content": "// Copyright (c) 2019 Ant Financial\n// Copyright (c) 2021 Ant Group\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\nuse containerd_shim_protos::{api::CreateTaskRequest, shim::shim_ttrpc_async::TaskClient};\nuse ttrpc::{\n    asynchronous::Client,\n    context::{self, Context},\n};\n\nfn default_ctx() -> Context {\n    let mut ctx = context::with_timeout(0);\n    ctx.add(\"key-1\".to_string(), \"value-1-1\".to_string());\n    ctx.add(\"key-1\".to_string(), \"value-1-2\".to_string());\n    ctx.set(\"key-2\".to_string(), vec![\"value-2\".to_string()]);\n\n    ctx\n}\n\n#[tokio::main]\nasync fn main() {\n    let c = Client::connect(\"unix:///tmp/shim-proto-ttrpc-001\")\n        .await\n        .unwrap();\n    let task = TaskClient::new(c);\n    let now = std::time::Instant::now();\n\n    let mut req = CreateTaskRequest::new();\n    req.set_id(\"id1\".to_owned());\n    println!(\n        \"OS Thread {:?} - task.create() started: {:?}\",\n        std::thread::current().id(),\n        now.elapsed(),\n    );\n    let resp = task.create(default_ctx(), &req).await.unwrap();\n    assert_eq!(resp.pid, 0x10c0);\n    println!(\n        \"OS Thread {:?} - task.create() -> {:?} ended: {:?}\",\n        std::thread::current().id(),\n        resp,\n        now.elapsed(),\n    );\n}\n"
  },
  {
    "path": "crates/shim-protos/examples/ttrpc-client.rs",
    "content": "// Copyright (c) 2019 Ant Financial\n// Copyright (c) 2021 Ant Group\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\nuse containerd_shim_protos::{api::CreateTaskRequest, TaskClient};\nuse ttrpc::{\n    context::{self, Context},\n    Client,\n};\n\nfn main() {\n    let c = Client::connect(\"unix:///tmp/shim-proto-ttrpc-001\").unwrap();\n    let task = TaskClient::new(c);\n    let now = std::time::Instant::now();\n\n    let mut req = CreateTaskRequest::new();\n    req.set_id(\"id1\".to_owned());\n    println!(\n        \"OS Thread {:?} - task.create() started: {:?}\",\n        std::thread::current().id(),\n        now.elapsed(),\n    );\n    let resp = task.create(default_ctx(), &req).unwrap();\n    assert_eq!(resp.pid, 0x10c0);\n    println!(\n        \"OS Thread {:?} - task.create() -> {:?} ended: {:?}\",\n        std::thread::current().id(),\n        resp,\n        now.elapsed(),\n    );\n}\n\nfn default_ctx() -> Context {\n    let mut ctx = context::with_timeout(0);\n    ctx.add(\"key-1\".to_string(), \"value-1-1\".to_string());\n    ctx.add(\"key-1\".to_string(), \"value-1-2\".to_string());\n    ctx.set(\"key-2\".to_string(), vec![\"value-2\".to_string()]);\n\n    ctx\n}\n"
  },
  {
    "path": "crates/shim-protos/examples/ttrpc-server-async.rs",
    "content": "// Copyright (c) 2019 Ant Financial\n// Copyright (c) 2021 Ant Group\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\nuse std::{sync::Arc, thread};\n\nuse async_trait::async_trait;\nuse containerd_shim_protos::{\n    api::{CreateTaskRequest, CreateTaskResponse},\n    shim::shim_ttrpc_async::{create_task, Task},\n};\nuse ttrpc::asynchronous::Server;\n\n#[derive(Debug, PartialEq)]\nstruct FakeServer {\n    magic: u32,\n}\n\nimpl FakeServer {\n    fn new() -> Self {\n        FakeServer { magic: 0xadcbdacf }\n    }\n}\n\n#[async_trait]\nimpl Task for FakeServer {\n    async fn create(\n        &self,\n        ctx: &::ttrpc::asynchronous::TtrpcContext,\n        req: CreateTaskRequest,\n    ) -> ::ttrpc::Result<CreateTaskResponse> {\n        let mut resp = CreateTaskResponse::default();\n        let md = &ctx.metadata;\n        let v1 = md.get(\"key-1\").unwrap();\n        let v2 = md.get(\"key-2\").unwrap();\n\n        assert_eq!(v1[0], \"value-1-1\");\n        assert_eq!(v1[1], \"value-1-2\");\n        assert_eq!(v2[0], \"value-2\");\n        assert_eq!(&req.id, \"id1\");\n\n        resp.set_pid(0x10c0);\n\n        Ok(resp)\n    }\n}\n\n#[tokio::main]\nasync fn main() {\n    simple_logger::SimpleLogger::new().init().unwrap();\n\n    let tservice = create_task(Arc::new(FakeServer::new()));\n\n    let mut server = Server::new()\n        .bind(\"unix:///tmp/shim-proto-ttrpc-001\")\n        .unwrap()\n        
.register_service(tservice);\n\n    server.start().await.unwrap();\n\n    // Hold the main thread until receiving signal SIGTERM\n    let (tx, rx) = std::sync::mpsc::channel();\n    thread::spawn(move || {\n        ctrlc::set_handler(move || {\n            tx.send(()).unwrap();\n        })\n        .expect(\"Error setting Ctrl-C handler\");\n        println!(\"Server is running, press Ctrl + C to exit\");\n    });\n\n    rx.recv().unwrap();\n}\n"
  },
  {
    "path": "crates/shim-protos/examples/ttrpc-server.rs",
    "content": "// Copyright (c) 2019 Ant Financial\n// Copyright (c) 2021 Ant Group\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\nuse std::{sync::Arc, thread};\n\nuse containerd_shim_protos::{\n    api::{CreateTaskRequest, CreateTaskResponse},\n    create_task, Task,\n};\nuse ttrpc::Server;\n\n#[derive(Debug, PartialEq)]\nstruct FakeServer {\n    magic: u32,\n}\n\nimpl FakeServer {\n    fn new() -> Self {\n        FakeServer { magic: 0xadcbdacf }\n    }\n}\n\nimpl Task for FakeServer {\n    fn create(\n        &self,\n        ctx: &::ttrpc::TtrpcContext,\n        req: CreateTaskRequest,\n    ) -> ::ttrpc::Result<CreateTaskResponse> {\n        let mut resp = CreateTaskResponse::default();\n        let md = &ctx.metadata;\n        let v1 = md.get(\"key-1\").unwrap();\n        let v2 = md.get(\"key-2\").unwrap();\n\n        assert_eq!(v1[0], \"value-1-1\");\n        assert_eq!(v1[1], \"value-1-2\");\n        assert_eq!(v2[0], \"value-2\");\n        assert_eq!(&req.id, \"id1\");\n\n        resp.set_pid(0x10c0);\n\n        Ok(resp)\n    }\n}\n\nfn main() {\n    simple_logger::SimpleLogger::new().init().unwrap();\n\n    let tservice = create_task(Arc::new(FakeServer::new()));\n\n    let mut server = Server::new()\n        .bind(\"unix:///tmp/shim-proto-ttrpc-001\")\n        .unwrap()\n        .register_service(tservice);\n\n    server.start().unwrap();\n\n    // Hold the main thread until receiving signal SIGTERM\n    let (tx, rx) = 
std::sync::mpsc::channel();\n    thread::spawn(move || {\n        ctrlc::set_handler(move || {\n            tx.send(()).unwrap();\n        })\n        .expect(\"Error setting Ctrl-C handler\");\n        println!(\"Server is running, press Ctrl + C to exit\");\n    });\n\n    rx.recv().unwrap();\n}\n"
  },
  {
    "path": "crates/shim-protos/rsync.txt",
    "content": "api/events/*.proto\napi/types/*.proto\napi/types/task/*.proto\napi/services/ttrpc/events/v1/*.proto\napi/types/runc/options/oci.proto\napi/runtime/sandbox/v1/sandbox.proto\napi/runtime/task/v2/shim.proto\n"
  },
  {
    "path": "crates/shim-protos/src/cgroups.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\npub mod metrics {\n    include!(concat!(env!(\"OUT_DIR\"), \"/cgroups/metrics.rs\"));\n}\n\nmod gogo {\n    pub use crate::types::gogo::*;\n}\n"
  },
  {
    "path": "crates/shim-protos/src/cgroups_v2.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\npub mod metrics {\n    include!(concat!(env!(\"OUT_DIR\"), \"/cgroups_v2/metrics.rs\"));\n}\n"
  },
  {
    "path": "crates/shim-protos/src/events.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\npub mod container {\n    include!(concat!(env!(\"OUT_DIR\"), \"/events/container.rs\"));\n}\n\npub mod content {\n    include!(concat!(env!(\"OUT_DIR\"), \"/events/content.rs\"));\n}\n\npub mod image {\n    include!(concat!(env!(\"OUT_DIR\"), \"/events/image.rs\"));\n}\n\npub mod namespace {\n    include!(concat!(env!(\"OUT_DIR\"), \"/events/namespace.rs\"));\n}\n\npub mod snapshot {\n    include!(concat!(env!(\"OUT_DIR\"), \"/events/snapshot.rs\"));\n}\n\npub mod task {\n    include!(concat!(env!(\"OUT_DIR\"), \"/events/task.rs\"));\n}\n\npub mod sandbox {\n    include!(concat!(env!(\"OUT_DIR\"), \"/events/sandbox.rs\"));\n}\n\nmod mount {\n    pub use crate::types::mount::*;\n}\n\nmod gogo {\n    pub use crate::types::gogo::*;\n}\n\nmod fieldpath {\n    pub use crate::types::fieldpath::*;\n}\n"
  },
  {
    "path": "crates/shim-protos/src/lib.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\n#![cfg_attr(feature = \"docs\", doc = include_str!(\"../README.md\"))]\n#![allow(warnings)]\n\npub use protobuf;\npub use ttrpc;\n\npub mod cgroups;\npub mod cgroups_v2;\npub mod events;\n#[cfg(feature = \"sandbox\")]\nmod sandbox;\npub mod shim;\npub mod types;\npub mod windows;\n\n/// Includes event names shims can publish to containerd.\npub mod topics;\n\npub mod shim_sync {\n    /// TTRPC client reexport for easier access.\n    pub use ttrpc::Client;\n\n    /// Shim events service.\n    pub use crate::shim::events_ttrpc::{create_events, Events, EventsClient};\n    /// Shim task service.\n    pub use crate::shim::shim_ttrpc::{create_task, Task, TaskClient};\n}\n\npub use shim_sync::*;\n\n#[cfg(feature = \"async\")]\npub mod shim_async {\n    /// TTRPC client reexport for easier access.\n    pub use ttrpc::asynchronous::Client;\n\n    /// Shim events service.\n    pub use crate::shim::events_ttrpc_async::{create_events, Events, EventsClient};\n    /// Shim task service.\n    pub use crate::shim::shim_ttrpc_async::{create_task, Task, TaskClient};\n}\n\n/// Reexport auto-generated public data structures.\npub mod api {\n    pub use crate::shim::{empty::*, events::*, mount::*, shim::*, task::*};\n}\n\n#[cfg(feature = \"sandbox\")]\npub use sandbox::sandbox as sandbox_api;\n\n#[cfg(feature = \"sandbox\")]\npub mod sandbox_sync {\n    pub use 
crate::sandbox::sandbox_ttrpc::*;\n}\n\n#[cfg(all(feature = \"sandbox\", feature = \"async\"))]\npub mod sandbox_async {\n    pub use crate::sandbox::sandbox_async::*;\n}\n"
  },
  {
    "path": "crates/shim-protos/src/sandbox.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\npub mod sandbox {\n    include!(concat!(env!(\"OUT_DIR\"), \"/sandbox/sandbox.rs\"));\n}\n\npub mod metrics {\n    include!(concat!(env!(\"OUT_DIR\"), \"/sandbox/metrics.rs\"));\n}\n\npub mod sandbox_ttrpc {\n    include!(concat!(env!(\"OUT_DIR\"), \"/sandbox/sandbox_ttrpc.rs\"));\n}\n\n#[cfg(feature = \"async\")]\npub mod sandbox_async {\n    include!(concat!(env!(\"OUT_DIR\"), \"/sandbox_async/sandbox_ttrpc.rs\"));\n}\n\npub(crate) mod mount {\n    pub use crate::types::mount::*;\n}\n\npub(crate) mod platform {\n    pub use crate::types::platform::*;\n}\n"
  },
  {
    "path": "crates/shim-protos/src/shim.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\npub mod oci {\n    include!(concat!(env!(\"OUT_DIR\"), \"/shim/oci.rs\"));\n}\n\npub mod event {\n    include!(concat!(env!(\"OUT_DIR\"), \"/shim/event.rs\"));\n}\n\npub mod events {\n    include!(concat!(env!(\"OUT_DIR\"), \"/shim/events.rs\"));\n}\n\npub mod events_ttrpc {\n    include!(concat!(env!(\"OUT_DIR\"), \"/shim/events_ttrpc.rs\"));\n}\n\n#[cfg(feature = \"async\")]\npub mod events_ttrpc_async {\n    include!(concat!(env!(\"OUT_DIR\"), \"/shim_async/events_ttrpc.rs\"));\n}\n\npub mod shim {\n    include!(concat!(env!(\"OUT_DIR\"), \"/shim/shim.rs\"));\n}\n\npub mod shim_ttrpc {\n    include!(concat!(env!(\"OUT_DIR\"), \"/shim/shim_ttrpc.rs\"));\n}\n\n#[cfg(feature = \"async\")]\npub mod shim_ttrpc_async {\n    include!(concat!(env!(\"OUT_DIR\"), \"/shim_async/shim_ttrpc.rs\"));\n}\n\npub(crate) mod empty {\n    pub use crate::types::empty::*;\n}\n\npub(crate) mod mount {\n    pub use crate::types::mount::*;\n}\n\npub(crate) mod task {\n    pub use crate::types::task::*;\n}\n\nmod fieldpath {\n    pub use crate::types::fieldpath::*;\n}\n\nmod gogo {\n    pub use crate::types::gogo::*;\n}\n\n/// Shim events service.\npub use events_ttrpc::{create_events, Events, EventsClient};\n/// Shim task service.\npub use shim_ttrpc::{create_task, Task, TaskClient};\n"
  },
  {
    "path": "crates/shim-protos/src/topics.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\n//! Task event topic typically used in shim implementations.\n\npub const TASK_CREATE_EVENT_TOPIC: &str = \"/tasks/create\";\npub const TASK_START_EVENT_TOPIC: &str = \"/tasks/start\";\npub const TASK_OOM_EVENT_TOPIC: &str = \"/tasks/oom\";\npub const TASK_EXIT_EVENT_TOPIC: &str = \"/tasks/exit\";\npub const TASK_DELETE_EVENT_TOPIC: &str = \"/tasks/delete\";\npub const TASK_EXEC_ADDED_EVENT_TOPIC: &str = \"/tasks/exec-added\";\npub const TASK_EXEC_STARTED_EVENT_TOPIC: &str = \"/tasks/exec-started\";\npub const TASK_PAUSED_EVENT_TOPIC: &str = \"/tasks/paused\";\npub const TASK_RESUMED_EVENT_TOPIC: &str = \"/tasks/resumed\";\npub const TASK_CHECKPOINTED_EVENT_TOPIC: &str = \"/tasks/checkpointed\";\npub const TASK_UNKNOWN_TOPIC: &str = \"/tasks/?\";\n"
  },
  {
    "path": "crates/shim-protos/src/types.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\npub mod empty {\n    include!(concat!(env!(\"OUT_DIR\"), \"/types/empty.rs\"));\n}\n\npub mod gogo {\n    include!(concat!(env!(\"OUT_DIR\"), \"/types/gogo.rs\"));\n}\n\npub mod mount {\n    include!(concat!(env!(\"OUT_DIR\"), \"/types/mount.rs\"));\n}\n\npub mod task {\n    include!(concat!(env!(\"OUT_DIR\"), \"/types/task.rs\"));\n}\n\npub mod fieldpath {\n    include!(concat!(env!(\"OUT_DIR\"), \"/types/fieldpath.rs\"));\n}\n\npub mod introspection {\n    include!(concat!(env!(\"OUT_DIR\"), \"/types/introspection.rs\"));\n}\n#[cfg(feature = \"sandbox\")]\npub mod platform {\n    include!(concat!(env!(\"OUT_DIR\"), \"/types/platform.rs\"));\n}\n"
  },
  {
    "path": "crates/shim-protos/src/windows.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\npub mod stats {\n    include!(concat!(env!(\"OUT_DIR\"), \"/stats/stats.rs\"));\n}\n\npub mod metrics {\n    pub use crate::cgroups::metrics::{file_descriptor, Metrics};\n}\n"
  },
  {
    "path": "crates/shim-protos/tests/ttrpc.rs",
    "content": "// Copyright (c) 2021 Alibaba Cloud\n//\n// SPDX-License-Identifier: Apache-2.0\n//\n\nuse std::{\n    collections::HashMap,\n    sync::{mpsc::channel, Arc},\n};\n\nuse containerd_shim_protos::{\n    api::{CreateTaskRequest, CreateTaskResponse, DeleteRequest},\n    shim::shim_ttrpc::create_task,\n    Task,\n};\nuse protobuf::{CodedInputStream, CodedOutputStream, Message};\nuse ttrpc::{Code, MessageHeader, Request, Response, TtrpcContext};\n\nconst MESSAGE_TYPE_REQUEST: u8 = 0x1;\nconst MESSAGE_TYPE_RESPONSE: u8 = 0x2;\n\n#[derive(Debug, PartialEq)]\nstruct FakeServer {\n    magic: u32,\n}\n\nimpl FakeServer {\n    fn new() -> Self {\n        FakeServer { magic: 0xadcbdacf }\n    }\n}\n\nimpl Task for FakeServer {\n    fn create(\n        &self,\n        _ctx: &::ttrpc::TtrpcContext,\n        req: CreateTaskRequest,\n    ) -> ::ttrpc::Result<CreateTaskResponse> {\n        let mut resp = CreateTaskResponse::default();\n\n        assert_eq!(&req.id, \"test1\");\n        resp.set_pid(0x10c0);\n        assert_eq!(resp.compute_size(), 3);\n\n        Ok(resp)\n    }\n}\n\nfn create_ttrpc_context() -> (\n    TtrpcContext,\n    std::sync::mpsc::Receiver<(MessageHeader, Vec<u8>)>,\n) {\n    let (res_tx, rx) = channel();\n    let mh = MessageHeader {\n        type_: MESSAGE_TYPE_REQUEST,\n        ..Default::default()\n    };\n\n    let (_, cancel_rx) = crossbeam::channel::unbounded();\n\n    let ctx = TtrpcContext {\n        fd: -1,\n        mh,\n        res_tx,\n        metadata: HashMap::new(),\n        timeout_nano: 0,\n        cancel_rx,\n    };\n\n    (ctx, rx)\n}\n\n#[test]\nfn test_task_method_num() {\n    let task = create_task(Arc::new(FakeServer::new()));\n    assert_eq!(task.len(), 17);\n}\n\n#[test]\nfn test_create_task() {\n    let mut req = CreateTaskRequest::default();\n    req.set_id(\"test1\".to_owned());\n    let mut buf = Vec::with_capacity(req.compute_size() as usize);\n    {\n        let mut s = CodedOutputStream::vec(&mut buf);\n        
req.write_to(&mut s).unwrap();\n        s.flush().unwrap();\n    }\n    assert_eq!(buf.len(), 7);\n\n    let (ctx, rx) = create_ttrpc_context();\n    let mut request = Request::new();\n    request.set_service(\"containerd.task.v2.Task\".to_owned());\n    request.set_method(\"Create\".to_owned());\n    request.set_payload(buf);\n    request.set_timeout_nano(10000);\n    request.set_metadata(ttrpc::context::to_pb(ctx.metadata.clone()));\n\n    let task = create_task(Arc::new(FakeServer::new()));\n    let create = task.get(\"/containerd.task.v2.Task/Create\").unwrap();\n    create.handler(ctx, request).unwrap();\n\n    let (header, msg) = rx.recv().unwrap();\n    assert_eq!(header.length, 7);\n    assert_eq!(header.type_, MESSAGE_TYPE_RESPONSE);\n    assert_eq!(header.flags, 0);\n    assert_eq!(msg.len(), 7);\n\n    let mut s = CodedInputStream::from_bytes(&msg);\n    let mut response = Response::new();\n    response.merge_from(&mut s).unwrap();\n    assert_eq!(response.status().code(), Code::OK);\n\n    let mut s = CodedInputStream::from_bytes(&response.payload);\n    let mut resp = CreateTaskResponse::new();\n    resp.merge_from(&mut s).unwrap();\n    assert_eq!(resp.pid, 0x10c0);\n}\n\n#[test]\nfn test_delete_task() {\n    let mut req = DeleteRequest::default();\n    req.set_id(\"test1\".to_owned());\n    let mut buf = Vec::with_capacity(req.compute_size() as usize);\n    {\n        let mut s = CodedOutputStream::vec(&mut buf);\n        req.write_to(&mut s).unwrap();\n        s.flush().unwrap();\n    }\n    assert_eq!(buf.len(), 7);\n\n    let (ctx, rx) = create_ttrpc_context();\n    let mut request = Request::new();\n    request.set_service(\"containerd.task.v2.Task\".to_owned());\n    request.set_method(\"Delete\".to_owned());\n    request.set_payload(buf);\n    request.set_timeout_nano(10000);\n    request.set_metadata(ttrpc::context::to_pb(ctx.metadata.clone()));\n\n    let task = create_task(Arc::new(FakeServer::new()));\n    let delete = 
task.get(\"/containerd.task.v2.Task/Delete\").unwrap();\n    delete.handler(ctx, request).unwrap();\n\n    let (header, msg) = rx.recv().unwrap();\n    assert_eq!(header.length, 54);\n    assert_eq!(header.type_, MESSAGE_TYPE_RESPONSE);\n    assert_eq!(header.flags, 0);\n    assert_eq!(msg.len(), 54);\n\n    let mut s = CodedInputStream::from_bytes(&msg);\n    let mut response = Response::new();\n    response.merge_from(&mut s).unwrap();\n    assert_ne!(response.status().code(), Code::OK);\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/README.md",
    "content": "# Vendor\n\nThis directory contains vendor dependencies needed to generate protobuf bindings.\n\nProto files are copy-pasted directly from upstream repos:\n+ https://github.com/containerd/containerd\n+ https://github.com/protocolbuffers/protobuf\n+ https://github.com/gogo/protobuf\n+ https://github.com/containerd/cgroups\n+ https://github.com/microsoft/hcsshim\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/cgroups/cgroup2/stats/metrics.proto",
    "content": "syntax = \"proto3\";\n\npackage io.containerd.cgroups.v2;\n\noption go_package = \"github.com/containerd/cgroups/cgroup2/stats\";\n\nmessage Metrics {\n\tPidsStat pids = 1;\n\tCPUStat cpu = 2;\n\tMemoryStat memory = 4;\n\tRdmaStat rdma = 5;\n\tIOStat io = 6;\n\trepeated HugeTlbStat hugetlb = 7;\n\tMemoryEvents memory_events = 8;\n\trepeated NetworkStat network = 9;\n}\n\nmessage PSIData {\n\tdouble avg10 = 1;\n\tdouble avg60 = 2;\n\tdouble avg300 = 3;\n\tuint64 total = 4;\n}\n\nmessage PSIStats {\n\tPSIData some = 1;\n\tPSIData full = 2;\n}\n\nmessage PidsStat {\n\tuint64 current = 1;\n\tuint64 limit = 2;\n}\n\nmessage CPUStat {\n\tuint64 usage_usec  = 1;\n\tuint64 user_usec = 2;\n\tuint64 system_usec = 3;\n\tuint64 nr_periods = 4;\n\tuint64 nr_throttled = 5;\n\tuint64 throttled_usec = 6;\n\tPSIStats psi = 7;\n\tuint64 nr_bursts = 8;\n\tuint64 burst_usec = 9;\n}\n\nmessage MemoryStat {\n\tuint64 anon = 1;\n\tuint64 file = 2;\n\tuint64 kernel_stack = 3;\n\tuint64 slab = 4;\n\tuint64 sock = 5;\n\tuint64 shmem = 6;\n\tuint64 file_mapped = 7;\n\tuint64 file_dirty = 8;\n\tuint64 file_writeback = 9;\n\tuint64 anon_thp = 10;\n\tuint64 inactive_anon = 11;\n\tuint64 active_anon = 12;\n\tuint64 inactive_file = 13;\n\tuint64 active_file = 14;\n\tuint64 unevictable = 15;\n\tuint64 slab_reclaimable = 16;\n\tuint64 slab_unreclaimable = 17;\n\tuint64 pgfault = 18;\n\tuint64 pgmajfault = 19;\n\tuint64 workingset_refault = 20;\n\tuint64 workingset_activate = 21;\n\tuint64 workingset_nodereclaim = 22;\n\tuint64 pgrefill = 23;\n\tuint64 pgscan = 24;\n\tuint64 pgsteal = 25;\n\tuint64 pgactivate = 26;\n\tuint64 pgdeactivate = 27;\n\tuint64 pglazyfree = 28;\n\tuint64 pglazyfreed = 29;\n\tuint64 thp_fault_alloc = 30;\n\tuint64 thp_collapse_alloc = 31;\n\tuint64 usage = 32;\n\tuint64 usage_limit = 33;\n\tuint64 swap_usage = 34;\n\tuint64 swap_limit = 35;\n\tuint64 max_usage = 36;\n\tuint64 swap_max_usage = 37;\n\tPSIStats psi = 38;\n}\n\nmessage MemoryEvents {\n\tuint64 
low = 1;\n\tuint64 high = 2;\n\tuint64 max = 3;\n\tuint64 oom = 4;\n\tuint64 oom_kill = 5;\n\tuint64 oom_group_kill = 6;\n}\n\nmessage RdmaStat {\n\trepeated RdmaEntry current = 1;\n\trepeated RdmaEntry limit = 2;\n}\n\nmessage RdmaEntry {\n\tstring device = 1;\n\tuint32 hca_handles = 2;\n\tuint32 hca_objects = 3;\n}\n\nmessage IOStat {\n\trepeated IOEntry usage = 1;\n\tPSIStats psi = 2;\n}\n\nmessage IOEntry {\n\tuint64 major = 1;\n\tuint64 minor = 2;\n\tuint64 rbytes = 3;\n\tuint64 wbytes = 4;\n\tuint64 rios = 5;\n\tuint64 wios = 6;\n}\n\nmessage HugeTlbStat {\n\tuint64 current = 1;\n\tuint64 max = 2;\n\tstring pagesize = 3;\n\tuint64 failcnt = 4;\n}\n\nmessage NetworkStat {\n\tstring name = 1;\n\tuint64 rx_bytes = 2;\n\tuint64 rx_packets = 3;\n\tuint64 rx_errors  = 4;\n\tuint64 rx_dropped = 5;\n\tuint64 tx_bytes = 6;\n\tuint64 tx_packets = 7;\n\tuint64 tx_errors = 8;\n\tuint64 tx_dropped = 9;\n}"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto",
    "content": "syntax = \"proto3\";\n\npackage io.containerd.cgroups.v1;\n\nimport \"gogoproto/gogo.proto\";\n\nmessage Metrics {\n\trepeated HugetlbStat hugetlb = 1;\n\tPidsStat pids = 2;\n\tCPUStat cpu = 3 [(gogoproto.customname) = \"CPU\"];\n\tMemoryStat memory = 4;\n\tBlkIOStat blkio = 5;\n\tRdmaStat rdma = 6;\n\trepeated NetworkStat network = 7;\n\tCgroupStats cgroup_stats = 8;\n\tMemoryOomControl memory_oom_control = 9;\n}\n\nmessage HugetlbStat {\n\tuint64 usage = 1;\n\tuint64 max = 2;\n\tuint64 failcnt = 3;\n\tstring pagesize = 4;\n}\n\nmessage PidsStat {\n\tuint64 current = 1;\n\tuint64 limit = 2;\n}\n\nmessage CPUStat {\n\tCPUUsage usage = 1;\n\tThrottle throttling = 2;\n}\n\nmessage CPUUsage {\n\t// values in nanoseconds\n\tuint64 total = 1;\n\tuint64 kernel = 2;\n\tuint64 user = 3;\n\trepeated uint64 per_cpu = 4 [(gogoproto.customname) = \"PerCPU\"];\n\n}\n\nmessage Throttle {\n\tuint64 periods = 1;\n\tuint64 throttled_periods = 2;\n\tuint64 throttled_time = 3;\n}\n\nmessage MemoryStat {\n\tuint64 cache = 1;\n\tuint64 rss = 2 [(gogoproto.customname) = \"RSS\"];\n\tuint64 rss_huge = 3 [(gogoproto.customname) = \"RSSHuge\"];\n\tuint64 mapped_file = 4;\n\tuint64 dirty = 5;\n\tuint64 writeback = 6;\n\tuint64 pg_pg_in = 7;\n\tuint64 pg_pg_out = 8;\n\tuint64 pg_fault = 9;\n\tuint64 pg_maj_fault = 10;\n\tuint64 inactive_anon = 11;\n\tuint64 active_anon = 12;\n\tuint64 inactive_file = 13;\n\tuint64 active_file = 14;\n\tuint64 unevictable = 15;\n\tuint64 hierarchical_memory_limit = 16;\n\tuint64 hierarchical_swap_limit = 17;\n\tuint64 total_cache = 18;\n\tuint64 total_rss = 19 [(gogoproto.customname) = \"TotalRSS\"];\n\tuint64 total_rss_huge = 20 [(gogoproto.customname) = \"TotalRSSHuge\"];\n\tuint64 total_mapped_file = 21;\n\tuint64 total_dirty = 22;\n\tuint64 total_writeback = 23;\n\tuint64 total_pg_pg_in = 24;\n\tuint64 total_pg_pg_out = 25;\n\tuint64 total_pg_fault = 26;\n\tuint64 total_pg_maj_fault = 27;\n\tuint64 total_inactive_anon = 28;\n\tuint64 
total_active_anon = 29;\n\tuint64 total_inactive_file = 30;\n\tuint64 total_active_file = 31;\n\tuint64 total_unevictable = 32;\n\tMemoryEntry usage = 33;\n\tMemoryEntry swap = 34;\n\tMemoryEntry kernel = 35;\n\tMemoryEntry kernel_tcp = 36 [(gogoproto.customname) = \"KernelTCP\"];\n\n}\n\nmessage MemoryEntry {\n\tuint64 limit = 1;\n\tuint64 usage = 2;\n\tuint64 max = 3;\n\tuint64 failcnt = 4;\n}\n\nmessage MemoryOomControl {\n\tuint64 oom_kill_disable = 1;\n\tuint64 under_oom = 2;\n\tuint64 oom_kill = 3;\n}\n\nmessage BlkIOStat {\n\trepeated BlkIOEntry io_service_bytes_recursive = 1;\n\trepeated BlkIOEntry io_serviced_recursive = 2;\n\trepeated BlkIOEntry io_queued_recursive = 3;\n\trepeated BlkIOEntry io_service_time_recursive = 4;\n\trepeated BlkIOEntry io_wait_time_recursive = 5;\n\trepeated BlkIOEntry io_merged_recursive = 6;\n\trepeated BlkIOEntry io_time_recursive = 7;\n\trepeated BlkIOEntry sectors_recursive = 8;\n}\n\nmessage BlkIOEntry {\n\tstring op = 1;\n\tstring device = 2;\n\tuint64 major = 3;\n\tuint64 minor = 4;\n\tuint64 value = 5;\n}\n\nmessage RdmaStat {\n\trepeated RdmaEntry current = 1;\n\trepeated RdmaEntry limit = 2;\n}\n\nmessage RdmaEntry {\n\tstring device = 1;\n\tuint32 hca_handles = 2;\n\tuint32 hca_objects = 3;\n}\n\nmessage NetworkStat {\n\tstring name = 1;\n\tuint64 rx_bytes = 2;\n\tuint64 rx_packets = 3;\n\tuint64 rx_errors  = 4;\n\tuint64 rx_dropped = 5;\n\tuint64 tx_bytes = 6;\n\tuint64 tx_packets = 7;\n\tuint64 tx_errors = 8;\n\tuint64 tx_dropped = 9;\n}\n\n// CgroupStats exports per-cgroup statistics.\nmessage CgroupStats {\n\t// number of tasks sleeping\n\tuint64 nr_sleeping = 1;\n\t// number of tasks running\n\tuint64 nr_running = 2;\n\t// number of tasks in stopped state\n\tuint64 nr_stopped = 3;\n\t// number of tasks in uninterruptible state\n\tuint64 nr_uninterruptible = 4;\n\t// number of tasks waiting on IO\n\tuint64 nr_io_wait = 5;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.proto",
    "content": "syntax = \"proto3\";\n\npackage io.containerd.cgroups.v1;\n\noption go_package = \"github.com/containerd/cgroups/cgroup1/stats\";\n\nmessage Metrics {\n\trepeated HugetlbStat hugetlb = 1;\n\tPidsStat pids = 2;\n\tCPUStat cpu = 3;\n\tMemoryStat memory = 4;\n\tBlkIOStat blkio = 5;\n\tRdmaStat rdma = 6;\n\trepeated NetworkStat network = 7;\n\tCgroupStats cgroup_stats = 8;\n\tMemoryOomControl memory_oom_control = 9;\n}\n\nmessage HugetlbStat {\n\tuint64 usage = 1;\n\tuint64 max = 2;\n\tuint64 failcnt = 3;\n\tstring pagesize = 4;\n}\n\nmessage PidsStat {\n\tuint64 current = 1;\n\tuint64 limit = 2;\n}\n\nmessage CPUStat {\n\tCPUUsage usage = 1;\n\tThrottle throttling = 2;\n}\n\nmessage CPUUsage {\n\t// values in nanoseconds\n\tuint64 total = 1;\n\tuint64 kernel = 2;\n\tuint64 user = 3;\n\trepeated uint64 per_cpu = 4;\n\n}\n\nmessage Throttle {\n\tuint64 periods = 1;\n\tuint64 throttled_periods = 2;\n\tuint64 throttled_time = 3;\n}\n\nmessage MemoryStat {\n\tuint64 cache = 1;\n\tuint64 rss = 2;\n\tuint64 rss_huge = 3;\n\tuint64 mapped_file = 4;\n\tuint64 dirty = 5;\n\tuint64 writeback = 6;\n\tuint64 pg_pg_in = 7;\n\tuint64 pg_pg_out = 8;\n\tuint64 pg_fault = 9;\n\tuint64 pg_maj_fault = 10;\n\tuint64 inactive_anon = 11;\n\tuint64 active_anon = 12;\n\tuint64 inactive_file = 13;\n\tuint64 active_file = 14;\n\tuint64 unevictable = 15;\n\tuint64 hierarchical_memory_limit = 16;\n\tuint64 hierarchical_swap_limit = 17;\n\tuint64 total_cache = 18;\n\tuint64 total_rss = 19;\n\tuint64 total_rss_huge = 20;\n\tuint64 total_mapped_file = 21;\n\tuint64 total_dirty = 22;\n\tuint64 total_writeback = 23;\n\tuint64 total_pg_pg_in = 24;\n\tuint64 total_pg_pg_out = 25;\n\tuint64 total_pg_fault = 26;\n\tuint64 total_pg_maj_fault = 27;\n\tuint64 total_inactive_anon = 28;\n\tuint64 total_active_anon = 29;\n\tuint64 total_inactive_file = 30;\n\tuint64 total_active_file = 31;\n\tuint64 total_unevictable = 32;\n\tMemoryEntry usage = 33;\n\tMemoryEntry swap = 34;\n\tMemoryEntry 
kernel = 35;\n\tMemoryEntry kernel_tcp = 36;\n\n}\n\nmessage MemoryEntry {\n\tuint64 limit = 1;\n\tuint64 usage = 2;\n\tuint64 max = 3;\n\tuint64 failcnt = 4;\n}\n\nmessage MemoryOomControl {\n\tuint64 oom_kill_disable = 1;\n\tuint64 under_oom = 2;\n\tuint64 oom_kill = 3;\n}\n\nmessage BlkIOStat {\n\trepeated BlkIOEntry io_service_bytes_recursive = 1;\n\trepeated BlkIOEntry io_serviced_recursive = 2;\n\trepeated BlkIOEntry io_queued_recursive = 3;\n\trepeated BlkIOEntry io_service_time_recursive = 4;\n\trepeated BlkIOEntry io_wait_time_recursive = 5;\n\trepeated BlkIOEntry io_merged_recursive = 6;\n\trepeated BlkIOEntry io_time_recursive = 7;\n\trepeated BlkIOEntry sectors_recursive = 8;\n}\n\nmessage BlkIOEntry {\n\tstring op = 1;\n\tstring device = 2;\n\tuint64 major = 3;\n\tuint64 minor = 4;\n\tuint64 value = 5;\n}\n\nmessage RdmaStat {\n\trepeated RdmaEntry current = 1;\n\trepeated RdmaEntry limit = 2;\n}\n\nmessage RdmaEntry {\n\tstring device = 1;\n\tuint32 hca_handles = 2;\n\tuint32 hca_objects = 3;\n}\n\nmessage NetworkStat {\n\tstring name = 1;\n\tuint64 rx_bytes = 2;\n\tuint64 rx_packets = 3;\n\tuint64 rx_errors  = 4;\n\tuint64 rx_dropped = 5;\n\tuint64 tx_bytes = 6;\n\tuint64 tx_packets = 7;\n\tuint64 tx_errors = 8;\n\tuint64 tx_dropped = 9;\n}\n\n// CgroupStats exports per-cgroup statistics.\nmessage CgroupStats {\n\t// number of tasks sleeping\n\tuint64 nr_sleeping = 1;\n\t// number of tasks running\n\tuint64 nr_running = 2;\n\t// number of tasks in stopped state\n\tuint64 nr_stopped = 3;\n\t// number of tasks in uninterruptible state\n\tuint64 nr_uninterruptible = 4;\n\t// number of tasks waiting on IO\n\tuint64 nr_io_wait = 5;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/api/events/container.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.events;\n\nimport \"google/protobuf/any.proto\";\nimport \"types/fieldpath.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/events;events\";\noption (containerd.types.fieldpath_all) = true;\n\nmessage ContainerCreate {\n  string id = 1;\n  string image = 2;\n  message Runtime {\n    string name = 1;\n    google.protobuf.Any options = 2;\n  }\n  Runtime runtime = 3;\n}\n\nmessage ContainerUpdate {\n  string id = 1;\n  string image = 2;\n  map<string, string> labels = 3;\n  string snapshot_key = 4;\n}\n\nmessage ContainerDelete {\n  string id = 1;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/api/events/content.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.events;\n\nimport \"types/fieldpath.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/events;events\";\noption (containerd.types.fieldpath_all) = true;\n\nmessage ContentCreate {\n  string digest = 1;\n  int64 size = 2;\n}\n\nmessage ContentDelete {\n  string digest = 1;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/api/events/image.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.images.v1;\n\nimport \"types/fieldpath.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/events;events\";\noption (containerd.types.fieldpath_all) = true;\n\nmessage ImageCreate {\n  string name = 1;\n  map<string, string> labels = 2;\n}\n\nmessage ImageUpdate {\n  string name = 1;\n  map<string, string> labels = 2;\n}\n\nmessage ImageDelete {\n  string name = 1;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/api/events/namespace.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.events;\n\nimport \"types/fieldpath.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/events;events\";\noption (containerd.types.fieldpath_all) = true;\n\nmessage NamespaceCreate {\n  string name = 1;\n  map<string, string> labels = 2;\n}\n\nmessage NamespaceUpdate {\n  string name = 1;\n  map<string, string> labels = 2;\n}\n\nmessage NamespaceDelete {\n  string name = 1;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/api/events/sandbox.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.events;\n\nimport \"google/protobuf/timestamp.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/events;events\";\n\nmessage SandboxCreate {\n  string sandbox_id = 1;\n}\n\nmessage SandboxStart {\n  string sandbox_id = 1;\n}\n\nmessage SandboxExit {\n  string sandbox_id = 1;\n  uint32 exit_status = 2;\n  google.protobuf.Timestamp exited_at = 3;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/api/events/snapshot.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.events;\n\nimport \"types/fieldpath.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/events;events\";\noption (containerd.types.fieldpath_all) = true;\n\nmessage SnapshotPrepare {\n  string key = 1;\n  string parent = 2;\n  string snapshotter = 5;\n}\n\nmessage SnapshotCommit {\n  string key = 1;\n  string name = 2;\n  string snapshotter = 5;\n}\n\nmessage SnapshotRemove {\n  string key = 1;\n  string snapshotter = 5;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/api/events/task.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.events;\n\nimport \"google/protobuf/timestamp.proto\";\nimport \"types/fieldpath.proto\";\nimport \"types/mount.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/events;events\";\noption (containerd.types.fieldpath_all) = true;\n\nmessage TaskCreate {\n  string container_id = 1;\n  string bundle = 2;\n  repeated containerd.types.Mount rootfs = 3;\n  TaskIO io = 4;\n  string checkpoint = 5;\n  uint32 pid = 6;\n}\n\nmessage TaskStart {\n  string container_id = 1;\n  uint32 pid = 2;\n}\n\nmessage TaskDelete {\n  string container_id = 1;\n  uint32 pid = 2;\n  uint32 exit_status = 3;\n  google.protobuf.Timestamp exited_at = 4;\n  // id is the specific exec. 
By default if omitted will be `\"\"` thus matches\n  // the init exec of the task matching `container_id`.\n  string id = 5;\n}\n\nmessage TaskIO {\n  string stdin = 1;\n  string stdout = 2;\n  string stderr = 3;\n  bool terminal = 4;\n}\n\nmessage TaskExit {\n  string container_id = 1;\n  string id = 2;\n  uint32 pid = 3;\n  uint32 exit_status = 4;\n  google.protobuf.Timestamp exited_at = 5;\n}\n\nmessage TaskOOM {\n  string container_id = 1;\n}\n\nmessage TaskExecAdded {\n  string container_id = 1;\n  string exec_id = 2;\n}\n\nmessage TaskExecStarted {\n  string container_id = 1;\n  string exec_id = 2;\n  uint32 pid = 3;\n}\n\nmessage TaskPaused {\n  string container_id = 1;\n}\n\nmessage TaskResumed {\n  string container_id = 1;\n}\n\nmessage TaskCheckpointed {\n  string container_id = 1;\n  string checkpoint = 2;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.runtime.sandbox.v1;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/timestamp.proto\";\nimport \"types/metrics.proto\";\nimport \"types/mount.proto\";\nimport \"types/platform.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/runtime/sandbox/v1;sandbox\";\n\n// Sandbox is an optional interface that shim may implement to support sandboxes environments.\n// A typical example of sandbox is microVM or pause container - an entity that groups containers and/or\n// holds resources relevant for this group.\nservice Sandbox {\n  // CreateSandbox will be called right after sandbox shim instance launched.\n  // It is a good place to initialize sandbox environment.\n  rpc CreateSandbox(CreateSandboxRequest) returns (CreateSandboxResponse);\n\n  // StartSandbox will start a previously created sandbox.\n  rpc StartSandbox(StartSandboxRequest) returns (StartSandboxResponse);\n\n  // Platform queries the platform the sandbox is going to run containers on.\n  // containerd will use this to generate a proper OCI spec.\n  rpc Platform(PlatformRequest) returns (PlatformResponse);\n\n  // StopSandbox will stop existing sandbox instance\n  rpc StopSandbox(StopSandboxRequest) returns (StopSandboxResponse);\n\n  // WaitSandbox blocks until sandbox exits.\n  rpc 
WaitSandbox(WaitSandboxRequest) returns (WaitSandboxResponse);\n\n  // SandboxStatus will return current status of the running sandbox instance\n  rpc SandboxStatus(SandboxStatusRequest) returns (SandboxStatusResponse);\n\n  // PingSandbox is a lightweight API call to check whether sandbox alive.\n  rpc PingSandbox(PingRequest) returns (PingResponse);\n\n  // ShutdownSandbox must shutdown shim instance.\n  rpc ShutdownSandbox(ShutdownSandboxRequest) returns (ShutdownSandboxResponse);\n\n  // SandboxMetrics retrieves metrics about a sandbox instance.\n  rpc SandboxMetrics(SandboxMetricsRequest) returns (SandboxMetricsResponse);\n}\n\nmessage CreateSandboxRequest {\n  string sandbox_id = 1;\n  string bundle_path = 2;\n  repeated containerd.types.Mount rootfs = 3;\n  google.protobuf.Any options = 4;\n  string netns_path = 5;\n  map<string, string> annotations = 6;\n}\n\nmessage CreateSandboxResponse {}\n\nmessage StartSandboxRequest {\n  string sandbox_id = 1;\n}\n\nmessage StartSandboxResponse {\n  uint32 pid = 1;\n  google.protobuf.Timestamp created_at = 2;\n  google.protobuf.Any spec = 3;\n}\n\nmessage PlatformRequest {\n  string sandbox_id = 1;\n}\n\nmessage PlatformResponse {\n  containerd.types.Platform platform = 1;\n}\n\nmessage StopSandboxRequest {\n  string sandbox_id = 1;\n  uint32 timeout_secs = 2;\n}\n\nmessage StopSandboxResponse {}\n\nmessage UpdateSandboxRequest {\n  string sandbox_id = 1;\n  google.protobuf.Any resources = 2;\n  map<string, string> annotations = 3;\n}\n\nmessage WaitSandboxRequest {\n  string sandbox_id = 1;\n}\n\nmessage WaitSandboxResponse {\n  uint32 exit_status = 1;\n  google.protobuf.Timestamp exited_at = 2;\n}\n\nmessage UpdateSandboxResponse {}\n\nmessage SandboxStatusRequest {\n  string sandbox_id = 1;\n  bool verbose = 2;\n}\n\nmessage SandboxStatusResponse {\n  string sandbox_id = 1;\n  uint32 pid = 2;\n  string state = 3;\n  map<string, string> info = 4;\n  google.protobuf.Timestamp created_at = 5;\n  
google.protobuf.Timestamp exited_at = 6;\n  google.protobuf.Any extra = 7;\n}\n\nmessage PingRequest {\n  string sandbox_id = 1;\n}\n\nmessage PingResponse {}\n\nmessage ShutdownSandboxRequest {\n  string sandbox_id = 1;\n}\n\nmessage ShutdownSandboxResponse {}\n\nmessage SandboxMetricsRequest {\n  string sandbox_id = 1;\n}\n\nmessage SandboxMetricsResponse {\n  containerd.types.Metric metrics = 1;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/api/runtime/task/v2/shim.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.task.v2;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/empty.proto\";\nimport \"google/protobuf/timestamp.proto\";\nimport \"types/mount.proto\";\nimport \"types/task/task.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/runtime/task/v2;task\";\n\n// Shim service is launched for each container and is responsible for owning the IO\n// for the container and its additional processes.  
The shim is also the parent of\n// each container and allows reattaching to the IO and receiving the exit status\n// for the container processes.\nservice Task {\n  rpc State(StateRequest) returns (StateResponse);\n  rpc Create(CreateTaskRequest) returns (CreateTaskResponse);\n  rpc Start(StartRequest) returns (StartResponse);\n  rpc Delete(DeleteRequest) returns (DeleteResponse);\n  rpc Pids(PidsRequest) returns (PidsResponse);\n  rpc Pause(PauseRequest) returns (google.protobuf.Empty);\n  rpc Resume(ResumeRequest) returns (google.protobuf.Empty);\n  rpc Checkpoint(CheckpointTaskRequest) returns (google.protobuf.Empty);\n  rpc Kill(KillRequest) returns (google.protobuf.Empty);\n  rpc Exec(ExecProcessRequest) returns (google.protobuf.Empty);\n  rpc ResizePty(ResizePtyRequest) returns (google.protobuf.Empty);\n  rpc CloseIO(CloseIORequest) returns (google.protobuf.Empty);\n  rpc Update(UpdateTaskRequest) returns (google.protobuf.Empty);\n  rpc Wait(WaitRequest) returns (WaitResponse);\n  rpc Stats(StatsRequest) returns (StatsResponse);\n  rpc Connect(ConnectRequest) returns (ConnectResponse);\n  rpc Shutdown(ShutdownRequest) returns (google.protobuf.Empty);\n}\n\nmessage CreateTaskRequest {\n  string id = 1;\n  string bundle = 2;\n  repeated containerd.types.Mount rootfs = 3;\n  bool terminal = 4;\n  string stdin = 5;\n  string stdout = 6;\n  string stderr = 7;\n  string checkpoint = 8;\n  string parent_checkpoint = 9;\n  google.protobuf.Any options = 10;\n}\n\nmessage CreateTaskResponse {\n  uint32 pid = 1;\n}\n\nmessage DeleteRequest {\n  string id = 1;\n  string exec_id = 2;\n}\n\nmessage DeleteResponse {\n  uint32 pid = 1;\n  uint32 exit_status = 2;\n  google.protobuf.Timestamp exited_at = 3;\n}\n\nmessage ExecProcessRequest {\n  string id = 1;\n  string exec_id = 2;\n  bool terminal = 3;\n  string stdin = 4;\n  string stdout = 5;\n  string stderr = 6;\n  google.protobuf.Any spec = 7;\n}\n\nmessage ExecProcessResponse {}\n\nmessage ResizePtyRequest {\n  string 
id = 1;\n  string exec_id = 2;\n  uint32 width = 3;\n  uint32 height = 4;\n}\n\nmessage StateRequest {\n  string id = 1;\n  string exec_id = 2;\n}\n\nmessage StateResponse {\n  string id = 1;\n  string bundle = 2;\n  uint32 pid = 3;\n  containerd.v1.types.Status status = 4;\n  string stdin = 5;\n  string stdout = 6;\n  string stderr = 7;\n  bool terminal = 8;\n  uint32 exit_status = 9;\n  google.protobuf.Timestamp exited_at = 10;\n  string exec_id = 11;\n}\n\nmessage KillRequest {\n  string id = 1;\n  string exec_id = 2;\n  uint32 signal = 3;\n  bool all = 4;\n}\n\nmessage CloseIORequest {\n  string id = 1;\n  string exec_id = 2;\n  bool stdin = 3;\n}\n\nmessage PidsRequest {\n  string id = 1;\n}\n\nmessage PidsResponse {\n  repeated containerd.v1.types.ProcessInfo processes = 1;\n}\n\nmessage CheckpointTaskRequest {\n  string id = 1;\n  string path = 2;\n  google.protobuf.Any options = 3;\n}\n\nmessage UpdateTaskRequest {\n  string id = 1;\n  google.protobuf.Any resources = 2;\n  map<string, string> annotations = 3;\n}\n\nmessage StartRequest {\n  string id = 1;\n  string exec_id = 2;\n}\n\nmessage StartResponse {\n  uint32 pid = 1;\n}\n\nmessage WaitRequest {\n  string id = 1;\n  string exec_id = 2;\n}\n\nmessage WaitResponse {\n  uint32 exit_status = 1;\n  google.protobuf.Timestamp exited_at = 2;\n}\n\nmessage StatsRequest {\n  string id = 1;\n}\n\nmessage StatsResponse {\n  google.protobuf.Any stats = 1;\n}\n\nmessage ConnectRequest {\n  string id = 1;\n}\n\nmessage ConnectResponse {\n  uint32 shim_pid = 1;\n  uint32 task_pid = 2;\n  string version = 3;\n}\n\nmessage ShutdownRequest {\n  string id = 1;\n  bool now = 2;\n}\n\nmessage PauseRequest {\n  string id = 1;\n}\n\nmessage ResumeRequest {\n  string id = 1;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.events.ttrpc.v1;\n\nimport \"google/protobuf/empty.proto\";\nimport \"types/event.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/ttrpc/events/v1;events\";\n\nservice Events {\n  // Forward sends an event that has already been packaged into an envelope\n  // with a timestamp and namespace.\n  //\n  // This is useful if earlier timestamping is required or when forwarding on\n  // behalf of another component, namespace or publisher.\n  rpc Forward(ForwardRequest) returns (google.protobuf.Empty);\n}\n\nmessage ForwardRequest {\n  containerd.types.Envelope envelope = 1;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/api/types/descriptor.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types;\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\n// Descriptor describes a blob in a content store.\n//\n// This descriptor can be used to reference content from an\n// oci descriptor found in a manifest.\n// See https://godoc.org/github.com/opencontainers/image-spec/specs-go/v1#Descriptor\nmessage Descriptor {\n  string media_type = 1;\n  string digest = 2;\n  int64 size = 3;\n  map<string, string> annotations = 5;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/api/types/event.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/timestamp.proto\";\nimport \"types/fieldpath.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\nmessage Envelope {\n  option (containerd.types.fieldpath) = true;\n  google.protobuf.Timestamp timestamp = 1;\n  string namespace = 2;\n  string topic = 3;\n  google.protobuf.Any event = 4;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/api/types/fieldpath.proto",
    "content": "// Protocol Buffers for Go with Gadgets\n//\n// Copyright (c) 2013, The GoGo Authors. All rights reserved.\n// http://github.com/gogo/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nsyntax = \"proto3\";\npackage containerd.types;\n\nimport \"google/protobuf/descriptor.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\nextend google.protobuf.FileOptions {\n  optional bool fieldpath_all = 63300;\n}\n\nextend google.protobuf.MessageOptions {\n  optional bool fieldpath = 64400;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/api/types/introspection.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types;\n\nimport \"google/protobuf/any.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\nmessage RuntimeRequest {\n  string runtime_path = 1;\n  // Options correspond to CreateTaskRequest.options.\n  // This is needed to pass the runc binary path, etc.\n  google.protobuf.Any options = 2;\n}\n\nmessage RuntimeVersion {\n  string version = 1;\n  string revision = 2;\n}\n\nmessage RuntimeInfo {\n  string name = 1;\n  RuntimeVersion version = 2;\n  // Options correspond to RuntimeInfoRequest.Options (contains runc binary path, etc.)\n  google.protobuf.Any options = 3;\n  // OCI-compatible runtimes should use https://github.com/opencontainers/runtime-spec/blob/main/features.md\n  google.protobuf.Any features = 4;\n  // Annotations of the shim. Irrelevant to features.Annotations.\n  map<string, string> annotations = 5;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/api/types/metrics.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/timestamp.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\nmessage Metric {\n  google.protobuf.Timestamp timestamp = 1;\n  string id = 2;\n  google.protobuf.Any data = 3;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/api/types/mount.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types;\n\nimport \"google/protobuf/timestamp.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\n// Mount describes mounts for a container.\n//\n// This type is the lingua franca of ContainerD. All services provide mounts\n// to be used with the container at creation time.\n//\n// The Mount type follows the structure of the mount syscall, including a type,\n// source, target and options.\nmessage Mount {\n  // Type defines the nature of the mount.\n  string type = 1;\n\n  // Source specifies the name of the mount. Depending on mount type, this\n  // may be a volume name or a host path, or even ignored.\n  string source = 2;\n\n  // Target path in container\n  string target = 3;\n\n  // Options specifies zero or more fstab style mount options.\n  repeated string options = 4;\n}\n\nmessage ActiveMount {\n  Mount mount = 1;\n\n  google.protobuf.Timestamp mounted_at = 2;\n\n  string mount_point = 3;\n\n  map<string, string> data = 4;\n}\n\nmessage ActivationInfo {\n  string name = 1;\n\n  repeated ActiveMount active = 2;\n\n  repeated Mount system = 3;\n\n  map<string, string> labels = 4;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/api/types/platform.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types;\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\n// Platform follows the structure of the OCI platform specification, from\n// descriptors.\nmessage Platform {\n  string os = 1;\n  string architecture = 2;\n  string variant = 3;\n  string os_version = 4;\n  repeated string os_features = 5;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/api/types/runc/options/oci.proto",
    "content": "syntax = \"proto3\";\n\npackage containerd.runc.v1;\n\noption go_package = \"github.com/containerd/containerd/api/types/runc/options;options\";\n\nmessage Options {\n  // disable pivot root when creating a container\n  bool no_pivot_root = 1;\n  // create a new keyring for the container\n  bool no_new_keyring = 2;\n  // place the shim in a cgroup\n  string shim_cgroup = 3;\n  // set the I/O's pipes uid\n  uint32 io_uid = 4;\n  // set the I/O's pipes gid\n  uint32 io_gid = 5;\n  // binary name of the runc binary\n  string binary_name = 6;\n  // runc root directory\n  string root = 7;\n  // criu binary path.\n  //\n  // Removed in containerd v2.0: string criu_path = 8;\n  reserved 8;\n  // enable systemd cgroups\n  bool systemd_cgroup = 9;\n  // criu image path\n  string criu_image_path = 10;\n  // criu work path\n  string criu_work_path = 11;\n  // task api address, can be a unix domain socket, or vsock address.\n  // it is in the form of ttrpc+unix://path/to/uds or grpc+vsock://<vsock cid>:<port>.\n  string task_api_address = 12;\n  // task api version, currently supported value is 2 and 3.\n  uint32 task_api_version = 13;\n}\n\nmessage CheckpointOptions {\n  // exit the container after a checkpoint\n  bool exit = 1;\n  // checkpoint open tcp connections\n  bool open_tcp = 2;\n  // checkpoint external unix sockets\n  bool external_unix_sockets = 3;\n  // checkpoint terminals (ptys)\n  bool terminal = 4;\n  // allow checkpointing of file locks\n  bool file_locks = 5;\n  // restore provided namespaces as empty namespaces\n  repeated string empty_namespaces = 6;\n  // set the cgroups mode, soft, full, strict\n  string cgroups_mode = 7;\n  // checkpoint image path\n  string image_path = 8;\n  // checkpoint work path\n  string work_path = 9;\n}\n\nmessage ProcessDetails {\n  // exec process id if the process is managed by a shim\n  string exec_id = 1;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/api/types/sandbox.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/timestamp.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\n// Sandbox represents a sandbox metadata object that keeps all info required by controller to\n// work with a particular instance.\nmessage Sandbox {\n  // SandboxID is a unique instance identifier within namespace\n  string sandbox_id = 1;\n  message Runtime {\n    // Name is the name of the runtime.\n    string name = 1;\n    // Options specify additional runtime initialization options for the shim (this data will be available in StartShim).\n    // Typically this data expected to be runtime shim implementation specific.\n    google.protobuf.Any options = 2;\n  }\n  // Runtime specifies which runtime to use for executing this container.\n  Runtime runtime = 2;\n  // Spec is sandbox configuration (kin of OCI runtime spec), spec's data will be written to a config.json file in the\n  // bundle directory (similary to OCI spec).\n  google.protobuf.Any spec = 3;\n  // Labels provides an area to include arbitrary data on containers.\n  map<string, string> labels = 4;\n  // CreatedAt is the time the container was first created.\n  google.protobuf.Timestamp created_at = 5;\n  // UpdatedAt is the last time the container was mutated.\n  
google.protobuf.Timestamp updated_at = 6;\n  // Extensions allow clients to provide optional blobs that can be handled by runtime.\n  map<string, google.protobuf.Any> extensions = 7;\n  // Sandboxer is the name of the sandbox controller who manages the sandbox.\n  string sandboxer = 10;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/api/types/task/task.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.v1.types;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/timestamp.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types/task\";\n\nenum Status {\n  UNKNOWN = 0;\n  CREATED = 1;\n  RUNNING = 2;\n  STOPPED = 3;\n  PAUSED = 4;\n  PAUSING = 5;\n}\n\nmessage Process {\n  string container_id = 1;\n  string id = 2;\n  uint32 pid = 3;\n  Status status = 4;\n  string stdin = 5;\n  string stdout = 6;\n  string stderr = 7;\n  bool terminal = 8;\n  uint32 exit_status = 9;\n  google.protobuf.Timestamp exited_at = 10;\n}\n\nmessage ProcessInfo {\n  // PID is the process ID.\n  uint32 pid = 1;\n  // Info contains additional process information.\n  //\n  // Info varies by platform.\n  google.protobuf.Any info = 2;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/events/container.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.events;\n\nimport \"google/protobuf/any.proto\";\nimport \"types/fieldpath.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/events;events\";\noption (containerd.types.fieldpath_all) = true;\n\nmessage ContainerCreate {\n  string id = 1;\n  string image = 2;\n  message Runtime {\n    string name = 1;\n    google.protobuf.Any options = 2;\n  }\n  Runtime runtime = 3;\n}\n\nmessage ContainerUpdate {\n  string id = 1;\n  string image = 2;\n  map<string, string> labels = 3;\n  string snapshot_key = 4;\n}\n\nmessage ContainerDelete {\n  string id = 1;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/events/content.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.events;\n\nimport \"types/fieldpath.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/events;events\";\noption (containerd.types.fieldpath_all) = true;\n\nmessage ContentCreate {\n  string digest = 1;\n  int64 size = 2;\n}\n\nmessage ContentDelete {\n  string digest = 1;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/events/image.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.images.v1;\n\nimport \"types/fieldpath.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/events;events\";\noption (containerd.types.fieldpath_all) = true;\n\nmessage ImageCreate {\n  string name = 1;\n  map<string, string> labels = 2;\n}\n\nmessage ImageUpdate {\n  string name = 1;\n  map<string, string> labels = 2;\n}\n\nmessage ImageDelete {\n  string name = 1;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/events/namespace.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.events;\n\nimport \"types/fieldpath.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/events;events\";\noption (containerd.types.fieldpath_all) = true;\n\nmessage NamespaceCreate {\n  string name = 1;\n  map<string, string> labels = 2;\n}\n\nmessage NamespaceUpdate {\n  string name = 1;\n  map<string, string> labels = 2;\n}\n\nmessage NamespaceDelete {\n  string name = 1;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/events/sandbox.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.events;\n\nimport \"google/protobuf/timestamp.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/events;events\";\n\nmessage SandboxCreate {\n  string sandbox_id = 1;\n}\n\nmessage SandboxStart {\n  string sandbox_id = 1;\n}\n\nmessage SandboxExit {\n  string sandbox_id = 1;\n  uint32 exit_status = 2;\n  google.protobuf.Timestamp exited_at = 3;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/events/snapshot.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.events;\n\nimport \"types/fieldpath.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/events;events\";\noption (containerd.types.fieldpath_all) = true;\n\nmessage SnapshotPrepare {\n  string key = 1;\n  string parent = 2;\n  string snapshotter = 5;\n}\n\nmessage SnapshotCommit {\n  string key = 1;\n  string name = 2;\n  string snapshotter = 5;\n}\n\nmessage SnapshotRemove {\n  string key = 1;\n  string snapshotter = 5;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/events/task.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.events;\n\nimport \"google/protobuf/timestamp.proto\";\nimport \"types/fieldpath.proto\";\nimport \"types/mount.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/events;events\";\noption (containerd.types.fieldpath_all) = true;\n\nmessage TaskCreate {\n  string container_id = 1;\n  string bundle = 2;\n  repeated containerd.types.Mount rootfs = 3;\n  TaskIO io = 4;\n  string checkpoint = 5;\n  uint32 pid = 6;\n}\n\nmessage TaskStart {\n  string container_id = 1;\n  uint32 pid = 2;\n}\n\nmessage TaskDelete {\n  string container_id = 1;\n  uint32 pid = 2;\n  uint32 exit_status = 3;\n  google.protobuf.Timestamp exited_at = 4;\n  // id is the specific exec. 
By default if omitted will be `\"\"` thus matches\n  // the init exec of the task matching `container_id`.\n  string id = 5;\n}\n\nmessage TaskIO {\n  string stdin = 1;\n  string stdout = 2;\n  string stderr = 3;\n  bool terminal = 4;\n}\n\nmessage TaskExit {\n  string container_id = 1;\n  string id = 2;\n  uint32 pid = 3;\n  uint32 exit_status = 4;\n  google.protobuf.Timestamp exited_at = 5;\n}\n\nmessage TaskOOM {\n  string container_id = 1;\n}\n\nmessage TaskExecAdded {\n  string container_id = 1;\n  string exec_id = 2;\n}\n\nmessage TaskExecStarted {\n  string container_id = 1;\n  string exec_id = 2;\n  uint32 pid = 3;\n}\n\nmessage TaskPaused {\n  string container_id = 1;\n}\n\nmessage TaskResumed {\n  string container_id = 1;\n}\n\nmessage TaskCheckpointed {\n  string container_id = 1;\n  string checkpoint = 2;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.runtime.sandbox.v1;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/timestamp.proto\";\nimport \"types/metrics.proto\";\nimport \"types/mount.proto\";\nimport \"types/platform.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/runtime/sandbox/v1;sandbox\";\n\n// Sandbox is an optional interface that shim may implement to support sandboxes environments.\n// A typical example of sandbox is microVM or pause container - an entity that groups containers and/or\n// holds resources relevant for this group.\nservice Sandbox {\n  // CreateSandbox will be called right after sandbox shim instance launched.\n  // It is a good place to initialize sandbox environment.\n  rpc CreateSandbox(CreateSandboxRequest) returns (CreateSandboxResponse);\n\n  // StartSandbox will start a previously created sandbox.\n  rpc StartSandbox(StartSandboxRequest) returns (StartSandboxResponse);\n\n  // Platform queries the platform the sandbox is going to run containers on.\n  // containerd will use this to generate a proper OCI spec.\n  rpc Platform(PlatformRequest) returns (PlatformResponse);\n\n  // StopSandbox will stop existing sandbox instance\n  rpc StopSandbox(StopSandboxRequest) returns (StopSandboxResponse);\n\n  // WaitSandbox blocks until sandbox exits.\n  rpc 
WaitSandbox(WaitSandboxRequest) returns (WaitSandboxResponse);\n\n  // SandboxStatus will return current status of the running sandbox instance\n  rpc SandboxStatus(SandboxStatusRequest) returns (SandboxStatusResponse);\n\n  // PingSandbox is a lightweight API call to check whether sandbox alive.\n  rpc PingSandbox(PingRequest) returns (PingResponse);\n\n  // ShutdownSandbox must shutdown shim instance.\n  rpc ShutdownSandbox(ShutdownSandboxRequest) returns (ShutdownSandboxResponse);\n\n  // SandboxMetrics retrieves metrics about a sandbox instance.\n  rpc SandboxMetrics(SandboxMetricsRequest) returns (SandboxMetricsResponse);\n}\n\nmessage CreateSandboxRequest {\n  string sandbox_id = 1;\n  string bundle_path = 2;\n  repeated containerd.types.Mount rootfs = 3;\n  google.protobuf.Any options = 4;\n  string netns_path = 5;\n  map<string, string> annotations = 6;\n}\n\nmessage CreateSandboxResponse {}\n\nmessage StartSandboxRequest {\n  string sandbox_id = 1;\n}\n\nmessage StartSandboxResponse {\n  uint32 pid = 1;\n  google.protobuf.Timestamp created_at = 2;\n  google.protobuf.Any spec = 3;\n}\n\nmessage PlatformRequest {\n  string sandbox_id = 1;\n}\n\nmessage PlatformResponse {\n  containerd.types.Platform platform = 1;\n}\n\nmessage StopSandboxRequest {\n  string sandbox_id = 1;\n  uint32 timeout_secs = 2;\n}\n\nmessage StopSandboxResponse {}\n\nmessage UpdateSandboxRequest {\n  string sandbox_id = 1;\n  google.protobuf.Any resources = 2;\n  map<string, string> annotations = 3;\n}\n\nmessage WaitSandboxRequest {\n  string sandbox_id = 1;\n}\n\nmessage WaitSandboxResponse {\n  uint32 exit_status = 1;\n  google.protobuf.Timestamp exited_at = 2;\n}\n\nmessage UpdateSandboxResponse {}\n\nmessage SandboxStatusRequest {\n  string sandbox_id = 1;\n  bool verbose = 2;\n}\n\nmessage SandboxStatusResponse {\n  string sandbox_id = 1;\n  uint32 pid = 2;\n  string state = 3;\n  map<string, string> info = 4;\n  google.protobuf.Timestamp created_at = 5;\n  
google.protobuf.Timestamp exited_at = 6;\n  google.protobuf.Any extra = 7;\n}\n\nmessage PingRequest {\n  string sandbox_id = 1;\n}\n\nmessage PingResponse {}\n\nmessage ShutdownSandboxRequest {\n  string sandbox_id = 1;\n}\n\nmessage ShutdownSandboxResponse {}\n\nmessage SandboxMetricsRequest {\n  string sandbox_id = 1;\n}\n\nmessage SandboxMetricsResponse {\n  containerd.types.Metric metrics = 1;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/runtime/task/v2/shim.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.task.v2;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/empty.proto\";\nimport \"google/protobuf/timestamp.proto\";\nimport \"types/mount.proto\";\nimport \"types/task/task.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/runtime/task/v2;task\";\n\n// Shim service is launched for each container and is responsible for owning the IO\n// for the container and its additional processes.  
The shim is also the parent of\n// each container and allows reattaching to the IO and receiving the exit status\n// for the container processes.\nservice Task {\n  rpc State(StateRequest) returns (StateResponse);\n  rpc Create(CreateTaskRequest) returns (CreateTaskResponse);\n  rpc Start(StartRequest) returns (StartResponse);\n  rpc Delete(DeleteRequest) returns (DeleteResponse);\n  rpc Pids(PidsRequest) returns (PidsResponse);\n  rpc Pause(PauseRequest) returns (google.protobuf.Empty);\n  rpc Resume(ResumeRequest) returns (google.protobuf.Empty);\n  rpc Checkpoint(CheckpointTaskRequest) returns (google.protobuf.Empty);\n  rpc Kill(KillRequest) returns (google.protobuf.Empty);\n  rpc Exec(ExecProcessRequest) returns (google.protobuf.Empty);\n  rpc ResizePty(ResizePtyRequest) returns (google.protobuf.Empty);\n  rpc CloseIO(CloseIORequest) returns (google.protobuf.Empty);\n  rpc Update(UpdateTaskRequest) returns (google.protobuf.Empty);\n  rpc Wait(WaitRequest) returns (WaitResponse);\n  rpc Stats(StatsRequest) returns (StatsResponse);\n  rpc Connect(ConnectRequest) returns (ConnectResponse);\n  rpc Shutdown(ShutdownRequest) returns (google.protobuf.Empty);\n}\n\nmessage CreateTaskRequest {\n  string id = 1;\n  string bundle = 2;\n  repeated containerd.types.Mount rootfs = 3;\n  bool terminal = 4;\n  string stdin = 5;\n  string stdout = 6;\n  string stderr = 7;\n  string checkpoint = 8;\n  string parent_checkpoint = 9;\n  google.protobuf.Any options = 10;\n}\n\nmessage CreateTaskResponse {\n  uint32 pid = 1;\n}\n\nmessage DeleteRequest {\n  string id = 1;\n  string exec_id = 2;\n}\n\nmessage DeleteResponse {\n  uint32 pid = 1;\n  uint32 exit_status = 2;\n  google.protobuf.Timestamp exited_at = 3;\n}\n\nmessage ExecProcessRequest {\n  string id = 1;\n  string exec_id = 2;\n  bool terminal = 3;\n  string stdin = 4;\n  string stdout = 5;\n  string stderr = 6;\n  google.protobuf.Any spec = 7;\n}\n\nmessage ExecProcessResponse {}\n\nmessage ResizePtyRequest {\n  string 
id = 1;\n  string exec_id = 2;\n  uint32 width = 3;\n  uint32 height = 4;\n}\n\nmessage StateRequest {\n  string id = 1;\n  string exec_id = 2;\n}\n\nmessage StateResponse {\n  string id = 1;\n  string bundle = 2;\n  uint32 pid = 3;\n  containerd.v1.types.Status status = 4;\n  string stdin = 5;\n  string stdout = 6;\n  string stderr = 7;\n  bool terminal = 8;\n  uint32 exit_status = 9;\n  google.protobuf.Timestamp exited_at = 10;\n  string exec_id = 11;\n}\n\nmessage KillRequest {\n  string id = 1;\n  string exec_id = 2;\n  uint32 signal = 3;\n  bool all = 4;\n}\n\nmessage CloseIORequest {\n  string id = 1;\n  string exec_id = 2;\n  bool stdin = 3;\n}\n\nmessage PidsRequest {\n  string id = 1;\n}\n\nmessage PidsResponse {\n  repeated containerd.v1.types.ProcessInfo processes = 1;\n}\n\nmessage CheckpointTaskRequest {\n  string id = 1;\n  string path = 2;\n  google.protobuf.Any options = 3;\n}\n\nmessage UpdateTaskRequest {\n  string id = 1;\n  google.protobuf.Any resources = 2;\n  map<string, string> annotations = 3;\n}\n\nmessage StartRequest {\n  string id = 1;\n  string exec_id = 2;\n}\n\nmessage StartResponse {\n  uint32 pid = 1;\n}\n\nmessage WaitRequest {\n  string id = 1;\n  string exec_id = 2;\n}\n\nmessage WaitResponse {\n  uint32 exit_status = 1;\n  google.protobuf.Timestamp exited_at = 2;\n}\n\nmessage StatsRequest {\n  string id = 1;\n}\n\nmessage StatsResponse {\n  google.protobuf.Any stats = 1;\n}\n\nmessage ConnectRequest {\n  string id = 1;\n}\n\nmessage ConnectResponse {\n  uint32 shim_pid = 1;\n  uint32 task_pid = 2;\n  string version = 3;\n}\n\nmessage ShutdownRequest {\n  string id = 1;\n  bool now = 2;\n}\n\nmessage PauseRequest {\n  string id = 1;\n}\n\nmessage ResumeRequest {\n  string id = 1;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.events.ttrpc.v1;\n\nimport \"google/protobuf/empty.proto\";\nimport \"types/event.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/ttrpc/events/v1;events\";\n\nservice Events {\n  // Forward sends an event that has already been packaged into an envelope\n  // with a timestamp and namespace.\n  //\n  // This is useful if earlier timestamping is required or when forwarding on\n  // behalf of another component, namespace or publisher.\n  rpc Forward(ForwardRequest) returns (google.protobuf.Empty);\n}\n\nmessage ForwardRequest {\n  containerd.types.Envelope envelope = 1;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/descriptor.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types;\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\n// Descriptor describes a blob in a content store.\n//\n// This descriptor can be used to reference content from an\n// oci descriptor found in a manifest.\n// See https://godoc.org/github.com/opencontainers/image-spec/specs-go/v1#Descriptor\nmessage Descriptor {\n  string media_type = 1;\n  string digest = 2;\n  int64 size = 3;\n  map<string, string> annotations = 5;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/event.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/timestamp.proto\";\nimport \"types/fieldpath.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\nmessage Envelope {\n  option (containerd.types.fieldpath) = true;\n  google.protobuf.Timestamp timestamp = 1;\n  string namespace = 2;\n  string topic = 3;\n  google.protobuf.Any event = 4;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/fieldpath.proto",
    "content": "// Protocol Buffers for Go with Gadgets\n//\n// Copyright (c) 2013, The GoGo Authors. All rights reserved.\n// http://github.com/gogo/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nsyntax = \"proto3\";\npackage containerd.types;\n\nimport \"google/protobuf/descriptor.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\nextend google.protobuf.FileOptions {\n  optional bool fieldpath_all = 63300;\n}\n\nextend google.protobuf.MessageOptions {\n  optional bool fieldpath = 64400;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/introspection.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types;\n\nimport \"google/protobuf/any.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\nmessage RuntimeRequest {\n  string runtime_path = 1;\n  // Options correspond to CreateTaskRequest.options.\n  // This is needed to pass the runc binary path, etc.\n  google.protobuf.Any options = 2;\n}\n\nmessage RuntimeVersion {\n  string version = 1;\n  string revision = 2;\n}\n\nmessage RuntimeInfo {\n  string name = 1;\n  RuntimeVersion version = 2;\n  // Options correspond to RuntimeInfoRequest.Options (contains runc binary path, etc.)\n  google.protobuf.Any options = 3;\n  // OCI-compatible runtimes should use https://github.com/opencontainers/runtime-spec/blob/main/features.md\n  google.protobuf.Any features = 4;\n  // Annotations of the shim. Irrelevant to features.Annotations.\n  map<string, string> annotations = 5;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/metrics.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/timestamp.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\nmessage Metric {\n  google.protobuf.Timestamp timestamp = 1;\n  string id = 2;\n  google.protobuf.Any data = 3;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/mount.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types;\n\nimport \"google/protobuf/timestamp.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\n// Mount describes mounts for a container.\n//\n// This type is the lingua franca of ContainerD. All services provide mounts\n// to be used with the container at creation time.\n//\n// The Mount type follows the structure of the mount syscall, including a type,\n// source, target and options.\nmessage Mount {\n  // Type defines the nature of the mount.\n  string type = 1;\n\n  // Source specifies the name of the mount. Depending on mount type, this\n  // may be a volume name or a host path, or even ignored.\n  string source = 2;\n\n  // Target path in container\n  string target = 3;\n\n  // Options specifies zero or more fstab style mount options.\n  repeated string options = 4;\n}\n\nmessage ActiveMount {\n  Mount mount = 1;\n\n  google.protobuf.Timestamp mounted_at = 2;\n\n  string mount_point = 3;\n\n  map<string, string> data = 4;\n}\n\nmessage ActivationInfo {\n  string name = 1;\n\n  repeated ActiveMount active = 2;\n\n  repeated Mount system = 3;\n\n  map<string, string> labels = 4;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/platform.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types;\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\n// Platform follows the structure of the OCI platform specification, from\n// descriptors.\nmessage Platform {\n  string os = 1;\n  string architecture = 2;\n  string variant = 3;\n  string os_version = 4;\n  repeated string os_features = 5;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/runc/options/oci.proto",
    "content": "syntax = \"proto3\";\n\npackage containerd.runc.v1;\n\noption go_package = \"github.com/containerd/containerd/api/types/runc/options;options\";\n\nmessage Options {\n  // disable pivot root when creating a container\n  bool no_pivot_root = 1;\n  // create a new keyring for the container\n  bool no_new_keyring = 2;\n  // place the shim in a cgroup\n  string shim_cgroup = 3;\n  // set the I/O's pipes uid\n  uint32 io_uid = 4;\n  // set the I/O's pipes gid\n  uint32 io_gid = 5;\n  // binary name of the runc binary\n  string binary_name = 6;\n  // runc root directory\n  string root = 7;\n  // criu binary path.\n  //\n  // Removed in containerd v2.0: string criu_path = 8;\n  reserved 8;\n  // enable systemd cgroups\n  bool systemd_cgroup = 9;\n  // criu image path\n  string criu_image_path = 10;\n  // criu work path\n  string criu_work_path = 11;\n  // task api address, can be a unix domain socket, or vsock address.\n  // it is in the form of ttrpc+unix://path/to/uds or grpc+vsock://<vsock cid>:<port>.\n  string task_api_address = 12;\n  // task api version, currently supported value is 2 and 3.\n  uint32 task_api_version = 13;\n}\n\nmessage CheckpointOptions {\n  // exit the container after a checkpoint\n  bool exit = 1;\n  // checkpoint open tcp connections\n  bool open_tcp = 2;\n  // checkpoint external unix sockets\n  bool external_unix_sockets = 3;\n  // checkpoint terminals (ptys)\n  bool terminal = 4;\n  // allow checkpointing of file locks\n  bool file_locks = 5;\n  // restore provided namespaces as empty namespaces\n  repeated string empty_namespaces = 6;\n  // set the cgroups mode, soft, full, strict\n  string cgroups_mode = 7;\n  // checkpoint image path\n  string image_path = 8;\n  // checkpoint work path\n  string work_path = 9;\n}\n\nmessage ProcessDetails {\n  // exec process id if the process is managed by a shim\n  string exec_id = 1;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/sandbox.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/timestamp.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\n// Sandbox represents a sandbox metadata object that keeps all info required by controller to\n// work with a particular instance.\nmessage Sandbox {\n  // SandboxID is a unique instance identifier within namespace\n  string sandbox_id = 1;\n  message Runtime {\n    // Name is the name of the runtime.\n    string name = 1;\n    // Options specify additional runtime initialization options for the shim (this data will be available in StartShim).\n    // Typically this data expected to be runtime shim implementation specific.\n    google.protobuf.Any options = 2;\n  }\n  // Runtime specifies which runtime to use for executing this container.\n  Runtime runtime = 2;\n  // Spec is sandbox configuration (kin of OCI runtime spec), spec's data will be written to a config.json file in the\n  // bundle directory (similary to OCI spec).\n  google.protobuf.Any spec = 3;\n  // Labels provides an area to include arbitrary data on containers.\n  map<string, string> labels = 4;\n  // CreatedAt is the time the container was first created.\n  google.protobuf.Timestamp created_at = 5;\n  // UpdatedAt is the last time the container was mutated.\n  
google.protobuf.Timestamp updated_at = 6;\n  // Extensions allow clients to provide optional blobs that can be handled by runtime.\n  map<string, google.protobuf.Any> extensions = 7;\n  // Sandboxer is the name of the sandbox controller who manages the sandbox.\n  string sandboxer = 10;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/task/task.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.v1.types;\n\nimport \"google/protobuf/any.proto\";\nimport \"google/protobuf/timestamp.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types/task\";\n\nenum Status {\n  UNKNOWN = 0;\n  CREATED = 1;\n  RUNNING = 2;\n  STOPPED = 3;\n  PAUSED = 4;\n  PAUSING = 5;\n}\n\nmessage Process {\n  string container_id = 1;\n  string id = 2;\n  uint32 pid = 3;\n  Status status = 4;\n  string stdin = 5;\n  string stdout = 6;\n  string stderr = 7;\n  bool terminal = 8;\n  uint32 exit_status = 9;\n  google.protobuf.Timestamp exited_at = 10;\n}\n\nmessage ProcessInfo {\n  // PID is the process ID.\n  uint32 pid = 1;\n  // Info contains additional process information.\n  //\n  // Info varies by platform.\n  google.protobuf.Any info = 2;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/gogoproto/gogo.proto",
    "content": "// Protocol Buffers for Go with Gadgets\n//\n// Copyright (c) 2013, The GoGo Authors. All rights reserved.\n// http://github.com/gogo/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nsyntax = \"proto2\";\npackage gogoproto;\n\nimport \"google/protobuf/descriptor.proto\";\n\noption java_package = \"com.google.protobuf\";\noption java_outer_classname = \"GoGoProtos\";\noption go_package = \"github.com/gogo/protobuf/gogoproto\";\n\nextend google.protobuf.EnumOptions {\n\toptional bool goproto_enum_prefix = 62001;\n\toptional bool goproto_enum_stringer = 62021;\n\toptional bool enum_stringer = 62022;\n\toptional string enum_customname = 62023;\n\toptional bool enumdecl = 
62024;\n}\n\nextend google.protobuf.EnumValueOptions {\n\toptional string enumvalue_customname = 66001;\n}\n\nextend google.protobuf.FileOptions {\n\toptional bool goproto_getters_all = 63001;\n\toptional bool goproto_enum_prefix_all = 63002;\n\toptional bool goproto_stringer_all = 63003;\n\toptional bool verbose_equal_all = 63004;\n\toptional bool face_all = 63005;\n\toptional bool gostring_all = 63006;\n\toptional bool populate_all = 63007;\n\toptional bool stringer_all = 63008;\n\toptional bool onlyone_all = 63009;\n\n\toptional bool equal_all = 63013;\n\toptional bool description_all = 63014;\n\toptional bool testgen_all = 63015;\n\toptional bool benchgen_all = 63016;\n\toptional bool marshaler_all = 63017;\n\toptional bool unmarshaler_all = 63018;\n\toptional bool stable_marshaler_all = 63019;\n\n\toptional bool sizer_all = 63020;\n\n\toptional bool goproto_enum_stringer_all = 63021;\n\toptional bool enum_stringer_all = 63022;\n\n\toptional bool unsafe_marshaler_all = 63023;\n\toptional bool unsafe_unmarshaler_all = 63024;\n\n\toptional bool goproto_extensions_map_all = 63025;\n\toptional bool goproto_unrecognized_all = 63026;\n\toptional bool gogoproto_import = 63027;\n\toptional bool protosizer_all = 63028;\n\toptional bool compare_all = 63029;\n    optional bool typedecl_all = 63030;\n    optional bool enumdecl_all = 63031;\n\n\toptional bool goproto_registration = 63032;\n\toptional bool messagename_all = 63033;\n\n\toptional bool goproto_sizecache_all = 63034;\n\toptional bool goproto_unkeyed_all = 63035;\n}\n\nextend google.protobuf.MessageOptions {\n\toptional bool goproto_getters = 64001;\n\toptional bool goproto_stringer = 64003;\n\toptional bool verbose_equal = 64004;\n\toptional bool face = 64005;\n\toptional bool gostring = 64006;\n\toptional bool populate = 64007;\n\toptional bool stringer = 67008;\n\toptional bool onlyone = 64009;\n\n\toptional bool equal = 64013;\n\toptional bool description = 64014;\n\toptional bool testgen = 64015;\n\toptional 
bool benchgen = 64016;\n\toptional bool marshaler = 64017;\n\toptional bool unmarshaler = 64018;\n\toptional bool stable_marshaler = 64019;\n\n\toptional bool sizer = 64020;\n\n\toptional bool unsafe_marshaler = 64023;\n\toptional bool unsafe_unmarshaler = 64024;\n\n\toptional bool goproto_extensions_map = 64025;\n\toptional bool goproto_unrecognized = 64026;\n\n\toptional bool protosizer = 64028;\n\toptional bool compare = 64029;\n\n\toptional bool typedecl = 64030;\n\n\toptional bool messagename = 64033;\n\n\toptional bool goproto_sizecache = 64034;\n\toptional bool goproto_unkeyed = 64035;\n}\n\nextend google.protobuf.FieldOptions {\n\toptional bool nullable = 65001;\n\toptional bool embed = 65002;\n\toptional string customtype = 65003;\n\toptional string customname = 65004;\n\toptional string jsontag = 65005;\n\toptional string moretags = 65006;\n\toptional string casttype = 65007;\n\toptional string castkey = 65008;\n\toptional string castvalue = 65009;\n\n\toptional bool stdtime = 65010;\n\toptional bool stdduration = 65011;\n\toptional bool wktpointer = 65012;\n\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/google/protobuf/any.proto",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nsyntax = \"proto3\";\n\npackage google.protobuf;\n\noption csharp_namespace = \"Google.Protobuf.WellKnownTypes\";\noption go_package = \"google.golang.org/protobuf/types/known/anypb\";\noption java_package = \"com.google.protobuf\";\noption java_outer_classname = \"AnyProto\";\noption java_multiple_files = true;\noption objc_class_prefix = \"GPB\";\n\n// `Any` contains an arbitrary serialized protocol buffer message along with a\n// URL that describes the type of the serialized message.\n//\n// Protobuf library provides support to pack/unpack Any values in the form\n// of utility functions or additional generated methods of the Any type.\n//\n// Example 1: Pack and unpack a message in C++.\n//\n//     Foo foo = ...;\n//     Any any;\n//     any.PackFrom(foo);\n//     ...\n//     if (any.UnpackTo(&foo)) {\n//       ...\n//     }\n//\n// Example 2: Pack and unpack a message in Java.\n//\n//     Foo foo = ...;\n//     Any any = Any.pack(foo);\n//     ...\n//     if (any.is(Foo.class)) {\n//       foo = any.unpack(Foo.class);\n//     }\n//\n//  Example 3: Pack and unpack a message in Python.\n//\n//     foo = Foo(...)\n//     any = Any()\n//     any.Pack(foo)\n//     ...\n//     if any.Is(Foo.DESCRIPTOR):\n//       any.Unpack(foo)\n//       ...\n//\n//  Example 4: Pack and unpack a message in Go\n//\n//      foo := &pb.Foo{...}\n//      any, err := anypb.New(foo)\n//      if err != nil {\n//        ...\n//      }\n//      ...\n//      
foo := &pb.Foo{}\n//      if err := any.UnmarshalTo(foo); err != nil {\n//        ...\n//      }\n//\n// The pack methods provided by protobuf library will by default use\n// 'type.googleapis.com/full.type.name' as the type URL and the unpack\n// methods only use the fully qualified type name after the last '/'\n// in the type URL, for example \"foo.bar.com/x/y.z\" will yield type\n// name \"y.z\".\n//\n//\n// JSON\n// ====\n// The JSON representation of an `Any` value uses the regular\n// representation of the deserialized, embedded message, with an\n// additional field `@type` which contains the type URL. Example:\n//\n//     package google.profile;\n//     message Person {\n//       string first_name = 1;\n//       string last_name = 2;\n//     }\n//\n//     {\n//       \"@type\": \"type.googleapis.com/google.profile.Person\",\n//       \"firstName\": <string>,\n//       \"lastName\": <string>\n//     }\n//\n// If the embedded message type is well-known and has a custom JSON\n// representation, that representation will be embedded adding a field\n// `value` which holds the custom JSON in addition to the `@type`\n// field. Example (for message [google.protobuf.Duration][]):\n//\n//     {\n//       \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n//       \"value\": \"1.212s\"\n//     }\n//\nmessage Any {\n  // A URL/resource name that uniquely identifies the type of the serialized\n  // protocol buffer message. This string must contain at least\n  // one \"/\" character. The last segment of the URL's path must represent\n  // the fully qualified name of the type (as in\n  // `path/google.protobuf.Duration`). The name should be in a canonical form\n  // (e.g., leading \".\" is not accepted).\n  //\n  // In practice, teams usually precompile into the binary all types that they\n  // expect it to use in the context of Any. 
However, for URLs which use the\n  // scheme `http`, `https`, or no scheme, one can optionally set up a type\n  // server that maps type URLs to message definitions as follows:\n  //\n  // * If no scheme is provided, `https` is assumed.\n  // * An HTTP GET on the URL must yield a [google.protobuf.Type][]\n  //   value in binary format, or produce an error.\n  // * Applications are allowed to cache lookup results based on the\n  //   URL, or have them precompiled into a binary to avoid any\n  //   lookup. Therefore, binary compatibility needs to be preserved\n  //   on changes to types. (Use versioned type names to manage\n  //   breaking changes.)\n  //\n  // Note: this functionality is not currently available in the official\n  // protobuf release, and it is not used for type URLs beginning with\n  // type.googleapis.com.\n  //\n  // Schemes other than `http`, `https` (or the empty scheme) might be\n  // used with implementation specific semantics.\n  //\n  string type_url = 1;\n\n  // Must be a valid serialized protocol buffer of the above specified type.\n  bytes value = 2;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/google/protobuf/descriptor.proto",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// The messages in this file describe the definitions found in .proto files.\n// A valid .proto file can be translated directly to a FileDescriptorProto\n// without any other information (e.g. without reading its imports).\n\n\nsyntax = \"proto2\";\n\npackage google.protobuf;\n\noption go_package = \"google.golang.org/protobuf/types/descriptorpb\";\noption java_package = \"com.google.protobuf\";\noption java_outer_classname = \"DescriptorProtos\";\noption csharp_namespace = \"Google.Protobuf.Reflection\";\noption objc_class_prefix = \"GPB\";\noption cc_enable_arenas = true;\n\n// descriptor.proto must be optimized for speed because reflection-based\n// algorithms don't work during bootstrapping.\noption optimize_for = SPEED;\n\n// The protocol compiler can output a FileDescriptorSet containing the .proto\n// files it parses.\nmessage FileDescriptorSet {\n  repeated FileDescriptorProto file = 1;\n}\n\n// Describes a complete .proto file.\nmessage FileDescriptorProto {\n  optional string name = 1;     // file name, relative to root of source tree\n  optional string package = 2;  // e.g. 
\"foo\", \"foo.bar\", etc.\n\n  // Names of files imported by this file.\n  repeated string dependency = 3;\n  // Indexes of the public imported files in the dependency list above.\n  repeated int32 public_dependency = 10;\n  // Indexes of the weak imported files in the dependency list.\n  // For Google-internal migration only. Do not use.\n  repeated int32 weak_dependency = 11;\n\n  // All top-level definitions in this file.\n  repeated DescriptorProto message_type = 4;\n  repeated EnumDescriptorProto enum_type = 5;\n  repeated ServiceDescriptorProto service = 6;\n  repeated FieldDescriptorProto extension = 7;\n\n  optional FileOptions options = 8;\n\n  // This field contains optional information about the original source code.\n  // You may safely remove this entire field without harming runtime\n  // functionality of the descriptors -- the information is needed only by\n  // development tools.\n  optional SourceCodeInfo source_code_info = 9;\n\n  // The syntax of the proto file.\n  // The supported values are \"proto2\" and \"proto3\".\n  optional string syntax = 12;\n}\n\n// Describes a message type.\nmessage DescriptorProto {\n  optional string name = 1;\n\n  repeated FieldDescriptorProto field = 2;\n  repeated FieldDescriptorProto extension = 6;\n\n  repeated DescriptorProto nested_type = 3;\n  repeated EnumDescriptorProto enum_type = 4;\n\n  message ExtensionRange {\n    optional int32 start = 1;  // Inclusive.\n    optional int32 end = 2;    // Exclusive.\n\n    optional ExtensionRangeOptions options = 3;\n  }\n  repeated ExtensionRange extension_range = 5;\n\n  repeated OneofDescriptorProto oneof_decl = 8;\n\n  optional MessageOptions options = 7;\n\n  // Range of reserved tag numbers. Reserved tag numbers may not be used by\n  // fields or extension ranges in the same message. 
Reserved ranges may\n  // not overlap.\n  message ReservedRange {\n    optional int32 start = 1;  // Inclusive.\n    optional int32 end = 2;    // Exclusive.\n  }\n  repeated ReservedRange reserved_range = 9;\n  // Reserved field names, which may not be used by fields in the same message.\n  // A given name may only be reserved once.\n  repeated string reserved_name = 10;\n}\n\nmessage ExtensionRangeOptions {\n  // The parser stores options it doesn't recognize here. See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n\n  // Clients can define custom options in extensions of this message. See above.\n  extensions 1000 to max;\n}\n\n// Describes a field within a message.\nmessage FieldDescriptorProto {\n  enum Type {\n    // 0 is reserved for errors.\n    // Order is weird for historical reasons.\n    TYPE_DOUBLE = 1;\n    TYPE_FLOAT = 2;\n    // Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT64 if\n    // negative values are likely.\n    TYPE_INT64 = 3;\n    TYPE_UINT64 = 4;\n    // Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT32 if\n    // negative values are likely.\n    TYPE_INT32 = 5;\n    TYPE_FIXED64 = 6;\n    TYPE_FIXED32 = 7;\n    TYPE_BOOL = 8;\n    TYPE_STRING = 9;\n    // Tag-delimited aggregate.\n    // Group type is deprecated and not supported in proto3. 
However, Proto3\n    // implementations should still be able to parse the group wire format and\n    // treat group fields as unknown fields.\n    TYPE_GROUP = 10;\n    TYPE_MESSAGE = 11;  // Length-delimited aggregate.\n\n    // New in version 2.\n    TYPE_BYTES = 12;\n    TYPE_UINT32 = 13;\n    TYPE_ENUM = 14;\n    TYPE_SFIXED32 = 15;\n    TYPE_SFIXED64 = 16;\n    TYPE_SINT32 = 17;  // Uses ZigZag encoding.\n    TYPE_SINT64 = 18;  // Uses ZigZag encoding.\n  }\n\n  enum Label {\n    // 0 is reserved for errors\n    LABEL_OPTIONAL = 1;\n    LABEL_REQUIRED = 2;\n    LABEL_REPEATED = 3;\n  }\n\n  optional string name = 1;\n  optional int32 number = 3;\n  optional Label label = 4;\n\n  // If type_name is set, this need not be set.  If both this and type_name\n  // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.\n  optional Type type = 5;\n\n  // For message and enum types, this is the name of the type.  If the name\n  // starts with a '.', it is fully-qualified.  Otherwise, C++-like scoping\n  // rules are used to find the type (i.e. first the nested types within this\n  // message are searched, then within the parent, on up to the root\n  // namespace).\n  optional string type_name = 6;\n\n  // For extensions, this is the name of the type being extended.  It is\n  // resolved in the same manner as type_name.\n  optional string extendee = 2;\n\n  // For numeric types, contains the original text representation of the value.\n  // For booleans, \"true\" or \"false\".\n  // For strings, contains the default text contents (not escaped in any way).\n  // For bytes, contains the C escaped value.  All bytes >= 128 are escaped.\n  // TODO(kenton):  Base-64 encode?\n  optional string default_value = 7;\n\n  // If set, gives the index of a oneof in the containing type's oneof_decl\n  // list.  This field is a member of that oneof.\n  optional int32 oneof_index = 9;\n\n  // JSON name of this field. The value is set by protocol compiler. 
If the\n  // user has set a \"json_name\" option on this field, that option's value\n  // will be used. Otherwise, it's deduced from the field's name by converting\n  // it to camelCase.\n  optional string json_name = 10;\n\n  optional FieldOptions options = 8;\n\n  // If true, this is a proto3 \"optional\". When a proto3 field is optional, it\n  // tracks presence regardless of field type.\n  //\n  // When proto3_optional is true, this field must be belong to a oneof to\n  // signal to old proto3 clients that presence is tracked for this field. This\n  // oneof is known as a \"synthetic\" oneof, and this field must be its sole\n  // member (each proto3 optional field gets its own synthetic oneof). Synthetic\n  // oneofs exist in the descriptor only, and do not generate any API. Synthetic\n  // oneofs must be ordered after all \"real\" oneofs.\n  //\n  // For message fields, proto3_optional doesn't create any semantic change,\n  // since non-repeated message fields always track presence. However it still\n  // indicates the semantic detail of whether the user wrote \"optional\" or not.\n  // This can be useful for round-tripping the .proto file. For consistency we\n  // give message fields a synthetic oneof also, even though it is not required\n  // to track presence. This is especially important because the parser can't\n  // tell if a field is a message or an enum, so it must always create a\n  // synthetic oneof.\n  //\n  // Proto2 optional fields do not set this flag, because they already indicate\n  // optional with `LABEL_OPTIONAL`.\n  optional bool proto3_optional = 17;\n}\n\n// Describes a oneof.\nmessage OneofDescriptorProto {\n  optional string name = 1;\n  optional OneofOptions options = 2;\n}\n\n// Describes an enum type.\nmessage EnumDescriptorProto {\n  optional string name = 1;\n\n  repeated EnumValueDescriptorProto value = 2;\n\n  optional EnumOptions options = 3;\n\n  // Range of reserved numeric values. 
Reserved values may not be used by\n  // entries in the same enum. Reserved ranges may not overlap.\n  //\n  // Note that this is distinct from DescriptorProto.ReservedRange in that it\n  // is inclusive such that it can appropriately represent the entire int32\n  // domain.\n  message EnumReservedRange {\n    optional int32 start = 1;  // Inclusive.\n    optional int32 end = 2;    // Inclusive.\n  }\n\n  // Range of reserved numeric values. Reserved numeric values may not be used\n  // by enum values in the same enum declaration. Reserved ranges may not\n  // overlap.\n  repeated EnumReservedRange reserved_range = 4;\n\n  // Reserved enum value names, which may not be reused. A given name may only\n  // be reserved once.\n  repeated string reserved_name = 5;\n}\n\n// Describes a value within an enum.\nmessage EnumValueDescriptorProto {\n  optional string name = 1;\n  optional int32 number = 2;\n\n  optional EnumValueOptions options = 3;\n}\n\n// Describes a service.\nmessage ServiceDescriptorProto {\n  optional string name = 1;\n  repeated MethodDescriptorProto method = 2;\n\n  optional ServiceOptions options = 3;\n}\n\n// Describes a method of a service.\nmessage MethodDescriptorProto {\n  optional string name = 1;\n\n  // Input and output type names.  These are resolved in the same way as\n  // FieldDescriptorProto.type_name, but must refer to a message type.\n  optional string input_type = 2;\n  optional string output_type = 3;\n\n  optional MethodOptions options = 4;\n\n  // Identifies if client streams multiple client messages\n  optional bool client_streaming = 5 [default = false];\n  // Identifies if server streams multiple server messages\n  optional bool server_streaming = 6 [default = false];\n}\n\n\n// ===================================================================\n// Options\n\n// Each of the definitions above may have \"options\" attached.  
These are\n// just annotations which may cause code to be generated slightly differently\n// or may contain hints for code that manipulates protocol messages.\n//\n// Clients may define custom options as extensions of the *Options messages.\n// These extensions may not yet be known at parsing time, so the parser cannot\n// store the values in them.  Instead it stores them in a field in the *Options\n// message called uninterpreted_option. This field must have the same name\n// across all *Options messages. We then use this field to populate the\n// extensions when we build a descriptor, at which point all protos have been\n// parsed and so all extensions are known.\n//\n// Extension numbers for custom options may be chosen as follows:\n// * For options which will only be used within a single application or\n//   organization, or for experimental options, use field numbers 50000\n//   through 99999.  It is up to you to ensure that you do not use the\n//   same number for multiple options.\n// * For options which will be published and used publicly by multiple\n//   independent entities, e-mail protobuf-global-extension-registry@google.com\n//   to reserve extension numbers. Simply provide your project name (e.g.\n//   Objective-C plugin) and your project website (if available) -- there's no\n//   need to explain how you intend to use them. Usually you only need one\n//   extension number. You can declare multiple options with only one extension\n//   number by putting them in a sub-message. See the Custom Options section of\n//   the docs for examples:\n//   https://developers.google.com/protocol-buffers/docs/proto#options\n//   If this turns out to be popular, a web service will be set up\n//   to automatically assign option numbers.\n\nmessage FileOptions {\n\n  // Sets the Java package where classes generated from this .proto will be\n  // placed.  
By default, the proto package is used, but this is often\n  // inappropriate because proto packages do not normally start with backwards\n  // domain names.\n  optional string java_package = 1;\n\n\n  // Controls the name of the wrapper Java class generated for the .proto file.\n  // That class will always contain the .proto file's getDescriptor() method as\n  // well as any top-level extensions defined in the .proto file.\n  // If java_multiple_files is disabled, then all the other classes from the\n  // .proto file will be nested inside the single wrapper outer class.\n  optional string java_outer_classname = 8;\n\n  // If enabled, then the Java code generator will generate a separate .java\n  // file for each top-level message, enum, and service defined in the .proto\n  // file.  Thus, these types will *not* be nested inside the wrapper class\n  // named by java_outer_classname.  However, the wrapper class will still be\n  // generated to contain the file's getDescriptor() method as well as any\n  // top-level extensions defined in the file.\n  optional bool java_multiple_files = 10 [default = false];\n\n  // This option does nothing.\n  optional bool java_generate_equals_and_hash = 20 [deprecated=true];\n\n  // If set true, then the Java2 code generator will generate code that\n  // throws an exception whenever an attempt is made to assign a non-UTF-8\n  // byte sequence to a string field.\n  // Message reflection will do the same.\n  // However, an extension field still accepts non-UTF-8 byte sequences.\n  // This option has no effect on when used with the lite runtime.\n  optional bool java_string_check_utf8 = 27 [default = false];\n\n\n  // Generated classes can be optimized for speed or code size.\n  enum OptimizeMode {\n    SPEED = 1;         // Generate complete code for parsing, serialization,\n                       // etc.\n    CODE_SIZE = 2;     // Use ReflectionOps to implement these methods.\n    LITE_RUNTIME = 3;  // Generate code using MessageLite 
and the lite runtime.\n  }\n  optional OptimizeMode optimize_for = 9 [default = SPEED];\n\n  // Sets the Go package where structs generated from this .proto will be\n  // placed. If omitted, the Go package will be derived from the following:\n  //   - The basename of the package import path, if provided.\n  //   - Otherwise, the package statement in the .proto file, if present.\n  //   - Otherwise, the basename of the .proto file, without extension.\n  optional string go_package = 11;\n\n\n\n\n  // Should generic services be generated in each language?  \"Generic\" services\n  // are not specific to any particular RPC system.  They are generated by the\n  // main code generators in each language (without additional plugins).\n  // Generic services were the only kind of service generation supported by\n  // early versions of google.protobuf.\n  //\n  // Generic services are now considered deprecated in favor of using plugins\n  // that generate code specific to your particular RPC system.  Therefore,\n  // these default to false.  Old code which depends on generic services should\n  // explicitly set them to true.\n  optional bool cc_generic_services = 16 [default = false];\n  optional bool java_generic_services = 17 [default = false];\n  optional bool py_generic_services = 18 [default = false];\n  optional bool php_generic_services = 42 [default = false];\n\n  // Is this file deprecated?\n  // Depending on the target platform, this can emit Deprecated annotations\n  // for everything in the file, or it will be completely ignored; in the very\n  // least, this is a formalization for deprecating files.\n  optional bool deprecated = 23 [default = false];\n\n  // Enables the use of arenas for the proto messages in this file. This applies\n  // only to generated classes for C++.\n  optional bool cc_enable_arenas = 31 [default = true];\n\n\n  // Sets the objective c class prefix which is prepended to all objective c\n  // generated classes from this .proto. 
There is no default.\n  optional string objc_class_prefix = 36;\n\n  // Namespace for generated classes; defaults to the package.\n  optional string csharp_namespace = 37;\n\n  // By default Swift generators will take the proto package and CamelCase it\n  // replacing '.' with underscore and use that to prefix the types/symbols\n  // defined. When this options is provided, they will use this value instead\n  // to prefix the types/symbols defined.\n  optional string swift_prefix = 39;\n\n  // Sets the php class prefix which is prepended to all php generated classes\n  // from this .proto. Default is empty.\n  optional string php_class_prefix = 40;\n\n  // Use this option to change the namespace of php generated classes. Default\n  // is empty. When this option is empty, the package name will be used for\n  // determining the namespace.\n  optional string php_namespace = 41;\n\n  // Use this option to change the namespace of php generated metadata classes.\n  // Default is empty. When this option is empty, the proto file name will be\n  // used for determining the namespace.\n  optional string php_metadata_namespace = 44;\n\n  // Use this option to change the package of ruby generated classes. Default\n  // is empty. When this option is not set, the package name will be used for\n  // determining the ruby package.\n  optional string ruby_package = 45;\n\n\n  // The parser stores options it doesn't recognize here.\n  // See the documentation for the \"Options\" section above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message.\n  // See the documentation for the \"Options\" section above.\n  extensions 1000 to max;\n\n  reserved 38;\n}\n\nmessage MessageOptions {\n  // Set true to use the old proto1 MessageSet wire format for extensions.\n  // This is provided for backwards-compatibility with the MessageSet wire\n  // format.  
You should not use this for any other reason:  It's less\n  // efficient, has fewer features, and is more complicated.\n  //\n  // The message must be defined exactly as follows:\n  //   message Foo {\n  //     option message_set_wire_format = true;\n  //     extensions 4 to max;\n  //   }\n  // Note that the message cannot have any defined fields; MessageSets only\n  // have extensions.\n  //\n  // All extensions of your type must be singular messages; e.g. they cannot\n  // be int32s, enums, or repeated messages.\n  //\n  // Because this is an option, the above two restrictions are not enforced by\n  // the protocol compiler.\n  optional bool message_set_wire_format = 1 [default = false];\n\n  // Disables the generation of the standard \"descriptor()\" accessor, which can\n  // conflict with a field of the same name.  This is meant to make migration\n  // from proto1 easier; new code should avoid fields named \"descriptor\".\n  optional bool no_standard_descriptor_accessor = 2 [default = false];\n\n  // Is this message deprecated?\n  // Depending on the target platform, this can emit Deprecated annotations\n  // for the message, or it will be completely ignored; in the very least,\n  // this is a formalization for deprecating messages.\n  optional bool deprecated = 3 [default = false];\n\n  reserved 4, 5, 6;\n\n  // Whether the message is an automatically generated map entry type for the\n  // maps field.\n  //\n  // For maps fields:\n  //     map<KeyType, ValueType> map_field = 1;\n  // The parsed descriptor looks like:\n  //     message MapFieldEntry {\n  //         option map_entry = true;\n  //         optional KeyType key = 1;\n  //         optional ValueType value = 2;\n  //     }\n  //     repeated MapFieldEntry map_field = 1;\n  //\n  // Implementations may choose not to generate the map_entry=true message, but\n  // use a native map in the target language to hold the keys and values.\n  // The reflection APIs in such implementations still need to work 
as\n  // if the field is a repeated message field.\n  //\n  // NOTE: Do not set the option in .proto files. Always use the maps syntax\n  // instead. The option should only be implicitly set by the proto compiler\n  // parser.\n  optional bool map_entry = 7;\n\n  reserved 8;  // javalite_serializable\n  reserved 9;  // javanano_as_lite\n\n\n  // The parser stores options it doesn't recognize here. See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message. See above.\n  extensions 1000 to max;\n}\n\nmessage FieldOptions {\n  // The ctype option instructs the C++ code generator to use a different\n  // representation of the field than it normally would.  See the specific\n  // options below.  This option is not yet implemented in the open source\n  // release -- sorry, we'll try to include it in a future version!\n  optional CType ctype = 1 [default = STRING];\n  enum CType {\n    // Default mode.\n    STRING = 0;\n\n    CORD = 1;\n\n    STRING_PIECE = 2;\n  }\n  // The packed option can be enabled for repeated primitive fields to enable\n  // a more efficient representation on the wire. Rather than repeatedly\n  // writing the tag and type for each element, the entire array is encoded as\n  // a single length-delimited blob. In proto3, only explicit setting it to\n  // false will avoid using packed encoding.\n  optional bool packed = 2;\n\n  // The jstype option determines the JavaScript type used for values of the\n  // field.  The option is permitted only for 64 bit integral and fixed types\n  // (int64, uint64, sint64, fixed64, sfixed64).  A field with jstype JS_STRING\n  // is represented as JavaScript string, which avoids loss of precision that\n  // can happen when a large value is converted to a floating point JavaScript.\n  // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to\n  // use the JavaScript \"number\" type.  
The behavior of the default option\n  // JS_NORMAL is implementation dependent.\n  //\n  // This option is an enum to permit additional types to be added, e.g.\n  // goog.math.Integer.\n  optional JSType jstype = 6 [default = JS_NORMAL];\n  enum JSType {\n    // Use the default type.\n    JS_NORMAL = 0;\n\n    // Use JavaScript strings.\n    JS_STRING = 1;\n\n    // Use JavaScript numbers.\n    JS_NUMBER = 2;\n  }\n\n  // Should this field be parsed lazily?  Lazy applies only to message-type\n  // fields.  It means that when the outer message is initially parsed, the\n  // inner message's contents will not be parsed but instead stored in encoded\n  // form.  The inner message will actually be parsed when it is first accessed.\n  //\n  // This is only a hint.  Implementations are free to choose whether to use\n  // eager or lazy parsing regardless of the value of this option.  However,\n  // setting this option true suggests that the protocol author believes that\n  // using lazy parsing on this field is worth the additional bookkeeping\n  // overhead typically needed to implement it.\n  //\n  // This option does not affect the public interface of any generated code;\n  // all method signatures remain the same.  Furthermore, thread-safety of the\n  // interface is not affected by this option; const methods remain safe to\n  // call from multiple threads concurrently, while non-const methods continue\n  // to require exclusive access.\n  //\n  //\n  // Note that implementations may choose not to check required fields within\n  // a lazy sub-message.  That is, calling IsInitialized() on the outer message\n  // may return true even if the inner message has missing required fields.\n  // This is necessary because otherwise the inner message would have to be\n  // parsed in order to perform the check, defeating the purpose of lazy\n  // parsing.  An implementation which chooses not to check required fields\n  // must be consistent about it.  
That is, for any particular sub-message, the\n  // implementation must either *always* check its required fields, or *never*\n  // check its required fields, regardless of whether or not the message has\n  // been parsed.\n  optional bool lazy = 5 [default = false];\n\n  // Is this field deprecated?\n  // Depending on the target platform, this can emit Deprecated annotations\n  // for accessors, or it will be completely ignored; in the very least, this\n  // is a formalization for deprecating fields.\n  optional bool deprecated = 3 [default = false];\n\n  // For Google-internal migration only. Do not use.\n  optional bool weak = 10 [default = false];\n\n\n  // The parser stores options it doesn't recognize here. See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message. See above.\n  extensions 1000 to max;\n\n  reserved 4;  // removed jtype\n}\n\nmessage OneofOptions {\n  // The parser stores options it doesn't recognize here. See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message. See above.\n  extensions 1000 to max;\n}\n\nmessage EnumOptions {\n\n  // Set this option to true to allow mapping different tag names to the same\n  // value.\n  optional bool allow_alias = 2;\n\n  // Is this enum deprecated?\n  // Depending on the target platform, this can emit Deprecated annotations\n  // for the enum, or it will be completely ignored; in the very least, this\n  // is a formalization for deprecating enums.\n  optional bool deprecated = 3 [default = false];\n\n  reserved 5;  // javanano_as_lite\n\n  // The parser stores options it doesn't recognize here. See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message. 
See above.\n  extensions 1000 to max;\n}\n\nmessage EnumValueOptions {\n  // Is this enum value deprecated?\n  // Depending on the target platform, this can emit Deprecated annotations\n  // for the enum value, or it will be completely ignored; in the very least,\n  // this is a formalization for deprecating enum values.\n  optional bool deprecated = 1 [default = false];\n\n  // The parser stores options it doesn't recognize here. See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message. See above.\n  extensions 1000 to max;\n}\n\nmessage ServiceOptions {\n\n  // Note:  Field numbers 1 through 32 are reserved for Google's internal RPC\n  //   framework.  We apologize for hoarding these numbers to ourselves, but\n  //   we were already using them long before we decided to release Protocol\n  //   Buffers.\n\n  // Is this service deprecated?\n  // Depending on the target platform, this can emit Deprecated annotations\n  // for the service, or it will be completely ignored; in the very least,\n  // this is a formalization for deprecating services.\n  optional bool deprecated = 33 [default = false];\n\n  // The parser stores options it doesn't recognize here. See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message. See above.\n  extensions 1000 to max;\n}\n\nmessage MethodOptions {\n\n  // Note:  Field numbers 1 through 32 are reserved for Google's internal RPC\n  //   framework.  
We apologize for hoarding these numbers to ourselves, but\n  //   we were already using them long before we decided to release Protocol\n  //   Buffers.\n\n  // Is this method deprecated?\n  // Depending on the target platform, this can emit Deprecated annotations\n  // for the method, or it will be completely ignored; in the very least,\n  // this is a formalization for deprecating methods.\n  optional bool deprecated = 33 [default = false];\n\n  // Is this method side-effect-free (or safe in HTTP parlance), or idempotent,\n  // or neither? HTTP based RPC implementation may choose GET verb for safe\n  // methods, and PUT verb for idempotent methods instead of the default POST.\n  enum IdempotencyLevel {\n    IDEMPOTENCY_UNKNOWN = 0;\n    NO_SIDE_EFFECTS = 1;  // implies idempotent\n    IDEMPOTENT = 2;       // idempotent, but may have side effects\n  }\n  optional IdempotencyLevel idempotency_level = 34\n      [default = IDEMPOTENCY_UNKNOWN];\n\n  // The parser stores options it doesn't recognize here. See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message. See above.\n  extensions 1000 to max;\n}\n\n\n// A message representing a option the parser does not recognize. This only\n// appears in options protos created by the compiler::Parser class.\n// DescriptorPool resolves these when building Descriptor objects. Therefore,\n// options protos in descriptor objects (e.g. returned by Descriptor::options(),\n// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions\n// in them.\nmessage UninterpretedOption {\n  // The name of the uninterpreted option.  Each string represents a segment in\n  // a dot-separated name.  
is_extension is true iff a segment represents an\n  // extension (denoted with parentheses in options specs in .proto files).\n  // E.g.,{ [\"foo\", false], [\"bar.baz\", true], [\"qux\", false] } represents\n  // \"foo.(bar.baz).qux\".\n  message NamePart {\n    required string name_part = 1;\n    required bool is_extension = 2;\n  }\n  repeated NamePart name = 2;\n\n  // The value of the uninterpreted option, in whatever type the tokenizer\n  // identified it as during parsing. Exactly one of these should be set.\n  optional string identifier_value = 3;\n  optional uint64 positive_int_value = 4;\n  optional int64 negative_int_value = 5;\n  optional double double_value = 6;\n  optional bytes string_value = 7;\n  optional string aggregate_value = 8;\n}\n\n// ===================================================================\n// Optional source code info\n\n// Encapsulates information about the original source file from which a\n// FileDescriptorProto was generated.\nmessage SourceCodeInfo {\n  // A Location identifies a piece of source code in a .proto file which\n  // corresponds to a particular definition.  This information is intended\n  // to be useful to IDEs, code indexers, documentation generators, and similar\n  // tools.\n  //\n  // For example, say we have a file like:\n  //   message Foo {\n  //     optional string foo = 1;\n  //   }\n  // Let's look at just the field definition:\n  //   optional string foo = 1;\n  //   ^       ^^     ^^  ^  ^^^\n  //   a       bc     de  f  ghi\n  // We have the following locations:\n  //   span   path               represents\n  //   [a,i)  [ 4, 0, 2, 0 ]     The whole field definition.\n  //   [a,b)  [ 4, 0, 2, 0, 4 ]  The label (optional).\n  //   [c,d)  [ 4, 0, 2, 0, 5 ]  The type (string).\n  //   [e,f)  [ 4, 0, 2, 0, 1 ]  The name (foo).\n  //   [g,h)  [ 4, 0, 2, 0, 3 ]  The number (1).\n  //\n  // Notes:\n  // - A location may refer to a repeated field itself (i.e. not to any\n  //   particular index within it). 
 This is used whenever a set of elements are\n  //   logically enclosed in a single code segment.  For example, an entire\n  //   extend block (possibly containing multiple extension definitions) will\n  //   have an outer location whose path refers to the \"extensions\" repeated\n  //   field without an index.\n  // - Multiple locations may have the same path.  This happens when a single\n  //   logical declaration is spread out across multiple places.  The most\n  //   obvious example is the \"extend\" block again -- there may be multiple\n  //   extend blocks in the same scope, each of which will have the same path.\n  // - A location's span is not always a subset of its parent's span.  For\n  //   example, the \"extendee\" of an extension declaration appears at the\n  //   beginning of the \"extend\" block and is shared by all extensions within\n  //   the block.\n  // - Just because a location's span is a subset of some other location's span\n  //   does not mean that it is a descendant.  For example, a \"group\" defines\n  //   both a type and a field in a single declaration.  Thus, the locations\n  //   corresponding to the type and field and their components will overlap.\n  // - Code which tries to interpret locations should probably be designed to\n  //   ignore those that it doesn't understand, as more types of locations could\n  //   be recorded in the future.\n  repeated Location location = 1;\n  message Location {\n    // Identifies which part of the FileDescriptorProto was defined at this\n    // location.\n    //\n    // Each element is a field number or an index.  They form a path from\n    // the root FileDescriptorProto to the place where the definition.  
For\n    // example, this path:\n    //   [ 4, 3, 2, 7, 1 ]\n    // refers to:\n    //   file.message_type(3)  // 4, 3\n    //       .field(7)         // 2, 7\n    //       .name()           // 1\n    // This is because FileDescriptorProto.message_type has field number 4:\n    //   repeated DescriptorProto message_type = 4;\n    // and DescriptorProto.field has field number 2:\n    //   repeated FieldDescriptorProto field = 2;\n    // and FieldDescriptorProto.name has field number 1:\n    //   optional string name = 1;\n    //\n    // Thus, the above path gives the location of a field name.  If we removed\n    // the last element:\n    //   [ 4, 3, 2, 7 ]\n    // this path refers to the whole field declaration (from the beginning\n    // of the label to the terminating semicolon).\n    repeated int32 path = 1 [packed = true];\n\n    // Always has exactly three or four elements: start line, start column,\n    // end line (optional, otherwise assumed same as start line), end column.\n    // These are packed into a single field for efficiency.  Note that line\n    // and column numbers are zero-based -- typically you will want to add\n    // 1 to each before displaying to a user.\n    repeated int32 span = 2 [packed = true];\n\n    // If this SourceCodeInfo represents a complete declaration, these are any\n    // comments appearing before and after the declaration which appear to be\n    // attached to the declaration.\n    //\n    // A series of line comments appearing on consecutive lines, with no other\n    // tokens appearing on those lines, will be treated as a single comment.\n    //\n    // leading_detached_comments will keep paragraphs of comments that appear\n    // before (but not connected to) the current element. Each paragraph,\n    // separated by empty lines, will be one comment element in the repeated\n    // field.\n    //\n    // Only the comment content is provided; comment markers (e.g. //) are\n    // stripped out.  
For block comments, leading whitespace and an asterisk\n    // will be stripped from the beginning of each line other than the first.\n    // Newlines are included in the output.\n    //\n    // Examples:\n    //\n    //   optional int32 foo = 1;  // Comment attached to foo.\n    //   // Comment attached to bar.\n    //   optional int32 bar = 2;\n    //\n    //   optional string baz = 3;\n    //   // Comment attached to baz.\n    //   // Another line attached to baz.\n    //\n    //   // Comment attached to qux.\n    //   //\n    //   // Another line attached to qux.\n    //   optional double qux = 4;\n    //\n    //   // Detached comment for corge. This is not leading or trailing comments\n    //   // to qux or corge because there are blank lines separating it from\n    //   // both.\n    //\n    //   // Detached comment for corge paragraph 2.\n    //\n    //   optional string corge = 5;\n    //   /* Block comment attached\n    //    * to corge.  Leading asterisks\n    //    * will be removed. */\n    //   /* Block comment attached to\n    //    * grault. */\n    //   optional int32 grault = 6;\n    //\n    //   // ignored detached comments.\n    optional string leading_comments = 3;\n    optional string trailing_comments = 4;\n    repeated string leading_detached_comments = 6;\n  }\n}\n\n// Describes the relationship between generated code and its original source\n// file. A GeneratedCodeInfo message is associated with only one generated\n// source file, but may contain references to different source .proto files.\nmessage GeneratedCodeInfo {\n  // An Annotation connects some span of text in generated code to an element\n  // of its generating .proto file.\n  repeated Annotation annotation = 1;\n  message Annotation {\n    // Identifies the element in the original source .proto file. 
This field\n    // is formatted the same as SourceCodeInfo.Location.path.\n    repeated int32 path = 1 [packed = true];\n\n    // Identifies the filesystem path to the original source .proto.\n    optional string source_file = 2;\n\n    // Identifies the starting offset in bytes in the generated code\n    // that relates to the identified object.\n    optional int32 begin = 3;\n\n    // Identifies the ending offset in bytes in the generated code that\n    // relates to the identified offset. The end offset should be one past\n    // the last relevant byte (so the length of the text = end - begin).\n    optional int32 end = 4;\n  }\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/google/protobuf/empty.proto",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nsyntax = \"proto3\";\n\npackage google.protobuf;\n\noption csharp_namespace = \"Google.Protobuf.WellKnownTypes\";\noption go_package = \"google.golang.org/protobuf/types/known/emptypb\";\noption java_package = \"com.google.protobuf\";\noption java_outer_classname = \"EmptyProto\";\noption java_multiple_files = true;\noption objc_class_prefix = \"GPB\";\noption cc_enable_arenas = true;\n\n// A generic empty message that you can re-use to avoid defining duplicated\n// empty messages in your APIs. A typical example is to use it as the request\n// or the response type of an API method. For instance:\n//\n//     service Foo {\n//       rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n//     }\n//\n// The JSON representation for `Empty` is empty JSON object `{}`.\nmessage Empty {}\n"
  },
  {
    "path": "crates/shim-protos/vendor/google/protobuf/timestamp.proto",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nsyntax = \"proto3\";\n\npackage google.protobuf;\n\noption csharp_namespace = \"Google.Protobuf.WellKnownTypes\";\noption cc_enable_arenas = true;\noption go_package = \"google.golang.org/protobuf/types/known/timestamppb\";\noption java_package = \"com.google.protobuf\";\noption java_outer_classname = \"TimestampProto\";\noption java_multiple_files = true;\noption objc_class_prefix = \"GPB\";\n\n// A Timestamp represents a point in time independent of any time zone or local\n// calendar, encoded as a count of seconds and fractions of seconds at\n// nanosecond resolution. The count is relative to an epoch at UTC midnight on\n// January 1, 1970, in the proleptic Gregorian calendar which extends the\n// Gregorian calendar backwards to year one.\n//\n// All minutes are 60 seconds long. Leap seconds are \"smeared\" so that no leap\n// second table is needed for interpretation, using a [24-hour linear\n// smear](https://developers.google.com/time/smear).\n//\n// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. 
By\n// restricting to that range, we ensure that we can convert to and from [RFC\n// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.\n//\n// # Examples\n//\n// Example 1: Compute Timestamp from POSIX `time()`.\n//\n//     Timestamp timestamp;\n//     timestamp.set_seconds(time(NULL));\n//     timestamp.set_nanos(0);\n//\n// Example 2: Compute Timestamp from POSIX `gettimeofday()`.\n//\n//     struct timeval tv;\n//     gettimeofday(&tv, NULL);\n//\n//     Timestamp timestamp;\n//     timestamp.set_seconds(tv.tv_sec);\n//     timestamp.set_nanos(tv.tv_usec * 1000);\n//\n// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.\n//\n//     FILETIME ft;\n//     GetSystemTimeAsFileTime(&ft);\n//     UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;\n//\n//     // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z\n//     // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.\n//     Timestamp timestamp;\n//     timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));\n//     timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));\n//\n// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.\n//\n//     long millis = System.currentTimeMillis();\n//\n//     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)\n//         .setNanos((int) ((millis % 1000) * 1000000)).build();\n//\n//\n// Example 5: Compute Timestamp from Java `Instant.now()`.\n//\n//     Instant now = Instant.now();\n//\n//     Timestamp timestamp =\n//         Timestamp.newBuilder().setSeconds(now.getEpochSecond())\n//             .setNanos(now.getNano()).build();\n//\n//\n// Example 6: Compute Timestamp from current time in Python.\n//\n//     timestamp = Timestamp()\n//     timestamp.GetCurrentTime()\n//\n// # JSON Mapping\n//\n// In JSON format, the Timestamp type is encoded as a string in the\n// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. 
That is, the\n// format is \"{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z\"\n// where {year} is always expressed using four digits while {month}, {day},\n// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional\n// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),\n// are optional. The \"Z\" suffix indicates the timezone (\"UTC\"); the timezone\n// is required. A proto3 JSON serializer should always use UTC (as indicated by\n// \"Z\") when printing the Timestamp type and a proto3 JSON parser should be\n// able to accept both UTC and other timezones (as indicated by an offset).\n//\n// For example, \"2017-01-15T01:30:15.01Z\" encodes 15.01 seconds past\n// 01:30 UTC on January 15, 2017.\n//\n// In JavaScript, one can convert a Date object to this format using the\n// standard\n// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)\n// method. In Python, a standard `datetime.datetime` object can be converted\n// to this format using\n// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with\n// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use\n// the Joda Time's [`ISODateTimeFormat.dateTime()`](\n// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D\n// ) to obtain a formatter capable of generating timestamps in this format.\n//\n//\nmessage Timestamp {\n  // Represents seconds of UTC time since Unix epoch\n  // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to\n  // 9999-12-31T23:59:59Z inclusive.\n  int64 seconds = 1;\n\n  // Non-negative fractions of a second at nanosecond resolution. Negative\n  // second values with fractions must still have non-negative nanos values\n  // that count forward in time. Must be from 0 to 999,999,999\n  // inclusive.\n  int32 nanos = 2;\n}\n"
  },
  {
    "path": "crates/shim-protos/vendor/microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats/stats.proto",
    "content": "syntax = \"proto3\";\n\npackage containerd.runhcs.stats.v1;\n\nimport \"google/protobuf/timestamp.proto\";\nimport \"github.com/containerd/cgroups/v3/cgroup1/stats/metrics.proto\";\n\noption go_package = \"github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/stats;stats\";\n\nmessage Statistics {\n\toneof container {\n\t\tWindowsContainerStatistics windows = 1;\n\t\tio.containerd.cgroups.v1.Metrics linux = 2;\n\t}\n\tVirtualMachineStatistics vm = 3;\n}\n\nmessage WindowsContainerStatistics {\n\tgoogle.protobuf.Timestamp timestamp = 1;\n\tgoogle.protobuf.Timestamp container_start_time = 2;\n\tuint64 uptime_ns = 3;\n\tWindowsContainerProcessorStatistics processor = 4;\n\tWindowsContainerMemoryStatistics memory = 5;\n\tWindowsContainerStorageStatistics storage = 6;\n}\n\nmessage WindowsContainerProcessorStatistics {\n\tuint64 total_runtime_ns = 1;\n\tuint64 runtime_user_ns = 2;\n\tuint64 runtime_kernel_ns = 3;\n}\n\nmessage WindowsContainerMemoryStatistics {\n\tuint64 memory_usage_commit_bytes = 1;\n\tuint64 memory_usage_commit_peak_bytes = 2;\n\tuint64 memory_usage_private_working_set_bytes = 3;\n}\n\nmessage WindowsContainerStorageStatistics {\n\tuint64 read_count_normalized = 1;\n\tuint64 read_size_bytes = 2;\n\tuint64 write_count_normalized = 3;\n\tuint64 write_size_bytes = 4;\n}\n\nmessage VirtualMachineStatistics {\n\tVirtualMachineProcessorStatistics processor = 1;\n\tVirtualMachineMemoryStatistics memory = 2;\n}\n\nmessage VirtualMachineProcessorStatistics {\n\tuint64 total_runtime_ns = 1;\n}\n\nmessage VirtualMachineMemoryStatistics {\n\tuint64 working_set_bytes = 1;\n\tuint32 virtual_node_count = 2;\n\tVirtualMachineMemory vm_memory = 3;\n}\n\nmessage VirtualMachineMemory {\n\tint32 available_memory = 1;\n\tint32 available_memory_buffer = 2;\n\tuint64 reserved_memory = 3;\n\tuint64 assigned_memory = 4;\n\tbool slp_active = 5;\n\tbool balancing_enabled = 6;\n\tbool dm_operation_in_progress = 7;\n}\n"
  },
  {
    "path": "crates/snapshots/Cargo.toml",
    "content": "[package]\nname = \"containerd-snapshots\"\nversion = \"0.3.0\"\nauthors = [\n  \"Maksym Pavlenko <pavlenko.maksym@gmail.com>\",\n  \"The containerd Authors\",\n]\ndescription = \"Remote snapshotter extension for containerd\"\nkeywords = [\"containerd\", \"server\", \"grpc\", \"containers\"]\ncategories = [\"api-bindings\", \"asynchronous\"]\n\nedition.workspace = true\nlicense.workspace = true\nrepository.workspace = true\nhomepage.workspace = true\n\n[features]\ndocs = []\n\n[dependencies]\nasync-stream = \"0.3.6\"\nfutures = { workspace = true, features = [\"std\", \"alloc\"] }\nprost = { workspace = true, features = [\"derive\", \"std\"] }\nprost-types = { workspace = true, features = [\"std\"] }\nserde = { workspace = true, features = [\"derive\", \"std\"] }\nthiserror.workspace = true\ntonic = { workspace = true, features = [\"codegen\"] }\ntonic-prost.workspace = true\ntokio-stream = { version = \"0.1\", default-features = false }\n\n[dev-dependencies]\nfutures.workspace = true\nlog.workspace = true\nsimple_logger.workspace = true\ntokio = { workspace = true, features = [\"macros\", \"rt\", \"sync\", \"net\", \"io-util\"] }\ntonic = { workspace = true, features = [\"server\", \"router\"] }\n\n[build-dependencies]\ntonic-prost-build.workspace = true\n\n[package.metadata.docs.rs]\nfeatures = [\"docs\"]\n\n[package.metadata.cargo-machete]\nignored = [\"prost\", \"tonic-prost\"]\n"
  },
  {
    "path": "crates/snapshots/README.md",
    "content": "# Remote snapshotter extension for containerd\n\n[![Crates.io](https://img.shields.io/crates/v/containerd-snapshots)](https://crates.io/crates/containerd-snapshots)\n[![docs.rs](https://img.shields.io/docsrs/containerd-snapshots)](https://docs.rs/containerd-snapshots/latest/containerd_snapshots/)\n[![Crates.io](https://img.shields.io/crates/l/containerd-shim-logging)](https://github.com/containerd/rust-extensions/blob/main/LICENSE)\n[![CI](https://github.com/containerd/rust-extensions/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/containerd/rust-extensions/actions/workflows/ci.yml)\n\nSnapshots crate implements containerd's proxy plugin for snapshotting. It aims to hide the underlying complexity of GRPC\ninterfaces, streaming, and request/response conversions and provide one `Snapshots` trait to implement.\n\n[containerd Documentation](https://github.com/containerd/containerd/blob/main/docs/PLUGINS.md#proxy-plugins)\n\n## Proxy plugins\n\nA proxy plugin is configured using containerd's config file and will be loaded alongside the internal plugins when\ncontainerd is started. These plugins are connected to containerd using a local socket serving one of containerd's GRPC\nAPI services. 
Each plugin is configured with a type and name just as internal plugins are.\n\n## How to use from containerd\n\nAdd the following to containerd's configuration file:\n```toml\n[proxy_plugins]\n  [proxy_plugins.custom]\n    type = \"snapshot\"\n    address = \"/tmp/snap2.sock\"\n```\n\nStart daemons and try pulling an image with `custom` snapshotter:\n```bash\n# Start containerd daemon\n$ containerd --config /path/config.toml\n\n# Run remote snapshotter instance\n$ cargo run --example snapshotter /tmp/snap2.sock\n\n# Now specify the snapshotter when pulling an image\n$ ctr i pull --snapshotter custom docker.io/library/hello-world:latest\n```\n\n## Getting started\n\nSnapshotters are required to implement `Snapshotter` trait (which is very similar to containerd's\n[Snapshotter](https://github.com/containerd/containerd/blob/main/core/snapshots/snapshotter.go) interface).\n\n```rust,ignore\nuse std::collections::HashMap;\n\nuse containerd_snapshots as snapshots;\nuse containerd_snapshots::{api, Info, Usage};\nuse log::info;\n\n#[derive(Default)]\nstruct Example;\n\n#[snapshots::tonic::async_trait]\nimpl snapshots::Snapshotter for Example {\n    type Error = snapshots::tonic::Status;\n\n    async fn stat(&self, key: String) -> Result<Info, Self::Error> {\n        info!(\"Stat: {}\", key);\n        Ok(Info::default())\n    }\n\n    // ...\n\n    async fn commit(\n        &self,\n        name: String,\n        key: String,\n        labels: HashMap<String, String>,\n    ) -> Result<(), Self::Error> {\n        info!(\"Commit: name={}, key={}, labels={:?}\", name, key, labels);\n        Ok(())\n    }\n}\n```\n\nThe library provides `snapshots::server` for convenience to wrap the implementation into a GRPC server, so it can\nbe used with `tonic` like this:\n\n```rust,ignore\nuse snapshots::tonic::transport::Server;\n\nServer::builder()\n    .add_service(snapshots::server(example))\n    .serve_with_incoming(incoming)\n    .await\n    .expect(\"Serve failed\");\n```\n"
  },
  {
    "path": "crates/snapshots/build.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::{env, fs, io};\n\nconst PROTO_FILES: &[&str] = &[\"types/mount.proto\", \"services/snapshots/v1/snapshots.proto\"];\n\nconst FIXUP_MODULES: &[&str] = &[\"containerd.services.snapshots.v1\"];\n\nfn main() {\n    tonic_prost_build::configure()\n        .build_server(true)\n        .compile_protos(\n            PROTO_FILES,\n            &[\"vendor/github.com/containerd/containerd/api/\", \"vendor/\"],\n        )\n        .expect(\"Failed to generate GRPC bindings\");\n\n    for module in FIXUP_MODULES {\n        fixup_imports(module).expect(\"Failed to fixup module\");\n    }\n}\n\n// Original containerd's protobuf files contain Go style imports:\n// import \"github.com/containerd/containerd/api/types/mount.proto\";\n//\n// Tonic produces invalid code for these imports:\n// error[E0433]: failed to resolve: there are too many leading `super` keywords\n//   --> /containerd-rust-extensions/target/debug/build/containerd-client-protos-0a328c0c63f60cd0/out/containerd.services.diff.v1.rs:47:52\n//    |\n// 47 |     pub diff: ::core::option::Option<super::super::super::types::Descriptor>,\n//    |                                                    ^^^^^ there are too many leading `super` keywords\n//\n// This func fixes imports to crate level ones, like `crate::types::Mount`\nfn fixup_imports(path: &str) -> Result<(), io::Error> {\n    let out_dir 
= env::var(\"OUT_DIR\").unwrap();\n    let path = format!(\"{}/{}.rs\", out_dir, path);\n\n    let contents = fs::read_to_string(&path)?\n        .replace(\"super::super::super::types\", \"crate::api::types\")\n        .replace(\n            \"/// \tfilters\\\\[0\\\\] or filters\\\\[1\\\\] or ... or filters\\\\[n-1\\\\] or filters\\\\[n\\\\]\",\n            r#\"\n            /// ```notrust\n            /// \tfilters[0] or filters[1] or ... or filters[n-1] or filters[n]\n            /// ```\"#,\n        );\n\n    fs::write(path, contents)?;\n    Ok(())\n}\n"
  },
  {
    "path": "crates/snapshots/examples/snapshotter.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nuse std::{\n    collections::HashMap,\n    env,\n    pin::Pin,\n    sync::Arc,\n    task::{Context, Poll},\n};\n\nuse containerd_snapshots as snapshots;\nuse containerd_snapshots::{api, Info, Usage};\nuse futures::TryFutureExt;\nuse log::info;\nuse snapshots::tonic::transport::Server;\nuse tokio::net::UnixListener;\nuse tokio_stream::Stream;\n\n#[derive(Default)]\nstruct Example;\n\n#[snapshots::tonic::async_trait]\nimpl snapshots::Snapshotter for Example {\n    type Error = snapshots::tonic::Status;\n\n    async fn stat(&self, key: String) -> Result<Info, Self::Error> {\n        info!(\"Stat: {}\", key);\n        Ok(Info::default())\n    }\n\n    async fn update(\n        &self,\n        info: Info,\n        fieldpaths: Option<Vec<String>>,\n    ) -> Result<Info, Self::Error> {\n        info!(\"Update: info={:?}, fieldpaths={:?}\", info, fieldpaths);\n        Ok(Info::default())\n    }\n\n    async fn usage(&self, key: String) -> Result<Usage, Self::Error> {\n        info!(\"Usage: {}\", key);\n        Ok(Usage::default())\n    }\n\n    async fn mounts(&self, key: String) -> Result<Vec<api::types::Mount>, Self::Error> {\n        info!(\"Mounts: {}\", key);\n        Ok(Vec::new())\n    }\n\n    async fn prepare(\n        &self,\n        key: String,\n        parent: String,\n        labels: HashMap<String, String>,\n    ) -> 
Result<Vec<api::types::Mount>, Self::Error> {\n        info!(\n            \"Prepare: key={}, parent={}, labels={:?}\",\n            key, parent, labels\n        );\n        Ok(Vec::new())\n    }\n\n    async fn view(\n        &self,\n        key: String,\n        parent: String,\n        labels: HashMap<String, String>,\n    ) -> Result<Vec<api::types::Mount>, Self::Error> {\n        info!(\"View: key={}, parent={}, labels={:?}\", key, parent, labels);\n        Ok(Vec::new())\n    }\n\n    async fn commit(\n        &self,\n        name: String,\n        key: String,\n        labels: HashMap<String, String>,\n    ) -> Result<(), Self::Error> {\n        info!(\"Commit: name={}, key={}, labels={:?}\", name, key, labels);\n        Ok(())\n    }\n\n    async fn remove(&self, key: String) -> Result<(), Self::Error> {\n        info!(\"Remove: {}\", key);\n        Ok(())\n    }\n\n    type InfoStream = EmptyStream;\n    async fn list(\n        &self,\n        snapshotter: String,\n        filters: Vec<String>,\n    ) -> Result<Self::InfoStream, Self::Error> {\n        info!(\"List: snapshotter={}, filters={:?}\", snapshotter, filters);\n        // Returns no snapshots.\n        Ok(EmptyStream)\n    }\n}\n\nstruct EmptyStream;\nimpl Stream for EmptyStream {\n    type Item = Result<Info, snapshots::tonic::Status>;\n    fn poll_next(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {\n        Poll::Ready(None)\n    }\n}\n\n#[cfg(unix)]\n#[tokio::main(flavor = \"current_thread\")]\nasync fn main() {\n    simple_logger::SimpleLogger::new()\n        .init()\n        .expect(\"Failed to initialize logger\");\n\n    let args = env::args().collect::<Vec<_>>();\n\n    let socket_path = args\n        .get(1)\n        .ok_or(\"First argument must be socket path\")\n        .unwrap();\n\n    let example = Example;\n\n    let incoming = {\n        let uds = UnixListener::bind(socket_path).expect(\"Failed to bind listener\");\n\n        async_stream::stream! 
{\n            loop {\n                let item = uds.accept().map_ok(|(st, _)| unix::UnixStream(st)).await;\n                yield item;\n            }\n        }\n    };\n\n    Server::builder()\n        .add_service(snapshots::server(Arc::new(example)))\n        .serve_with_incoming(incoming)\n        .await\n        .expect(\"Serve failed\");\n}\n\n// Copy-pasted from https://github.com/hyperium/tonic/blob/master/examples/src/uds/server.rs#L69\n#[cfg(unix)]\nmod unix {\n    use std::{\n        pin::Pin,\n        sync::Arc,\n        task::{Context, Poll},\n    };\n\n    use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};\n    use tonic::transport::server::Connected;\n\n    #[derive(Debug)]\n    pub struct UnixStream(pub tokio::net::UnixStream);\n\n    impl Connected for UnixStream {\n        type ConnectInfo = UdsConnectInfo;\n\n        fn connect_info(&self) -> Self::ConnectInfo {\n            UdsConnectInfo {\n                peer_addr: self.0.peer_addr().ok().map(Arc::new),\n                peer_cred: self.0.peer_cred().ok(),\n            }\n        }\n    }\n\n    #[allow(dead_code)]\n    #[derive(Clone, Debug)]\n    pub struct UdsConnectInfo {\n        pub peer_addr: Option<Arc<tokio::net::unix::SocketAddr>>,\n        pub peer_cred: Option<tokio::net::unix::UCred>,\n    }\n\n    impl AsyncRead for UnixStream {\n        fn poll_read(\n            mut self: Pin<&mut Self>,\n            cx: &mut Context<'_>,\n            buf: &mut ReadBuf<'_>,\n        ) -> Poll<std::io::Result<()>> {\n            Pin::new(&mut self.0).poll_read(cx, buf)\n        }\n    }\n\n    impl AsyncWrite for UnixStream {\n        fn poll_write(\n            mut self: Pin<&mut Self>,\n            cx: &mut Context<'_>,\n            buf: &[u8],\n        ) -> Poll<std::io::Result<usize>> {\n            Pin::new(&mut self.0).poll_write(cx, buf)\n        }\n\n        fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {\n            Pin::new(&mut 
self.0).poll_flush(cx)\n        }\n\n        fn poll_shutdown(\n            mut self: Pin<&mut Self>,\n            cx: &mut Context<'_>,\n        ) -> Poll<std::io::Result<()>> {\n            Pin::new(&mut self.0).poll_shutdown(cx)\n        }\n    }\n}\n\n#[cfg(not(unix))]\nfn main() {\n    panic!(\"The snapshotter example only works on unix systems!\");\n}\n"
  },
  {
    "path": "crates/snapshots/rsync.txt",
    "content": "api/services/snapshots/v1/snapshots.proto\napi/types/mount.proto\n"
  },
  {
    "path": "crates/snapshots/src/convert.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\n//! Various conversions between GRPC and native types.\n\nuse std::convert::{TryFrom, TryInto};\n\nuse thiserror::Error;\nuse tonic::Status;\n\nuse crate::{api::snapshots::v1 as grpc, Info, Kind};\n\nimpl From<Kind> for i32 {\n    fn from(kind: Kind) -> i32 {\n        match kind {\n            Kind::Unknown => 0,\n            Kind::View => 1,\n            Kind::Active => 2,\n            Kind::Committed => 3,\n        }\n    }\n}\n\nimpl TryFrom<i32> for Kind {\n    type Error = Error;\n\n    fn try_from(value: i32) -> Result<Self, Self::Error> {\n        Ok(match value {\n            0 => Kind::Unknown,\n            1 => Kind::View,\n            2 => Kind::Active,\n            3 => Kind::Committed,\n            _ => return Err(Error::InvalidEnumValue(value)),\n        })\n    }\n}\n\nimpl TryFrom<grpc::Info> for Info {\n    type Error = Error;\n\n    fn try_from(info: grpc::Info) -> Result<Self, Self::Error> {\n        Ok(Info {\n            kind: info.kind.try_into()?,\n            name: info.name,\n            parent: info.parent,\n            labels: info.labels,\n            created_at: info.created_at.unwrap_or_default().try_into()?,\n            updated_at: info.updated_at.unwrap_or_default().try_into()?,\n        })\n    }\n}\n\nimpl From<Info> for grpc::Info {\n    fn from(info: Info) -> Self {\n        grpc::Info {\n            name: 
info.name,\n            parent: info.parent,\n            kind: info.kind.into(),\n            created_at: Some(info.created_at.into()),\n            updated_at: Some(info.updated_at.into()),\n            labels: info.labels,\n        }\n    }\n}\n\n#[derive(Debug, Error)]\npub enum Error {\n    #[error(\"Failed to convert GRPC timestamp: {0}\")]\n    Timestamp(#[from] prost_types::TimestampError),\n\n    #[error(\"Invalid enum value: {0}\")]\n    InvalidEnumValue(i32),\n}\n\nimpl From<Error> for tonic::Status {\n    fn from(err: Error) -> Self {\n        Status::internal(format!(\"{}\", err))\n    }\n}\n"
  },
  {
    "path": "crates/snapshots/src/lib.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\n#![cfg_attr(feature = \"docs\", doc = include_str!(\"../README.md\"))]\n// No way to derive Eq with tonic :(\n// See https://github.com/hyperium/tonic/issues/1056\n#![allow(clippy::derive_partial_eq_without_eq)]\n\nuse std::{collections::HashMap, fmt::Debug, ops::AddAssign, time::SystemTime};\n\nuse serde::{Deserialize, Serialize};\nuse tokio_stream::Stream;\npub use tonic;\n\nmod convert;\nmod wrap;\n\npub use wrap::server;\n\n/// Generated GRPC apis.\npub mod api {\n    #![allow(clippy::tabs_in_doc_comments)]\n    #![allow(rustdoc::invalid_rust_codeblocks)]\n\n    /// Generated snapshots bindings.\n    pub mod snapshots {\n        pub mod v1 {\n            tonic::include_proto!(\"containerd.services.snapshots.v1\");\n        }\n    }\n\n    /// Generated `containerd.types` types.\n    pub mod types {\n        tonic::include_proto!(\"containerd.types\");\n    }\n}\n\n/// Snapshot kinds.\n#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Default)]\npub enum Kind {\n    #[default]\n    Unknown,\n    View,\n    Active,\n    Committed,\n}\n\n/// Information about a particular snapshot.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct Info {\n    /// Active or committed snapshot.\n    pub kind: Kind,\n    /// Name of key of snapshot.\n    pub name: String,\n    /// Name of parent snapshot.\n    pub parent: String,\n    /// 
Labels for a snapshot.\n    pub labels: HashMap<String, String>,\n    /// Created time.\n    pub created_at: SystemTime,\n    /// Last updated time.\n    pub updated_at: SystemTime,\n}\n\nimpl Default for Info {\n    fn default() -> Self {\n        Info {\n            kind: Default::default(),\n            name: Default::default(),\n            parent: Default::default(),\n            labels: Default::default(),\n            created_at: SystemTime::now(),\n            updated_at: SystemTime::now(),\n        }\n    }\n}\n\n/// Defines statistics for disk resources consumed by the snapshot.\n///\n// These resources only include the resources consumed by the snapshot itself and does not include\n// resources usage by the parent.\n#[derive(Debug, Clone, Copy, Default)]\npub struct Usage {\n    /// Number of inodes in use.\n    pub inodes: i64,\n    /// Provides usage of snapshot in bytes.\n    pub size: i64,\n}\n\n/// Add the provided usage to the current usage.\nimpl AddAssign for Usage {\n    fn add_assign(&mut self, rhs: Self) {\n        self.inodes += rhs.inodes;\n        self.size += rhs.size;\n    }\n}\n\n/// Snapshotter defines the methods required to implement a snapshot snapshotter for\n/// allocating, snapshotting and mounting filesystem changesets. The model works\n/// by building up sets of changes with parent-child relationships.\n///\n/// A snapshot represents a filesystem state. Every snapshot has a parent, where\n/// the empty parent is represented by the empty string. 
A diff can be taken\n/// between a parent and its snapshot to generate a classic layer.\n#[tonic::async_trait]\npub trait Snapshotter: Send + Sync + 'static {\n    /// Error type returned from the underlying snapshotter implementation.\n    ///\n    /// This type must be convertable to GRPC status.\n    type Error: Debug + Into<tonic::Status> + Send;\n\n    /// Returns the info for an active or committed snapshot by name or key.\n    ///\n    /// Should be used for parent resolution, existence checks and to discern\n    /// the kind of snapshot.\n    async fn stat(&self, key: String) -> Result<Info, Self::Error>;\n\n    /// Update updates the info for a snapshot.\n    ///\n    /// Only mutable properties of a snapshot may be updated.\n    async fn update(\n        &self,\n        info: Info,\n        fieldpaths: Option<Vec<String>>,\n    ) -> Result<Info, Self::Error>;\n\n    /// Usage returns the resource usage of an active or committed snapshot\n    /// excluding the usage of parent snapshots.\n    ///\n    /// The running time of this call for active snapshots is dependent on\n    /// implementation, but may be proportional to the size of the resource.\n    /// Callers should take this into consideration.\n    async fn usage(&self, key: String) -> Result<Usage, Self::Error>;\n\n    /// Mounts returns the mounts for the active snapshot transaction identified\n    /// by key.\n    ///\n    /// Can be called on an read-write or readonly transaction. 
This is\n    /// available only for active snapshots.\n    ///\n    /// This can be used to recover mounts after calling View or Prepare.\n    async fn mounts(&self, key: String) -> Result<Vec<api::types::Mount>, Self::Error>;\n\n    /// Creates an active snapshot identified by key descending from the provided parent.\n    /// The returned mounts can be used to mount the snapshot to capture changes.\n    ///\n    /// If a parent is provided, after performing the mounts, the destination will start\n    /// with the content of the parent. The parent must be a committed snapshot.\n    /// Changes to the mounted destination will be captured in relation to the parent.\n    /// The default parent, \"\", is an empty directory.\n    ///\n    /// The changes may be saved to a committed snapshot by calling [Snapshotter::commit]. When\n    /// one is done with the transaction, [Snapshotter::remove] should be called on the key.\n    ///\n    /// Multiple calls to [Snapshotter::prepare] or [Snapshotter::view] with the same key should fail.\n    async fn prepare(\n        &self,\n        key: String,\n        parent: String,\n        labels: HashMap<String, String>,\n    ) -> Result<Vec<api::types::Mount>, Self::Error>;\n\n    /// View behaves identically to [Snapshotter::prepare] except the result may not be\n    /// committed back to the snapshot snapshotter. View call returns a readonly view on\n    /// the parent, with the active snapshot being tracked by the given key.\n    ///\n    /// This method operates identically to [Snapshotter::prepare], except that mounts returned\n    /// may have the readonly flag set. Any modifications to the underlying\n    /// filesystem will be ignored. 
Implementations may perform this in a more\n    /// efficient manner that differs from what would be attempted with [Snapshotter::prepare].\n    ///\n    /// Commit may not be called on the provided key and will return an error.\n    /// To collect the resources associated with key, [Snapshotter::remove] must be called with\n    /// key as the argument.\n    async fn view(\n        &self,\n        key: String,\n        parent: String,\n        labels: HashMap<String, String>,\n    ) -> Result<Vec<api::types::Mount>, Self::Error>;\n\n    /// Capture the changes between key and its parent into a snapshot identified by name.\n    ///\n    /// The name can then be used with the snapshotter's other methods to create subsequent snapshots.\n    ///\n    /// A committed snapshot will be created under name with the parent of the\n    /// active snapshot.\n    ///\n    /// After commit, the snapshot identified by key is removed.\n    async fn commit(\n        &self,\n        name: String,\n        key: String,\n        labels: HashMap<String, String>,\n    ) -> Result<(), Self::Error>;\n\n    /// Remove the committed or active snapshot by the provided key.\n    ///\n    /// All resources associated with the key will be removed.\n    ///\n    /// If the snapshot is a parent of another snapshot, its children must be\n    /// removed before proceeding.\n    async fn remove(&self, key: String) -> Result<(), Self::Error>;\n\n    /// Cleaner defines a type capable of performing asynchronous resource cleanup.\n    ///\n    /// Cleaner interface should be used by snapshotters which implement fast\n    /// removal and deferred resource cleanup. This prevents snapshots from needing\n    /// to perform lengthy resource cleanup before acknowledging a snapshot key\n    /// has been removed and available for re-use. 
This is also useful when\n    /// performing multi-key removal with the intent of cleaning up all the\n    /// resources after each snapshot key has been removed.\n    async fn clear(&self) -> Result<(), Self::Error> {\n        Ok(())\n    }\n\n    /// The type of the stream that returns all snapshots.\n    ///\n    /// An instance of this type is returned by [`Snapshotter::list`] on success.\n    type InfoStream: Stream<Item = Result<Info, Self::Error>> + Send + 'static;\n\n    /// Returns a stream containing all snapshots.\n    ///\n    /// Once `type_alias_impl_trait` is stabilized or if the implementer is willing to use unstable\n    /// features, this function can be implemented using `try_stream` and `yield`. For example, a\n    /// function that lists a single snapshot with the default values would be implemented as\n    /// follows:\n    ///\n    ///```ignore\n    ///     type InfoStream = impl Stream<Item = Result<Info, Self::Error>> + Send + 'static;\n    ///     fn list(&self) -> Result<Self::InfoStream, Self::Error> {\n    ///         Ok(async_stream::try_stream! {\n    ///             yield Info::default();\n    ///         })\n    ///     }\n    /// ```\n    async fn list(\n        &self,\n        snapshotter: String,\n        filters: Vec<String>,\n    ) -> Result<Self::InfoStream, Self::Error>;\n}\n"
  },
  {
    "path": "crates/snapshots/src/wrap.rs",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\n//! Trait wrapper to server GRPC requests.\n\nuse std::{convert::TryInto, mem, sync::Arc};\n\nuse futures::{stream::BoxStream, StreamExt};\n\nuse crate::{\n    api::snapshots::v1::{\n        snapshots_server::{Snapshots, SnapshotsServer},\n        *,\n    },\n    Snapshotter,\n};\n\npub struct Wrapper<S: Snapshotter> {\n    snapshotter: Arc<S>,\n}\n\n/// Helper to create snapshots server from any object that implements [Snapshotter] trait.\npub fn server<S: Snapshotter>(snapshotter: Arc<S>) -> SnapshotsServer<Wrapper<S>> {\n    SnapshotsServer::new(Wrapper { snapshotter })\n}\n\n#[tonic::async_trait]\nimpl<S: Snapshotter> Snapshots for Wrapper<S> {\n    async fn prepare(\n        &self,\n        request: tonic::Request<PrepareSnapshotRequest>,\n    ) -> Result<tonic::Response<PrepareSnapshotResponse>, tonic::Status> {\n        let request = request.into_inner();\n\n        let mounts = self\n            .snapshotter\n            .prepare(request.key, request.parent, request.labels)\n            .await\n            .map_err(Into::into)?;\n        let message = PrepareSnapshotResponse { mounts };\n        Ok(tonic::Response::new(message))\n    }\n\n    async fn view(\n        &self,\n        request: tonic::Request<ViewSnapshotRequest>,\n    ) -> Result<tonic::Response<ViewSnapshotResponse>, tonic::Status> {\n        let request = 
request.into_inner();\n        let mounts = self\n            .snapshotter\n            .view(request.key, request.parent, request.labels)\n            .await\n            .map_err(Into::into)?;\n        let message = ViewSnapshotResponse { mounts };\n        Ok(tonic::Response::new(message))\n    }\n\n    async fn mounts(\n        &self,\n        request: tonic::Request<MountsRequest>,\n    ) -> Result<tonic::Response<MountsResponse>, tonic::Status> {\n        let request = request.into_inner();\n        let mounts = self\n            .snapshotter\n            .mounts(request.key)\n            .await\n            .map_err(Into::into)?;\n        let message = MountsResponse { mounts };\n        Ok(tonic::Response::new(message))\n    }\n\n    async fn commit(\n        &self,\n        request: tonic::Request<CommitSnapshotRequest>,\n    ) -> Result<tonic::Response<()>, tonic::Status> {\n        let request = request.into_inner();\n        self.snapshotter\n            .commit(request.name, request.key, request.labels)\n            .await\n            .map_err(Into::into)?;\n        Ok(tonic::Response::new(()))\n    }\n\n    async fn remove(\n        &self,\n        request: tonic::Request<RemoveSnapshotRequest>,\n    ) -> Result<tonic::Response<()>, tonic::Status> {\n        let request = request.into_inner();\n        self.snapshotter\n            .remove(request.key)\n            .await\n            .map_err(Into::into)?;\n        Ok(tonic::Response::new(()))\n    }\n\n    async fn stat(\n        &self,\n        request: tonic::Request<StatSnapshotRequest>,\n    ) -> Result<tonic::Response<StatSnapshotResponse>, tonic::Status> {\n        let request = request.into_inner();\n        let info = self\n            .snapshotter\n            .stat(request.key)\n            .await\n            .map_err(Into::into)?;\n        let message = StatSnapshotResponse {\n            info: Some(info.into()),\n        };\n        Ok(tonic::Response::new(message))\n    }\n\n    async 
fn update(\n        &self,\n        request: tonic::Request<UpdateSnapshotRequest>,\n    ) -> Result<tonic::Response<UpdateSnapshotResponse>, tonic::Status> {\n        let request = request.into_inner();\n        let info = match request.info {\n            Some(info) => info,\n            None => return Err(tonic::Status::failed_precondition(\"info is required\")),\n        };\n\n        let info = match info.try_into() {\n            Ok(info) => info,\n            Err(err) => {\n                let msg = format!(\"Failed to convert timestamp: {}\", err);\n                return Err(tonic::Status::invalid_argument(msg));\n            }\n        };\n\n        let fields = request.update_mask.map(|mask| mask.paths);\n\n        let info = self\n            .snapshotter\n            .update(info, fields)\n            .await\n            .map_err(Into::into)?;\n        let message = UpdateSnapshotResponse {\n            info: Some(info.into()),\n        };\n\n        Ok(tonic::Response::new(message))\n    }\n\n    type ListStream = BoxStream<'static, Result<ListSnapshotsResponse, tonic::Status>>;\n\n    async fn list(\n        &self,\n        request: tonic::Request<ListSnapshotsRequest>,\n    ) -> Result<tonic::Response<Self::ListStream>, tonic::Status> {\n        let request = request.into_inner();\n        let sn = self.snapshotter.clone();\n        let output = async_stream::try_stream! 
{\n            let walk_stream = sn.list(request.snapshotter, request.filters).await?;\n            let mut walk_stream = std::pin::pin!(walk_stream);\n            let mut infos = Vec::<Info>::new();\n            while let Some(info) = walk_stream.next().await {\n                infos.push(info?.into());\n                if infos.len() >= 100 {\n                    yield ListSnapshotsResponse { info: mem::take(&mut infos) };\n                }\n            }\n\n            if !infos.is_empty() {\n                yield ListSnapshotsResponse { info: infos };\n            }\n        };\n        Ok(tonic::Response::new(Box::pin(output)))\n    }\n\n    async fn usage(\n        &self,\n        request: tonic::Request<UsageRequest>,\n    ) -> Result<tonic::Response<UsageResponse>, tonic::Status> {\n        let request = request.into_inner();\n\n        let usage = self\n            .snapshotter\n            .usage(request.key)\n            .await\n            .map_err(Into::into)?;\n        let message = UsageResponse {\n            size: usage.size,\n            inodes: usage.inodes,\n        };\n\n        Ok(tonic::Response::new(message))\n    }\n\n    async fn cleanup(\n        &self,\n        _request: tonic::Request<CleanupRequest>,\n    ) -> Result<tonic::Response<()>, tonic::Status> {\n        self.snapshotter.clear().await.map_err(Into::into)?;\n        Ok(tonic::Response::new(()))\n    }\n}\n"
  },
  {
    "path": "crates/snapshots/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.snapshots.v1;\n\nimport \"google/protobuf/empty.proto\";\nimport \"google/protobuf/field_mask.proto\";\nimport \"google/protobuf/timestamp.proto\";\nimport \"types/mount.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/snapshots/v1;snapshots\";\n\n// Snapshot service manages snapshots\nservice Snapshots {\n  rpc Prepare(PrepareSnapshotRequest) returns (PrepareSnapshotResponse);\n  rpc View(ViewSnapshotRequest) returns (ViewSnapshotResponse);\n  rpc Mounts(MountsRequest) returns (MountsResponse);\n  rpc Commit(CommitSnapshotRequest) returns (google.protobuf.Empty);\n  rpc Remove(RemoveSnapshotRequest) returns (google.protobuf.Empty);\n  rpc Stat(StatSnapshotRequest) returns (StatSnapshotResponse);\n  rpc Update(UpdateSnapshotRequest) returns (UpdateSnapshotResponse);\n  rpc List(ListSnapshotsRequest) returns (stream ListSnapshotsResponse);\n  rpc Usage(UsageRequest) returns (UsageResponse);\n  rpc Cleanup(CleanupRequest) returns (google.protobuf.Empty);\n}\n\nmessage PrepareSnapshotRequest {\n  string snapshotter = 1;\n  string key = 2;\n  string parent = 3;\n\n  // Labels are arbitrary data on snapshots.\n  //\n  // The combined size of a key/value pair cannot exceed 4096 bytes.\n  map<string, string> labels = 4;\n}\n\nmessage PrepareSnapshotResponse {\n  
repeated containerd.types.Mount mounts = 1;\n}\n\nmessage ViewSnapshotRequest {\n  string snapshotter = 1;\n  string key = 2;\n  string parent = 3;\n\n  // Labels are arbitrary data on snapshots.\n  //\n  // The combined size of a key/value pair cannot exceed 4096 bytes.\n  map<string, string> labels = 4;\n}\n\nmessage ViewSnapshotResponse {\n  repeated containerd.types.Mount mounts = 1;\n}\n\nmessage MountsRequest {\n  string snapshotter = 1;\n  string key = 2;\n}\n\nmessage MountsResponse {\n  repeated containerd.types.Mount mounts = 1;\n}\n\nmessage RemoveSnapshotRequest {\n  string snapshotter = 1;\n  string key = 2;\n}\n\nmessage CommitSnapshotRequest {\n  string snapshotter = 1;\n  string name = 2;\n  string key = 3;\n\n  // Labels are arbitrary data on snapshots.\n  //\n  // The combined size of a key/value pair cannot exceed 4096 bytes.\n  map<string, string> labels = 4;\n\n  string parent = 5;\n}\n\nmessage StatSnapshotRequest {\n  string snapshotter = 1;\n  string key = 2;\n}\n\nenum Kind {\n  UNKNOWN = 0;\n  VIEW = 1;\n  ACTIVE = 2;\n  COMMITTED = 3;\n}\n\nmessage Info {\n  string name = 1;\n  string parent = 2;\n  Kind kind = 3;\n\n  // CreatedAt provides the time at which the snapshot was created.\n  google.protobuf.Timestamp created_at = 4;\n\n  // UpdatedAt provides the time the info was last updated.\n  google.protobuf.Timestamp updated_at = 5;\n\n  // Labels are arbitrary data on snapshots.\n  //\n  // The combined size of a key/value pair cannot exceed 4096 bytes.\n  map<string, string> labels = 6;\n}\n\nmessage StatSnapshotResponse {\n  Info info = 1;\n}\n\nmessage UpdateSnapshotRequest {\n  string snapshotter = 1;\n  Info info = 2;\n\n  // UpdateMask specifies which fields to perform the update on. 
If empty,\n  // the operation applies to all fields.\n  //\n  // In info, Name, Parent, Kind, Created are immutable,\n  // other field may be updated using this mask.\n  // If no mask is provided, all mutable field are updated.\n  google.protobuf.FieldMask update_mask = 3;\n}\n\nmessage UpdateSnapshotResponse {\n  Info info = 1;\n}\n\nmessage ListSnapshotsRequest {\n  string snapshotter = 1;\n\n  // Filters contains one or more filters using the syntax defined in the\n  // containerd filter package.\n  //\n  // The returned result will be those that match any of the provided\n  // filters. Expanded, images that match the following will be\n  // returned:\n  //\n  //\tfilters[0] or filters[1] or ... or filters[n-1] or filters[n]\n  //\n  // If filters is zero-length or nil, all items will be returned.\n  repeated string filters = 2;\n}\n\nmessage ListSnapshotsResponse {\n  repeated Info info = 1;\n}\n\nmessage UsageRequest {\n  string snapshotter = 1;\n  string key = 2;\n}\n\nmessage UsageResponse {\n  int64 size = 1;\n  int64 inodes = 2;\n}\n\nmessage CleanupRequest {\n  string snapshotter = 1;\n}\n"
  },
  {
    "path": "crates/snapshots/vendor/github.com/containerd/containerd/api/types/mount.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types;\n\nimport \"google/protobuf/timestamp.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\n// Mount describes mounts for a container.\n//\n// This type is the lingua franca of ContainerD. All services provide mounts\n// to be used with the container at creation time.\n//\n// The Mount type follows the structure of the mount syscall, including a type,\n// source, target and options.\nmessage Mount {\n  // Type defines the nature of the mount.\n  string type = 1;\n\n  // Source specifies the name of the mount. Depending on mount type, this\n  // may be a volume name or a host path, or even ignored.\n  string source = 2;\n\n  // Target path in container\n  string target = 3;\n\n  // Options specifies zero or more fstab style mount options.\n  repeated string options = 4;\n}\n\nmessage ActiveMount {\n  Mount mount = 1;\n\n  google.protobuf.Timestamp mounted_at = 2;\n\n  string mount_point = 3;\n\n  map<string, string> data = 4;\n}\n\nmessage ActivationInfo {\n  string name = 1;\n\n  repeated ActiveMount active = 2;\n\n  repeated Mount system = 3;\n\n  map<string, string> labels = 4;\n}\n"
  },
  {
    "path": "crates/snapshots/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.services.snapshots.v1;\n\nimport \"google/protobuf/empty.proto\";\nimport \"google/protobuf/field_mask.proto\";\nimport \"google/protobuf/timestamp.proto\";\nimport \"types/mount.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/services/snapshots/v1;snapshots\";\n\n// Snapshot service manages snapshots\nservice Snapshots {\n  rpc Prepare(PrepareSnapshotRequest) returns (PrepareSnapshotResponse);\n  rpc View(ViewSnapshotRequest) returns (ViewSnapshotResponse);\n  rpc Mounts(MountsRequest) returns (MountsResponse);\n  rpc Commit(CommitSnapshotRequest) returns (google.protobuf.Empty);\n  rpc Remove(RemoveSnapshotRequest) returns (google.protobuf.Empty);\n  rpc Stat(StatSnapshotRequest) returns (StatSnapshotResponse);\n  rpc Update(UpdateSnapshotRequest) returns (UpdateSnapshotResponse);\n  rpc List(ListSnapshotsRequest) returns (stream ListSnapshotsResponse);\n  rpc Usage(UsageRequest) returns (UsageResponse);\n  rpc Cleanup(CleanupRequest) returns (google.protobuf.Empty);\n}\n\nmessage PrepareSnapshotRequest {\n  string snapshotter = 1;\n  string key = 2;\n  string parent = 3;\n\n  // Labels are arbitrary data on snapshots.\n  //\n  // The combined size of a key/value pair cannot exceed 4096 bytes.\n  map<string, string> labels = 4;\n}\n\nmessage PrepareSnapshotResponse {\n  
repeated containerd.types.Mount mounts = 1;\n}\n\nmessage ViewSnapshotRequest {\n  string snapshotter = 1;\n  string key = 2;\n  string parent = 3;\n\n  // Labels are arbitrary data on snapshots.\n  //\n  // The combined size of a key/value pair cannot exceed 4096 bytes.\n  map<string, string> labels = 4;\n}\n\nmessage ViewSnapshotResponse {\n  repeated containerd.types.Mount mounts = 1;\n}\n\nmessage MountsRequest {\n  string snapshotter = 1;\n  string key = 2;\n}\n\nmessage MountsResponse {\n  repeated containerd.types.Mount mounts = 1;\n}\n\nmessage RemoveSnapshotRequest {\n  string snapshotter = 1;\n  string key = 2;\n}\n\nmessage CommitSnapshotRequest {\n  string snapshotter = 1;\n  string name = 2;\n  string key = 3;\n\n  // Labels are arbitrary data on snapshots.\n  //\n  // The combined size of a key/value pair cannot exceed 4096 bytes.\n  map<string, string> labels = 4;\n\n  string parent = 5;\n}\n\nmessage StatSnapshotRequest {\n  string snapshotter = 1;\n  string key = 2;\n}\n\nenum Kind {\n  UNKNOWN = 0;\n  VIEW = 1;\n  ACTIVE = 2;\n  COMMITTED = 3;\n}\n\nmessage Info {\n  string name = 1;\n  string parent = 2;\n  Kind kind = 3;\n\n  // CreatedAt provides the time at which the snapshot was created.\n  google.protobuf.Timestamp created_at = 4;\n\n  // UpdatedAt provides the time the info was last updated.\n  google.protobuf.Timestamp updated_at = 5;\n\n  // Labels are arbitrary data on snapshots.\n  //\n  // The combined size of a key/value pair cannot exceed 4096 bytes.\n  map<string, string> labels = 6;\n}\n\nmessage StatSnapshotResponse {\n  Info info = 1;\n}\n\nmessage UpdateSnapshotRequest {\n  string snapshotter = 1;\n  Info info = 2;\n\n  // UpdateMask specifies which fields to perform the update on. 
If empty,\n  // the operation applies to all fields.\n  //\n  // In info, Name, Parent, Kind, Created are immutable,\n  // other field may be updated using this mask.\n  // If no mask is provided, all mutable field are updated.\n  google.protobuf.FieldMask update_mask = 3;\n}\n\nmessage UpdateSnapshotResponse {\n  Info info = 1;\n}\n\nmessage ListSnapshotsRequest {\n  string snapshotter = 1;\n\n  // Filters contains one or more filters using the syntax defined in the\n  // containerd filter package.\n  //\n  // The returned result will be those that match any of the provided\n  // filters. Expanded, images that match the following will be\n  // returned:\n  //\n  //\tfilters[0] or filters[1] or ... or filters[n-1] or filters[n]\n  //\n  // If filters is zero-length or nil, all items will be returned.\n  repeated string filters = 2;\n}\n\nmessage ListSnapshotsResponse {\n  repeated Info info = 1;\n}\n\nmessage UsageRequest {\n  string snapshotter = 1;\n  string key = 2;\n}\n\nmessage UsageResponse {\n  int64 size = 1;\n  int64 inodes = 2;\n}\n\nmessage CleanupRequest {\n  string snapshotter = 1;\n}\n"
  },
  {
    "path": "crates/snapshots/vendor/github.com/containerd/containerd/vendor/github.com/containerd/containerd/api/types/mount.proto",
    "content": "/*\n   Copyright The containerd Authors.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n        http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n*/\n\nsyntax = \"proto3\";\n\npackage containerd.types;\n\nimport \"google/protobuf/timestamp.proto\";\n\noption go_package = \"github.com/containerd/containerd/api/types;types\";\n\n// Mount describes mounts for a container.\n//\n// This type is the lingua franca of ContainerD. All services provide mounts\n// to be used with the container at creation time.\n//\n// The Mount type follows the structure of the mount syscall, including a type,\n// source, target and options.\nmessage Mount {\n  // Type defines the nature of the mount.\n  string type = 1;\n\n  // Source specifies the name of the mount. Depending on mount type, this\n  // may be a volume name or a host path, or even ignored.\n  string source = 2;\n\n  // Target path in container\n  string target = 3;\n\n  // Options specifies zero or more fstab style mount options.\n  repeated string options = 4;\n}\n\nmessage ActiveMount {\n  Mount mount = 1;\n\n  google.protobuf.Timestamp mounted_at = 2;\n\n  string mount_point = 3;\n\n  map<string, string> data = 4;\n}\n\nmessage ActivationInfo {\n  string name = 1;\n\n  repeated ActiveMount active = 2;\n\n  repeated Mount system = 3;\n\n  map<string, string> labels = 4;\n}\n"
  },
  {
    "path": "crates/snapshots/vendor/gogoproto/gogo.proto",
    "content": "// Protocol Buffers for Go with Gadgets\n//\n// Copyright (c) 2013, The GoGo Authors. All rights reserved.\n// http://github.com/gogo/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nsyntax = \"proto2\";\npackage gogoproto;\n\nimport \"google/protobuf/descriptor.proto\";\n\noption java_package = \"com.google.protobuf\";\noption java_outer_classname = \"GoGoProtos\";\noption go_package = \"github.com/gogo/protobuf/gogoproto\";\n\nextend google.protobuf.EnumOptions {\n\toptional bool goproto_enum_prefix = 62001;\n\toptional bool goproto_enum_stringer = 62021;\n\toptional bool enum_stringer = 62022;\n\toptional string enum_customname = 62023;\n\toptional bool enumdecl = 
62024;\n}\n\nextend google.protobuf.EnumValueOptions {\n\toptional string enumvalue_customname = 66001;\n}\n\nextend google.protobuf.FileOptions {\n\toptional bool goproto_getters_all = 63001;\n\toptional bool goproto_enum_prefix_all = 63002;\n\toptional bool goproto_stringer_all = 63003;\n\toptional bool verbose_equal_all = 63004;\n\toptional bool face_all = 63005;\n\toptional bool gostring_all = 63006;\n\toptional bool populate_all = 63007;\n\toptional bool stringer_all = 63008;\n\toptional bool onlyone_all = 63009;\n\n\toptional bool equal_all = 63013;\n\toptional bool description_all = 63014;\n\toptional bool testgen_all = 63015;\n\toptional bool benchgen_all = 63016;\n\toptional bool marshaler_all = 63017;\n\toptional bool unmarshaler_all = 63018;\n\toptional bool stable_marshaler_all = 63019;\n\n\toptional bool sizer_all = 63020;\n\n\toptional bool goproto_enum_stringer_all = 63021;\n\toptional bool enum_stringer_all = 63022;\n\n\toptional bool unsafe_marshaler_all = 63023;\n\toptional bool unsafe_unmarshaler_all = 63024;\n\n\toptional bool goproto_extensions_map_all = 63025;\n\toptional bool goproto_unrecognized_all = 63026;\n\toptional bool gogoproto_import = 63027;\n\toptional bool protosizer_all = 63028;\n\toptional bool compare_all = 63029;\n    optional bool typedecl_all = 63030;\n    optional bool enumdecl_all = 63031;\n\n\toptional bool goproto_registration = 63032;\n\toptional bool messagename_all = 63033;\n\n\toptional bool goproto_sizecache_all = 63034;\n\toptional bool goproto_unkeyed_all = 63035;\n}\n\nextend google.protobuf.MessageOptions {\n\toptional bool goproto_getters = 64001;\n\toptional bool goproto_stringer = 64003;\n\toptional bool verbose_equal = 64004;\n\toptional bool face = 64005;\n\toptional bool gostring = 64006;\n\toptional bool populate = 64007;\n\toptional bool stringer = 67008;\n\toptional bool onlyone = 64009;\n\n\toptional bool equal = 64013;\n\toptional bool description = 64014;\n\toptional bool testgen = 64015;\n\toptional 
bool benchgen = 64016;\n\toptional bool marshaler = 64017;\n\toptional bool unmarshaler = 64018;\n\toptional bool stable_marshaler = 64019;\n\n\toptional bool sizer = 64020;\n\n\toptional bool unsafe_marshaler = 64023;\n\toptional bool unsafe_unmarshaler = 64024;\n\n\toptional bool goproto_extensions_map = 64025;\n\toptional bool goproto_unrecognized = 64026;\n\n\toptional bool protosizer = 64028;\n\toptional bool compare = 64029;\n\n\toptional bool typedecl = 64030;\n\n\toptional bool messagename = 64033;\n\n\toptional bool goproto_sizecache = 64034;\n\toptional bool goproto_unkeyed = 64035;\n}\n\nextend google.protobuf.FieldOptions {\n\toptional bool nullable = 65001;\n\toptional bool embed = 65002;\n\toptional string customtype = 65003;\n\toptional string customname = 65004;\n\toptional string jsontag = 65005;\n\toptional string moretags = 65006;\n\toptional string casttype = 65007;\n\toptional string castkey = 65008;\n\toptional string castvalue = 65009;\n\n\toptional bool stdtime = 65010;\n\toptional bool stdduration = 65011;\n\toptional bool wktpointer = 65012;\n\n}\n"
  },
  {
    "path": "crates/snapshots/vendor/google/protobuf/descriptor.proto",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Author: kenton@google.com (Kenton Varda)\n//  Based on original Protocol Buffers design by\n//  Sanjay Ghemawat, Jeff Dean, and others.\n//\n// The messages in this file describe the definitions found in .proto files.\n// A valid .proto file can be translated directly to a FileDescriptorProto\n// without any other information (e.g. without reading its imports).\n\n\nsyntax = \"proto2\";\n\npackage google.protobuf;\n\noption go_package = \"google.golang.org/protobuf/types/descriptorpb\";\noption java_package = \"com.google.protobuf\";\noption java_outer_classname = \"DescriptorProtos\";\noption csharp_namespace = \"Google.Protobuf.Reflection\";\noption objc_class_prefix = \"GPB\";\noption cc_enable_arenas = true;\n\n// descriptor.proto must be optimized for speed because reflection-based\n// algorithms don't work during bootstrapping.\noption optimize_for = SPEED;\n\n// The protocol compiler can output a FileDescriptorSet containing the .proto\n// files it parses.\nmessage FileDescriptorSet {\n  repeated FileDescriptorProto file = 1;\n}\n\n// Describes a complete .proto file.\nmessage FileDescriptorProto {\n  optional string name = 1;     // file name, relative to root of source tree\n  optional string package = 2;  // e.g. 
\"foo\", \"foo.bar\", etc.\n\n  // Names of files imported by this file.\n  repeated string dependency = 3;\n  // Indexes of the public imported files in the dependency list above.\n  repeated int32 public_dependency = 10;\n  // Indexes of the weak imported files in the dependency list.\n  // For Google-internal migration only. Do not use.\n  repeated int32 weak_dependency = 11;\n\n  // All top-level definitions in this file.\n  repeated DescriptorProto message_type = 4;\n  repeated EnumDescriptorProto enum_type = 5;\n  repeated ServiceDescriptorProto service = 6;\n  repeated FieldDescriptorProto extension = 7;\n\n  optional FileOptions options = 8;\n\n  // This field contains optional information about the original source code.\n  // You may safely remove this entire field without harming runtime\n  // functionality of the descriptors -- the information is needed only by\n  // development tools.\n  optional SourceCodeInfo source_code_info = 9;\n\n  // The syntax of the proto file.\n  // The supported values are \"proto2\" and \"proto3\".\n  optional string syntax = 12;\n}\n\n// Describes a message type.\nmessage DescriptorProto {\n  optional string name = 1;\n\n  repeated FieldDescriptorProto field = 2;\n  repeated FieldDescriptorProto extension = 6;\n\n  repeated DescriptorProto nested_type = 3;\n  repeated EnumDescriptorProto enum_type = 4;\n\n  message ExtensionRange {\n    optional int32 start = 1;  // Inclusive.\n    optional int32 end = 2;    // Exclusive.\n\n    optional ExtensionRangeOptions options = 3;\n  }\n  repeated ExtensionRange extension_range = 5;\n\n  repeated OneofDescriptorProto oneof_decl = 8;\n\n  optional MessageOptions options = 7;\n\n  // Range of reserved tag numbers. Reserved tag numbers may not be used by\n  // fields or extension ranges in the same message. 
Reserved ranges may\n  // not overlap.\n  message ReservedRange {\n    optional int32 start = 1;  // Inclusive.\n    optional int32 end = 2;    // Exclusive.\n  }\n  repeated ReservedRange reserved_range = 9;\n  // Reserved field names, which may not be used by fields in the same message.\n  // A given name may only be reserved once.\n  repeated string reserved_name = 10;\n}\n\nmessage ExtensionRangeOptions {\n  // The parser stores options it doesn't recognize here. See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n\n  // Clients can define custom options in extensions of this message. See above.\n  extensions 1000 to max;\n}\n\n// Describes a field within a message.\nmessage FieldDescriptorProto {\n  enum Type {\n    // 0 is reserved for errors.\n    // Order is weird for historical reasons.\n    TYPE_DOUBLE = 1;\n    TYPE_FLOAT = 2;\n    // Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT64 if\n    // negative values are likely.\n    TYPE_INT64 = 3;\n    TYPE_UINT64 = 4;\n    // Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT32 if\n    // negative values are likely.\n    TYPE_INT32 = 5;\n    TYPE_FIXED64 = 6;\n    TYPE_FIXED32 = 7;\n    TYPE_BOOL = 8;\n    TYPE_STRING = 9;\n    // Tag-delimited aggregate.\n    // Group type is deprecated and not supported in proto3. 
However, Proto3\n    // implementations should still be able to parse the group wire format and\n    // treat group fields as unknown fields.\n    TYPE_GROUP = 10;\n    TYPE_MESSAGE = 11;  // Length-delimited aggregate.\n\n    // New in version 2.\n    TYPE_BYTES = 12;\n    TYPE_UINT32 = 13;\n    TYPE_ENUM = 14;\n    TYPE_SFIXED32 = 15;\n    TYPE_SFIXED64 = 16;\n    TYPE_SINT32 = 17;  // Uses ZigZag encoding.\n    TYPE_SINT64 = 18;  // Uses ZigZag encoding.\n  }\n\n  enum Label {\n    // 0 is reserved for errors\n    LABEL_OPTIONAL = 1;\n    LABEL_REQUIRED = 2;\n    LABEL_REPEATED = 3;\n  }\n\n  optional string name = 1;\n  optional int32 number = 3;\n  optional Label label = 4;\n\n  // If type_name is set, this need not be set.  If both this and type_name\n  // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.\n  optional Type type = 5;\n\n  // For message and enum types, this is the name of the type.  If the name\n  // starts with a '.', it is fully-qualified.  Otherwise, C++-like scoping\n  // rules are used to find the type (i.e. first the nested types within this\n  // message are searched, then within the parent, on up to the root\n  // namespace).\n  optional string type_name = 6;\n\n  // For extensions, this is the name of the type being extended.  It is\n  // resolved in the same manner as type_name.\n  optional string extendee = 2;\n\n  // For numeric types, contains the original text representation of the value.\n  // For booleans, \"true\" or \"false\".\n  // For strings, contains the default text contents (not escaped in any way).\n  // For bytes, contains the C escaped value.  All bytes >= 128 are escaped.\n  // TODO(kenton):  Base-64 encode?\n  optional string default_value = 7;\n\n  // If set, gives the index of a oneof in the containing type's oneof_decl\n  // list.  This field is a member of that oneof.\n  optional int32 oneof_index = 9;\n\n  // JSON name of this field. The value is set by protocol compiler. 
If the\n  // user has set a \"json_name\" option on this field, that option's value\n  // will be used. Otherwise, it's deduced from the field's name by converting\n  // it to camelCase.\n  optional string json_name = 10;\n\n  optional FieldOptions options = 8;\n\n  // If true, this is a proto3 \"optional\". When a proto3 field is optional, it\n  // tracks presence regardless of field type.\n  //\n  // When proto3_optional is true, this field must be belong to a oneof to\n  // signal to old proto3 clients that presence is tracked for this field. This\n  // oneof is known as a \"synthetic\" oneof, and this field must be its sole\n  // member (each proto3 optional field gets its own synthetic oneof). Synthetic\n  // oneofs exist in the descriptor only, and do not generate any API. Synthetic\n  // oneofs must be ordered after all \"real\" oneofs.\n  //\n  // For message fields, proto3_optional doesn't create any semantic change,\n  // since non-repeated message fields always track presence. However it still\n  // indicates the semantic detail of whether the user wrote \"optional\" or not.\n  // This can be useful for round-tripping the .proto file. For consistency we\n  // give message fields a synthetic oneof also, even though it is not required\n  // to track presence. This is especially important because the parser can't\n  // tell if a field is a message or an enum, so it must always create a\n  // synthetic oneof.\n  //\n  // Proto2 optional fields do not set this flag, because they already indicate\n  // optional with `LABEL_OPTIONAL`.\n  optional bool proto3_optional = 17;\n}\n\n// Describes a oneof.\nmessage OneofDescriptorProto {\n  optional string name = 1;\n  optional OneofOptions options = 2;\n}\n\n// Describes an enum type.\nmessage EnumDescriptorProto {\n  optional string name = 1;\n\n  repeated EnumValueDescriptorProto value = 2;\n\n  optional EnumOptions options = 3;\n\n  // Range of reserved numeric values. 
Reserved values may not be used by\n  // entries in the same enum. Reserved ranges may not overlap.\n  //\n  // Note that this is distinct from DescriptorProto.ReservedRange in that it\n  // is inclusive such that it can appropriately represent the entire int32\n  // domain.\n  message EnumReservedRange {\n    optional int32 start = 1;  // Inclusive.\n    optional int32 end = 2;    // Inclusive.\n  }\n\n  // Range of reserved numeric values. Reserved numeric values may not be used\n  // by enum values in the same enum declaration. Reserved ranges may not\n  // overlap.\n  repeated EnumReservedRange reserved_range = 4;\n\n  // Reserved enum value names, which may not be reused. A given name may only\n  // be reserved once.\n  repeated string reserved_name = 5;\n}\n\n// Describes a value within an enum.\nmessage EnumValueDescriptorProto {\n  optional string name = 1;\n  optional int32 number = 2;\n\n  optional EnumValueOptions options = 3;\n}\n\n// Describes a service.\nmessage ServiceDescriptorProto {\n  optional string name = 1;\n  repeated MethodDescriptorProto method = 2;\n\n  optional ServiceOptions options = 3;\n}\n\n// Describes a method of a service.\nmessage MethodDescriptorProto {\n  optional string name = 1;\n\n  // Input and output type names.  These are resolved in the same way as\n  // FieldDescriptorProto.type_name, but must refer to a message type.\n  optional string input_type = 2;\n  optional string output_type = 3;\n\n  optional MethodOptions options = 4;\n\n  // Identifies if client streams multiple client messages\n  optional bool client_streaming = 5 [default = false];\n  // Identifies if server streams multiple server messages\n  optional bool server_streaming = 6 [default = false];\n}\n\n\n// ===================================================================\n// Options\n\n// Each of the definitions above may have \"options\" attached.  
These are\n// just annotations which may cause code to be generated slightly differently\n// or may contain hints for code that manipulates protocol messages.\n//\n// Clients may define custom options as extensions of the *Options messages.\n// These extensions may not yet be known at parsing time, so the parser cannot\n// store the values in them.  Instead it stores them in a field in the *Options\n// message called uninterpreted_option. This field must have the same name\n// across all *Options messages. We then use this field to populate the\n// extensions when we build a descriptor, at which point all protos have been\n// parsed and so all extensions are known.\n//\n// Extension numbers for custom options may be chosen as follows:\n// * For options which will only be used within a single application or\n//   organization, or for experimental options, use field numbers 50000\n//   through 99999.  It is up to you to ensure that you do not use the\n//   same number for multiple options.\n// * For options which will be published and used publicly by multiple\n//   independent entities, e-mail protobuf-global-extension-registry@google.com\n//   to reserve extension numbers. Simply provide your project name (e.g.\n//   Objective-C plugin) and your project website (if available) -- there's no\n//   need to explain how you intend to use them. Usually you only need one\n//   extension number. You can declare multiple options with only one extension\n//   number by putting them in a sub-message. See the Custom Options section of\n//   the docs for examples:\n//   https://developers.google.com/protocol-buffers/docs/proto#options\n//   If this turns out to be popular, a web service will be set up\n//   to automatically assign option numbers.\n\nmessage FileOptions {\n\n  // Sets the Java package where classes generated from this .proto will be\n  // placed.  
By default, the proto package is used, but this is often\n  // inappropriate because proto packages do not normally start with backwards\n  // domain names.\n  optional string java_package = 1;\n\n\n  // Controls the name of the wrapper Java class generated for the .proto file.\n  // That class will always contain the .proto file's getDescriptor() method as\n  // well as any top-level extensions defined in the .proto file.\n  // If java_multiple_files is disabled, then all the other classes from the\n  // .proto file will be nested inside the single wrapper outer class.\n  optional string java_outer_classname = 8;\n\n  // If enabled, then the Java code generator will generate a separate .java\n  // file for each top-level message, enum, and service defined in the .proto\n  // file.  Thus, these types will *not* be nested inside the wrapper class\n  // named by java_outer_classname.  However, the wrapper class will still be\n  // generated to contain the file's getDescriptor() method as well as any\n  // top-level extensions defined in the file.\n  optional bool java_multiple_files = 10 [default = false];\n\n  // This option does nothing.\n  optional bool java_generate_equals_and_hash = 20 [deprecated=true];\n\n  // If set true, then the Java2 code generator will generate code that\n  // throws an exception whenever an attempt is made to assign a non-UTF-8\n  // byte sequence to a string field.\n  // Message reflection will do the same.\n  // However, an extension field still accepts non-UTF-8 byte sequences.\n  // This option has no effect on when used with the lite runtime.\n  optional bool java_string_check_utf8 = 27 [default = false];\n\n\n  // Generated classes can be optimized for speed or code size.\n  enum OptimizeMode {\n    SPEED = 1;         // Generate complete code for parsing, serialization,\n                       // etc.\n    CODE_SIZE = 2;     // Use ReflectionOps to implement these methods.\n    LITE_RUNTIME = 3;  // Generate code using MessageLite 
and the lite runtime.\n  }\n  optional OptimizeMode optimize_for = 9 [default = SPEED];\n\n  // Sets the Go package where structs generated from this .proto will be\n  // placed. If omitted, the Go package will be derived from the following:\n  //   - The basename of the package import path, if provided.\n  //   - Otherwise, the package statement in the .proto file, if present.\n  //   - Otherwise, the basename of the .proto file, without extension.\n  optional string go_package = 11;\n\n\n\n\n  // Should generic services be generated in each language?  \"Generic\" services\n  // are not specific to any particular RPC system.  They are generated by the\n  // main code generators in each language (without additional plugins).\n  // Generic services were the only kind of service generation supported by\n  // early versions of google.protobuf.\n  //\n  // Generic services are now considered deprecated in favor of using plugins\n  // that generate code specific to your particular RPC system.  Therefore,\n  // these default to false.  Old code which depends on generic services should\n  // explicitly set them to true.\n  optional bool cc_generic_services = 16 [default = false];\n  optional bool java_generic_services = 17 [default = false];\n  optional bool py_generic_services = 18 [default = false];\n  optional bool php_generic_services = 42 [default = false];\n\n  // Is this file deprecated?\n  // Depending on the target platform, this can emit Deprecated annotations\n  // for everything in the file, or it will be completely ignored; in the very\n  // least, this is a formalization for deprecating files.\n  optional bool deprecated = 23 [default = false];\n\n  // Enables the use of arenas for the proto messages in this file. This applies\n  // only to generated classes for C++.\n  optional bool cc_enable_arenas = 31 [default = true];\n\n\n  // Sets the objective c class prefix which is prepended to all objective c\n  // generated classes from this .proto. 
There is no default.\n  optional string objc_class_prefix = 36;\n\n  // Namespace for generated classes; defaults to the package.\n  optional string csharp_namespace = 37;\n\n  // By default Swift generators will take the proto package and CamelCase it\n  // replacing '.' with underscore and use that to prefix the types/symbols\n  // defined. When this options is provided, they will use this value instead\n  // to prefix the types/symbols defined.\n  optional string swift_prefix = 39;\n\n  // Sets the php class prefix which is prepended to all php generated classes\n  // from this .proto. Default is empty.\n  optional string php_class_prefix = 40;\n\n  // Use this option to change the namespace of php generated classes. Default\n  // is empty. When this option is empty, the package name will be used for\n  // determining the namespace.\n  optional string php_namespace = 41;\n\n  // Use this option to change the namespace of php generated metadata classes.\n  // Default is empty. When this option is empty, the proto file name will be\n  // used for determining the namespace.\n  optional string php_metadata_namespace = 44;\n\n  // Use this option to change the package of ruby generated classes. Default\n  // is empty. When this option is not set, the package name will be used for\n  // determining the ruby package.\n  optional string ruby_package = 45;\n\n\n  // The parser stores options it doesn't recognize here.\n  // See the documentation for the \"Options\" section above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message.\n  // See the documentation for the \"Options\" section above.\n  extensions 1000 to max;\n\n  reserved 38;\n}\n\nmessage MessageOptions {\n  // Set true to use the old proto1 MessageSet wire format for extensions.\n  // This is provided for backwards-compatibility with the MessageSet wire\n  // format.  
You should not use this for any other reason:  It's less\n  // efficient, has fewer features, and is more complicated.\n  //\n  // The message must be defined exactly as follows:\n  //   message Foo {\n  //     option message_set_wire_format = true;\n  //     extensions 4 to max;\n  //   }\n  // Note that the message cannot have any defined fields; MessageSets only\n  // have extensions.\n  //\n  // All extensions of your type must be singular messages; e.g. they cannot\n  // be int32s, enums, or repeated messages.\n  //\n  // Because this is an option, the above two restrictions are not enforced by\n  // the protocol compiler.\n  optional bool message_set_wire_format = 1 [default = false];\n\n  // Disables the generation of the standard \"descriptor()\" accessor, which can\n  // conflict with a field of the same name.  This is meant to make migration\n  // from proto1 easier; new code should avoid fields named \"descriptor\".\n  optional bool no_standard_descriptor_accessor = 2 [default = false];\n\n  // Is this message deprecated?\n  // Depending on the target platform, this can emit Deprecated annotations\n  // for the message, or it will be completely ignored; in the very least,\n  // this is a formalization for deprecating messages.\n  optional bool deprecated = 3 [default = false];\n\n  reserved 4, 5, 6;\n\n  // Whether the message is an automatically generated map entry type for the\n  // maps field.\n  //\n  // For maps fields:\n  //     map<KeyType, ValueType> map_field = 1;\n  // The parsed descriptor looks like:\n  //     message MapFieldEntry {\n  //         option map_entry = true;\n  //         optional KeyType key = 1;\n  //         optional ValueType value = 2;\n  //     }\n  //     repeated MapFieldEntry map_field = 1;\n  //\n  // Implementations may choose not to generate the map_entry=true message, but\n  // use a native map in the target language to hold the keys and values.\n  // The reflection APIs in such implementations still need to work 
as\n  // if the field is a repeated message field.\n  //\n  // NOTE: Do not set the option in .proto files. Always use the maps syntax\n  // instead. The option should only be implicitly set by the proto compiler\n  // parser.\n  optional bool map_entry = 7;\n\n  reserved 8;  // javalite_serializable\n  reserved 9;  // javanano_as_lite\n\n\n  // The parser stores options it doesn't recognize here. See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message. See above.\n  extensions 1000 to max;\n}\n\nmessage FieldOptions {\n  // The ctype option instructs the C++ code generator to use a different\n  // representation of the field than it normally would.  See the specific\n  // options below.  This option is not yet implemented in the open source\n  // release -- sorry, we'll try to include it in a future version!\n  optional CType ctype = 1 [default = STRING];\n  enum CType {\n    // Default mode.\n    STRING = 0;\n\n    CORD = 1;\n\n    STRING_PIECE = 2;\n  }\n  // The packed option can be enabled for repeated primitive fields to enable\n  // a more efficient representation on the wire. Rather than repeatedly\n  // writing the tag and type for each element, the entire array is encoded as\n  // a single length-delimited blob. In proto3, only explicit setting it to\n  // false will avoid using packed encoding.\n  optional bool packed = 2;\n\n  // The jstype option determines the JavaScript type used for values of the\n  // field.  The option is permitted only for 64 bit integral and fixed types\n  // (int64, uint64, sint64, fixed64, sfixed64).  A field with jstype JS_STRING\n  // is represented as JavaScript string, which avoids loss of precision that\n  // can happen when a large value is converted to a floating point JavaScript.\n  // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to\n  // use the JavaScript \"number\" type.  
The behavior of the default option\n  // JS_NORMAL is implementation dependent.\n  //\n  // This option is an enum to permit additional types to be added, e.g.\n  // goog.math.Integer.\n  optional JSType jstype = 6 [default = JS_NORMAL];\n  enum JSType {\n    // Use the default type.\n    JS_NORMAL = 0;\n\n    // Use JavaScript strings.\n    JS_STRING = 1;\n\n    // Use JavaScript numbers.\n    JS_NUMBER = 2;\n  }\n\n  // Should this field be parsed lazily?  Lazy applies only to message-type\n  // fields.  It means that when the outer message is initially parsed, the\n  // inner message's contents will not be parsed but instead stored in encoded\n  // form.  The inner message will actually be parsed when it is first accessed.\n  //\n  // This is only a hint.  Implementations are free to choose whether to use\n  // eager or lazy parsing regardless of the value of this option.  However,\n  // setting this option true suggests that the protocol author believes that\n  // using lazy parsing on this field is worth the additional bookkeeping\n  // overhead typically needed to implement it.\n  //\n  // This option does not affect the public interface of any generated code;\n  // all method signatures remain the same.  Furthermore, thread-safety of the\n  // interface is not affected by this option; const methods remain safe to\n  // call from multiple threads concurrently, while non-const methods continue\n  // to require exclusive access.\n  //\n  //\n  // Note that implementations may choose not to check required fields within\n  // a lazy sub-message.  That is, calling IsInitialized() on the outer message\n  // may return true even if the inner message has missing required fields.\n  // This is necessary because otherwise the inner message would have to be\n  // parsed in order to perform the check, defeating the purpose of lazy\n  // parsing.  An implementation which chooses not to check required fields\n  // must be consistent about it.  
That is, for any particular sub-message, the\n  // implementation must either *always* check its required fields, or *never*\n  // check its required fields, regardless of whether or not the message has\n  // been parsed.\n  optional bool lazy = 5 [default = false];\n\n  // Is this field deprecated?\n  // Depending on the target platform, this can emit Deprecated annotations\n  // for accessors, or it will be completely ignored; in the very least, this\n  // is a formalization for deprecating fields.\n  optional bool deprecated = 3 [default = false];\n\n  // For Google-internal migration only. Do not use.\n  optional bool weak = 10 [default = false];\n\n\n  // The parser stores options it doesn't recognize here. See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message. See above.\n  extensions 1000 to max;\n\n  reserved 4;  // removed jtype\n}\n\nmessage OneofOptions {\n  // The parser stores options it doesn't recognize here. See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message. See above.\n  extensions 1000 to max;\n}\n\nmessage EnumOptions {\n\n  // Set this option to true to allow mapping different tag names to the same\n  // value.\n  optional bool allow_alias = 2;\n\n  // Is this enum deprecated?\n  // Depending on the target platform, this can emit Deprecated annotations\n  // for the enum, or it will be completely ignored; in the very least, this\n  // is a formalization for deprecating enums.\n  optional bool deprecated = 3 [default = false];\n\n  reserved 5;  // javanano_as_lite\n\n  // The parser stores options it doesn't recognize here. See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message. 
See above.\n  extensions 1000 to max;\n}\n\nmessage EnumValueOptions {\n  // Is this enum value deprecated?\n  // Depending on the target platform, this can emit Deprecated annotations\n  // for the enum value, or it will be completely ignored; in the very least,\n  // this is a formalization for deprecating enum values.\n  optional bool deprecated = 1 [default = false];\n\n  // The parser stores options it doesn't recognize here. See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message. See above.\n  extensions 1000 to max;\n}\n\nmessage ServiceOptions {\n\n  // Note:  Field numbers 1 through 32 are reserved for Google's internal RPC\n  //   framework.  We apologize for hoarding these numbers to ourselves, but\n  //   we were already using them long before we decided to release Protocol\n  //   Buffers.\n\n  // Is this service deprecated?\n  // Depending on the target platform, this can emit Deprecated annotations\n  // for the service, or it will be completely ignored; in the very least,\n  // this is a formalization for deprecating services.\n  optional bool deprecated = 33 [default = false];\n\n  // The parser stores options it doesn't recognize here. See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message. See above.\n  extensions 1000 to max;\n}\n\nmessage MethodOptions {\n\n  // Note:  Field numbers 1 through 32 are reserved for Google's internal RPC\n  //   framework.  
We apologize for hoarding these numbers to ourselves, but\n  //   we were already using them long before we decided to release Protocol\n  //   Buffers.\n\n  // Is this method deprecated?\n  // Depending on the target platform, this can emit Deprecated annotations\n  // for the method, or it will be completely ignored; in the very least,\n  // this is a formalization for deprecating methods.\n  optional bool deprecated = 33 [default = false];\n\n  // Is this method side-effect-free (or safe in HTTP parlance), or idempotent,\n  // or neither? HTTP based RPC implementation may choose GET verb for safe\n  // methods, and PUT verb for idempotent methods instead of the default POST.\n  enum IdempotencyLevel {\n    IDEMPOTENCY_UNKNOWN = 0;\n    NO_SIDE_EFFECTS = 1;  // implies idempotent\n    IDEMPOTENT = 2;       // idempotent, but may have side effects\n  }\n  optional IdempotencyLevel idempotency_level = 34\n      [default = IDEMPOTENCY_UNKNOWN];\n\n  // The parser stores options it doesn't recognize here. See above.\n  repeated UninterpretedOption uninterpreted_option = 999;\n\n  // Clients can define custom options in extensions of this message. See above.\n  extensions 1000 to max;\n}\n\n\n// A message representing a option the parser does not recognize. This only\n// appears in options protos created by the compiler::Parser class.\n// DescriptorPool resolves these when building Descriptor objects. Therefore,\n// options protos in descriptor objects (e.g. returned by Descriptor::options(),\n// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions\n// in them.\nmessage UninterpretedOption {\n  // The name of the uninterpreted option.  Each string represents a segment in\n  // a dot-separated name.  
is_extension is true iff a segment represents an\n  // extension (denoted with parentheses in options specs in .proto files).\n  // E.g.,{ [\"foo\", false], [\"bar.baz\", true], [\"qux\", false] } represents\n  // \"foo.(bar.baz).qux\".\n  message NamePart {\n    required string name_part = 1;\n    required bool is_extension = 2;\n  }\n  repeated NamePart name = 2;\n\n  // The value of the uninterpreted option, in whatever type the tokenizer\n  // identified it as during parsing. Exactly one of these should be set.\n  optional string identifier_value = 3;\n  optional uint64 positive_int_value = 4;\n  optional int64 negative_int_value = 5;\n  optional double double_value = 6;\n  optional bytes string_value = 7;\n  optional string aggregate_value = 8;\n}\n\n// ===================================================================\n// Optional source code info\n\n// Encapsulates information about the original source file from which a\n// FileDescriptorProto was generated.\nmessage SourceCodeInfo {\n  // A Location identifies a piece of source code in a .proto file which\n  // corresponds to a particular definition.  This information is intended\n  // to be useful to IDEs, code indexers, documentation generators, and similar\n  // tools.\n  //\n  // For example, say we have a file like:\n  //   message Foo {\n  //     optional string foo = 1;\n  //   }\n  // Let's look at just the field definition:\n  //   optional string foo = 1;\n  //   ^       ^^     ^^  ^  ^^^\n  //   a       bc     de  f  ghi\n  // We have the following locations:\n  //   span   path               represents\n  //   [a,i)  [ 4, 0, 2, 0 ]     The whole field definition.\n  //   [a,b)  [ 4, 0, 2, 0, 4 ]  The label (optional).\n  //   [c,d)  [ 4, 0, 2, 0, 5 ]  The type (string).\n  //   [e,f)  [ 4, 0, 2, 0, 1 ]  The name (foo).\n  //   [g,h)  [ 4, 0, 2, 0, 3 ]  The number (1).\n  //\n  // Notes:\n  // - A location may refer to a repeated field itself (i.e. not to any\n  //   particular index within it). 
 This is used whenever a set of elements are\n  //   logically enclosed in a single code segment.  For example, an entire\n  //   extend block (possibly containing multiple extension definitions) will\n  //   have an outer location whose path refers to the \"extensions\" repeated\n  //   field without an index.\n  // - Multiple locations may have the same path.  This happens when a single\n  //   logical declaration is spread out across multiple places.  The most\n  //   obvious example is the \"extend\" block again -- there may be multiple\n  //   extend blocks in the same scope, each of which will have the same path.\n  // - A location's span is not always a subset of its parent's span.  For\n  //   example, the \"extendee\" of an extension declaration appears at the\n  //   beginning of the \"extend\" block and is shared by all extensions within\n  //   the block.\n  // - Just because a location's span is a subset of some other location's span\n  //   does not mean that it is a descendant.  For example, a \"group\" defines\n  //   both a type and a field in a single declaration.  Thus, the locations\n  //   corresponding to the type and field and their components will overlap.\n  // - Code which tries to interpret locations should probably be designed to\n  //   ignore those that it doesn't understand, as more types of locations could\n  //   be recorded in the future.\n  repeated Location location = 1;\n  message Location {\n    // Identifies which part of the FileDescriptorProto was defined at this\n    // location.\n    //\n    // Each element is a field number or an index.  They form a path from\n    // the root FileDescriptorProto to the place where the definition.  
For\n    // example, this path:\n    //   [ 4, 3, 2, 7, 1 ]\n    // refers to:\n    //   file.message_type(3)  // 4, 3\n    //       .field(7)         // 2, 7\n    //       .name()           // 1\n    // This is because FileDescriptorProto.message_type has field number 4:\n    //   repeated DescriptorProto message_type = 4;\n    // and DescriptorProto.field has field number 2:\n    //   repeated FieldDescriptorProto field = 2;\n    // and FieldDescriptorProto.name has field number 1:\n    //   optional string name = 1;\n    //\n    // Thus, the above path gives the location of a field name.  If we removed\n    // the last element:\n    //   [ 4, 3, 2, 7 ]\n    // this path refers to the whole field declaration (from the beginning\n    // of the label to the terminating semicolon).\n    repeated int32 path = 1 [packed = true];\n\n    // Always has exactly three or four elements: start line, start column,\n    // end line (optional, otherwise assumed same as start line), end column.\n    // These are packed into a single field for efficiency.  Note that line\n    // and column numbers are zero-based -- typically you will want to add\n    // 1 to each before displaying to a user.\n    repeated int32 span = 2 [packed = true];\n\n    // If this SourceCodeInfo represents a complete declaration, these are any\n    // comments appearing before and after the declaration which appear to be\n    // attached to the declaration.\n    //\n    // A series of line comments appearing on consecutive lines, with no other\n    // tokens appearing on those lines, will be treated as a single comment.\n    //\n    // leading_detached_comments will keep paragraphs of comments that appear\n    // before (but not connected to) the current element. Each paragraph,\n    // separated by empty lines, will be one comment element in the repeated\n    // field.\n    //\n    // Only the comment content is provided; comment markers (e.g. //) are\n    // stripped out.  
For block comments, leading whitespace and an asterisk\n    // will be stripped from the beginning of each line other than the first.\n    // Newlines are included in the output.\n    //\n    // Examples:\n    //\n    //   optional int32 foo = 1;  // Comment attached to foo.\n    //   // Comment attached to bar.\n    //   optional int32 bar = 2;\n    //\n    //   optional string baz = 3;\n    //   // Comment attached to baz.\n    //   // Another line attached to baz.\n    //\n    //   // Comment attached to qux.\n    //   //\n    //   // Another line attached to qux.\n    //   optional double qux = 4;\n    //\n    //   // Detached comment for corge. This is not leading or trailing comments\n    //   // to qux or corge because there are blank lines separating it from\n    //   // both.\n    //\n    //   // Detached comment for corge paragraph 2.\n    //\n    //   optional string corge = 5;\n    //   /* Block comment attached\n    //    * to corge.  Leading asterisks\n    //    * will be removed. */\n    //   /* Block comment attached to\n    //    * grault. */\n    //   optional int32 grault = 6;\n    //\n    //   // ignored detached comments.\n    optional string leading_comments = 3;\n    optional string trailing_comments = 4;\n    repeated string leading_detached_comments = 6;\n  }\n}\n\n// Describes the relationship between generated code and its original source\n// file. A GeneratedCodeInfo message is associated with only one generated\n// source file, but may contain references to different source .proto files.\nmessage GeneratedCodeInfo {\n  // An Annotation connects some span of text in generated code to an element\n  // of its generating .proto file.\n  repeated Annotation annotation = 1;\n  message Annotation {\n    // Identifies the element in the original source .proto file. 
This field\n    // is formatted the same as SourceCodeInfo.Location.path.\n    repeated int32 path = 1 [packed = true];\n\n    // Identifies the filesystem path to the original source .proto.\n    optional string source_file = 2;\n\n    // Identifies the starting offset in bytes in the generated code\n    // that relates to the identified object.\n    optional int32 begin = 3;\n\n    // Identifies the ending offset in bytes in the generated code that\n    // relates to the identified offset. The end offset should be one past\n    // the last relevant byte (so the length of the text = end - begin).\n    optional int32 end = 4;\n  }\n}\n"
  },
  {
    "path": "crates/snapshots/vendor/google/protobuf/empty.proto",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nsyntax = \"proto3\";\n\npackage google.protobuf;\n\noption csharp_namespace = \"Google.Protobuf.WellKnownTypes\";\noption go_package = \"google.golang.org/protobuf/types/known/emptypb\";\noption java_package = \"com.google.protobuf\";\noption java_outer_classname = \"EmptyProto\";\noption java_multiple_files = true;\noption objc_class_prefix = \"GPB\";\noption cc_enable_arenas = true;\n\n// A generic empty message that you can re-use to avoid defining duplicated\n// empty messages in your APIs. A typical example is to use it as the request\n// or the response type of an API method. For instance:\n//\n//     service Foo {\n//       rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n//     }\n//\n// The JSON representation for `Empty` is empty JSON object `{}`.\nmessage Empty {}\n"
  },
  {
    "path": "crates/snapshots/vendor/google/protobuf/field_mask.proto",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nsyntax = \"proto3\";\n\npackage google.protobuf;\n\noption csharp_namespace = \"Google.Protobuf.WellKnownTypes\";\noption java_package = \"com.google.protobuf\";\noption java_outer_classname = \"FieldMaskProto\";\noption java_multiple_files = true;\noption objc_class_prefix = \"GPB\";\noption go_package = \"google.golang.org/protobuf/types/known/fieldmaskpb\";\noption cc_enable_arenas = true;\n\n// `FieldMask` represents a set of symbolic field paths, for example:\n//\n//     paths: \"f.a\"\n//     paths: \"f.b.d\"\n//\n// Here `f` represents a field in some root message, `a` and `b`\n// fields in the message found in `f`, and `d` a field found in the\n// message in `f.b`.\n//\n// Field masks are used to specify a subset of fields that should be\n// returned by a get operation or modified by an update operation.\n// Field masks also have a custom JSON encoding (see below).\n//\n// # Field Masks in Projections\n//\n// When used in the context of a projection, a response message or\n// sub-message is filtered by the API to only contain those fields as\n// specified in the mask. 
For example, if the mask in the previous\n// example is applied to a response message as follows:\n//\n//     f {\n//       a : 22\n//       b {\n//         d : 1\n//         x : 2\n//       }\n//       y : 13\n//     }\n//     z: 8\n//\n// The result will not contain specific values for fields x,y and z\n// (their value will be set to the default, and omitted in proto text\n// output):\n//\n//\n//     f {\n//       a : 22\n//       b {\n//         d : 1\n//       }\n//     }\n//\n// A repeated field is not allowed except at the last position of a\n// paths string.\n//\n// If a FieldMask object is not present in a get operation, the\n// operation applies to all fields (as if a FieldMask of all fields\n// had been specified).\n//\n// Note that a field mask does not necessarily apply to the\n// top-level response message. In case of a REST get operation, the\n// field mask applies directly to the response, but in case of a REST\n// list operation, the mask instead applies to each individual message\n// in the returned resource list. In case of a REST custom method,\n// other definitions may be used. Where the mask applies will be\n// clearly documented together with its declaration in the API.  In\n// any case, the effect on the returned resource/resources is required\n// behavior for APIs.\n//\n// # Field Masks in Update Operations\n//\n// A field mask in update operations specifies which fields of the\n// targeted resource are going to be updated. The API is required\n// to only change the values of the fields as specified in the mask\n// and leave the others untouched. If a resource is passed in to\n// describe the updated values, the API ignores the values of all\n// fields not covered by the mask.\n//\n// If a repeated field is specified for an update operation, new values will\n// be appended to the existing repeated field in the target resource. 
Note that\n// a repeated field is only allowed in the last position of a `paths` string.\n//\n// If a sub-message is specified in the last position of the field mask for an\n// update operation, then new value will be merged into the existing sub-message\n// in the target resource.\n//\n// For example, given the target message:\n//\n//     f {\n//       b {\n//         d: 1\n//         x: 2\n//       }\n//       c: [1]\n//     }\n//\n// And an update message:\n//\n//     f {\n//       b {\n//         d: 10\n//       }\n//       c: [2]\n//     }\n//\n// then if the field mask is:\n//\n//  paths: [\"f.b\", \"f.c\"]\n//\n// then the result will be:\n//\n//     f {\n//       b {\n//         d: 10\n//         x: 2\n//       }\n//       c: [1, 2]\n//     }\n//\n// An implementation may provide options to override this default behavior for\n// repeated and message fields.\n//\n// In order to reset a field's value to the default, the field must\n// be in the mask and set to the default value in the provided resource.\n// Hence, in order to reset all fields of a resource, provide a default\n// instance of the resource and set all fields in the mask, or do\n// not provide a mask as described below.\n//\n// If a field mask is not present on update, the operation applies to\n// all fields (as if a field mask of all fields has been specified).\n// Note that in the presence of schema evolution, this may mean that\n// fields the client does not know and has therefore not filled into\n// the request will be reset to their default. If this is unwanted\n// behavior, a specific service may require a client to always specify\n// a field mask, producing an error if not.\n//\n// As with get operations, the location of the resource which\n// describes the updated values in the request message depends on the\n// operation kind. 
In any case, the effect of the field mask is\n// required to be honored by the API.\n//\n// ## Considerations for HTTP REST\n//\n// The HTTP kind of an update operation which uses a field mask must\n// be set to PATCH instead of PUT in order to satisfy HTTP semantics\n// (PUT must only be used for full updates).\n//\n// # JSON Encoding of Field Masks\n//\n// In JSON, a field mask is encoded as a single string where paths are\n// separated by a comma. Fields name in each path are converted\n// to/from lower-camel naming conventions.\n//\n// As an example, consider the following message declarations:\n//\n//     message Profile {\n//       User user = 1;\n//       Photo photo = 2;\n//     }\n//     message User {\n//       string display_name = 1;\n//       string address = 2;\n//     }\n//\n// In proto a field mask for `Profile` may look as such:\n//\n//     mask {\n//       paths: \"user.display_name\"\n//       paths: \"photo\"\n//     }\n//\n// In JSON, the same mask is represented as below:\n//\n//     {\n//       mask: \"user.displayName,photo\"\n//     }\n//\n// # Field Masks and Oneof Fields\n//\n// Field masks treat fields in oneofs just as regular fields. Consider the\n// following message:\n//\n//     message SampleMessage {\n//       oneof test_oneof {\n//         string name = 4;\n//         SubMessage sub_message = 9;\n//       }\n//     }\n//\n// The field mask can be:\n//\n//     mask {\n//       paths: \"name\"\n//     }\n//\n// Or:\n//\n//     mask {\n//       paths: \"sub_message\"\n//     }\n//\n// Note that oneof type names (\"test_oneof\" in this case) cannot be used in\n// paths.\n//\n// ## Field Mask Verification\n//\n// The implementation of any API method which has a FieldMask type field in the\n// request should verify the included field paths, and return an\n// `INVALID_ARGUMENT` error if any path is unmappable.\nmessage FieldMask {\n  // The set of field mask paths.\n  repeated string paths = 1;\n}\n"
  },
  {
    "path": "crates/snapshots/vendor/google/protobuf/timestamp.proto",
    "content": "// Protocol Buffers - Google's data interchange format\n// Copyright 2008 Google Inc.  All rights reserved.\n// https://developers.google.com/protocol-buffers/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nsyntax = \"proto3\";\n\npackage google.protobuf;\n\noption csharp_namespace = \"Google.Protobuf.WellKnownTypes\";\noption cc_enable_arenas = true;\noption go_package = \"google.golang.org/protobuf/types/known/timestamppb\";\noption java_package = \"com.google.protobuf\";\noption java_outer_classname = \"TimestampProto\";\noption java_multiple_files = true;\noption objc_class_prefix = \"GPB\";\n\n// A Timestamp represents a point in time independent of any time zone or local\n// calendar, encoded as a count of seconds and fractions of seconds at\n// nanosecond resolution. The count is relative to an epoch at UTC midnight on\n// January 1, 1970, in the proleptic Gregorian calendar which extends the\n// Gregorian calendar backwards to year one.\n//\n// All minutes are 60 seconds long. Leap seconds are \"smeared\" so that no leap\n// second table is needed for interpretation, using a [24-hour linear\n// smear](https://developers.google.com/time/smear).\n//\n// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. 
By\n// restricting to that range, we ensure that we can convert to and from [RFC\n// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.\n//\n// # Examples\n//\n// Example 1: Compute Timestamp from POSIX `time()`.\n//\n//     Timestamp timestamp;\n//     timestamp.set_seconds(time(NULL));\n//     timestamp.set_nanos(0);\n//\n// Example 2: Compute Timestamp from POSIX `gettimeofday()`.\n//\n//     struct timeval tv;\n//     gettimeofday(&tv, NULL);\n//\n//     Timestamp timestamp;\n//     timestamp.set_seconds(tv.tv_sec);\n//     timestamp.set_nanos(tv.tv_usec * 1000);\n//\n// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.\n//\n//     FILETIME ft;\n//     GetSystemTimeAsFileTime(&ft);\n//     UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;\n//\n//     // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z\n//     // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.\n//     Timestamp timestamp;\n//     timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));\n//     timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));\n//\n// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.\n//\n//     long millis = System.currentTimeMillis();\n//\n//     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)\n//         .setNanos((int) ((millis % 1000) * 1000000)).build();\n//\n//\n// Example 5: Compute Timestamp from Java `Instant.now()`.\n//\n//     Instant now = Instant.now();\n//\n//     Timestamp timestamp =\n//         Timestamp.newBuilder().setSeconds(now.getEpochSecond())\n//             .setNanos(now.getNano()).build();\n//\n//\n// Example 6: Compute Timestamp from current time in Python.\n//\n//     timestamp = Timestamp()\n//     timestamp.GetCurrentTime()\n//\n// # JSON Mapping\n//\n// In JSON format, the Timestamp type is encoded as a string in the\n// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. 
That is, the\n// format is \"{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z\"\n// where {year} is always expressed using four digits while {month}, {day},\n// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional\n// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),\n// are optional. The \"Z\" suffix indicates the timezone (\"UTC\"); the timezone\n// is required. A proto3 JSON serializer should always use UTC (as indicated by\n// \"Z\") when printing the Timestamp type and a proto3 JSON parser should be\n// able to accept both UTC and other timezones (as indicated by an offset).\n//\n// For example, \"2017-01-15T01:30:15.01Z\" encodes 15.01 seconds past\n// 01:30 UTC on January 15, 2017.\n//\n// In JavaScript, one can convert a Date object to this format using the\n// standard\n// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)\n// method. In Python, a standard `datetime.datetime` object can be converted\n// to this format using\n// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with\n// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use\n// the Joda Time's [`ISODateTimeFormat.dateTime()`](\n// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D\n// ) to obtain a formatter capable of generating timestamps in this format.\n//\n//\nmessage Timestamp {\n  // Represents seconds of UTC time since Unix epoch\n  // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to\n  // 9999-12-31T23:59:59Z inclusive.\n  int64 seconds = 1;\n\n  // Non-negative fractions of a second at nanosecond resolution. Negative\n  // second values with fractions must still have non-negative nanos values\n  // that count forward in time. Must be from 0 to 999,999,999\n  // inclusive.\n  int32 nanos = 2;\n}\n"
  },
  {
    "path": "deny.toml",
    "content": "# This template contains all of the possible sections and their default values\n\n# Note that all fields that take a lint level have these possible values:\n# * deny - An error will be produced and the check will fail\n# * warn - A warning will be produced, but the check will not fail\n# * allow - No warning or error will be produced, though in some cases a note\n# will be\n\n# The values provided in this template are the default values that will be used\n# when any section or field is not specified in your own configuration\n\n# Root options\n\n# The graph table configures how the dependency graph is constructed and thus\n# which crates the checks are performed against\n[graph]\n# If 1 or more target triples (and optionally, target_features) are specified,\n# only the specified targets will be checked when running `cargo deny check`.\n# This means, if a particular package is only ever used as a target specific\n# dependency, such as, for example, the `nix` crate only being used via the\n# `target_family = \"unix\"` configuration, that only having windows targets in\n# this list would mean the nix crate, as well as any of its exclusive\n# dependencies not shared by any other crates, would be ignored, as the target\n# list here is effectively saying which targets you are building for.\ntargets = [\n    # The triple can be any string, but only the target triples built in to\n    # rustc (as of 1.40) can be checked against actual config expressions\n    #\"x86_64-unknown-linux-musl\",\n    # You can also specify which target_features you promise are enabled for a\n    # particular target. 
target_features are currently not validated against\n    # the actual valid features supported by the target architecture.\n    #{ triple = \"wasm32-unknown-unknown\", features = [\"atomics\"] },\n]\n# When creating the dependency graph used as the source of truth when checks are\n# executed, this field can be used to prune crates from the graph, removing them\n# from the view of cargo-deny. This is an extremely heavy hammer, as if a crate\n# is pruned from the graph, all of its dependencies will also be pruned unless\n# they are connected to another crate in the graph that hasn't been pruned,\n# so it should be used with care. The identifiers are [Package ID Specifications]\n# (https://doc.rust-lang.org/cargo/reference/pkgid-spec.html)\n#exclude = []\n# If true, metadata will be collected with `--all-features`. Note that this can't\n# be toggled off if true, if you want to conditionally enable `--all-features` it\n# is recommended to pass `--all-features` on the cmd line instead\nall-features = false\n# If true, metadata will be collected with `--no-default-features`. The same\n# caveat with `all-features` applies\nno-default-features = false\n# If set, these feature will be enabled when collecting metadata. 
If `--features`\n# is specified on the cmd line they will take precedence over this option.\n#features = []\n\n# The output table provides options for how/if diagnostics are outputted\n[output]\n# When outputting inclusion graphs in diagnostics that include features, this\n# option can be used to specify the depth at which feature edges will be added.\n# This option is included since the graphs can be quite large and the addition\n# of features from the crate(s) to all of the graph roots can be far too verbose.\n# This option can be overridden via `--feature-depth` on the cmd line\nfeature-depth = 1\n\n# This section is considered when running `cargo deny check advisories`\n# More documentation for the advisories section can be found here:\n# https://embarkstudios.github.io/cargo-deny/checks/advisories/cfg.html\n[advisories]\n# The path where the advisory databases are cloned/fetched into\n#db-path = \"$CARGO_HOME/advisory-dbs\"\n# The url(s) of the advisory databases to use\n#db-urls = [\"https://github.com/rustsec/advisory-db\"]\n# A list of advisory IDs to ignore. 
Note that ignored advisories will still\n# output a note when they are encountered.\nignore = [\n    #\"RUSTSEC-0000-0000\",\n    #{ id = \"RUSTSEC-0000-0000\", reason = \"you can specify a reason the advisory is ignored\" },\n    #\"a-crate-that-is-yanked@0.1.1\", # you can also ignore yanked crate versions if you wish\n    #{ crate = \"a-crate-that-is-yanked@0.1.1\", reason = \"you can specify why you are ignoring the yanked crate\" },\n]\n# If this is true, then cargo deny will use the git executable to fetch advisory database.\n# If this is false, then it uses a built-in git library.\n# Setting this to true can be helpful if you have special authentication requirements that cargo-deny does not support.\n# See Git Authentication for more information about setting up git authentication.\n#git-fetch-with-cli = true\n\n# This section is considered when running `cargo deny check licenses`\n# More documentation for the licenses section can be found here:\n# https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html\n[licenses]\n# List of explicitly allowed licenses\n# See https://spdx.org/licenses/ for list of possible licenses\n# [possible values: any SPDX 3.11 short identifier (+ optional exception)].\nallow = [\n    \"MIT\",\n    \"Apache-2.0\",\n    \"BSD-3-Clause\",\n    \"Zlib\",\n    \"Unicode-3.0\",\n    #\"Apache-2.0 WITH LLVM-exception\",\n]\n# The confidence threshold for detecting a license from license text.\n# The higher the value, the more closely the license text must be to the\n# canonical license text of a valid SPDX license file.\n# [possible values: any between 0.0 and 1.0].\nconfidence-threshold = 0.8\n# Allow 1 or more licenses on a per-crate basis, so that particular licenses\n# aren't accepted for every possible crate as with the normal allow list\nexceptions = [\n    # Each entry is the crate and version constraint, and its specific allow\n    # list\n    #{ allow = [\"Zlib\"], crate = \"adler32\" },\n    { allow = 
[\"Unicode-DFS-2016\"], name = \"unicode-ident\", version = \"*\" },\n]\n\n# Some crates don't have (easily) machine readable licensing information,\n# adding a clarification entry for it allows you to manually specify the\n# licensing information\n#[[licenses.clarify]]\n# The package spec the clarification applies to\n#crate = \"ring\"\n# The SPDX expression for the license requirements of the crate\n#expression = \"MIT AND ISC AND OpenSSL\"\n# One or more files in the crate's source used as the \"source of truth\" for\n# the license expression. If the contents match, the clarification will be used\n# when running the license check, otherwise the clarification will be ignored\n# and the crate will be checked normally, which may produce warnings or errors\n# depending on the rest of your configuration\n#license-files = [\n# Each entry is a crate relative path, and the (opaque) hash of its contents\n#{ path = \"LICENSE\", hash = 0xbd0eed23 }\n#]\n\n[licenses.private]\n# If true, ignores workspace crates that aren't published, or are only\n# published to private registries.\n# To see how to mark a crate as unpublished (to the official registry),\n# visit https://doc.rust-lang.org/cargo/reference/manifest.html#the-publish-field.\nignore = false\n# One or more private registries that you might publish crates to, if a crate\n# is only published to private registries, and ignore is true, the crate will\n# not have its license(s) checked\nregistries = [\n    #\"https://sekretz.com/registry\n]\n\n# This section is considered when running `cargo deny check bans`.\n# More documentation about the 'bans' section can be found here:\n# https://embarkstudios.github.io/cargo-deny/checks/bans/cfg.html\n[bans]\n# Lint level for when multiple versions of the same crate are detected\nmultiple-versions = \"warn\"\n# Lint level for when a crate version requirement is `*`\nwildcards = \"allow\"\n# The graph highlighting used when creating dotgraphs for crates\n# with multiple versions\n# 
* lowest-version - The path to the lowest versioned duplicate is highlighted\n# * simplest-path - The path to the version with the fewest edges is highlighted\n# * all - Both lowest-version and simplest-path are used\nhighlight = \"all\"\n# The default lint level for `default` features for crates that are members of\n# the workspace that is being checked. This can be overridden by allowing/denying\n# `default` on a crate-by-crate basis if desired.\nworkspace-default-features = \"allow\"\n# The default lint level for `default` features for external crates that are not\n# members of the workspace. This can be overridden by allowing/denying `default`\n# on a crate-by-crate basis if desired.\nexternal-default-features = \"allow\"\n# List of crates that are allowed. Use with care!\nallow = [\n    #\"ansi_term@0.11.0\",\n    #{ crate = \"ansi_term@0.11.0\", reason = \"you can specify a reason it is allowed\" },\n]\n# List of crates to deny\ndeny = [\n    #\"ansi_term@0.11.0\",\n    #{ crate = \"ansi_term@0.11.0\", reason = \"you can specify a reason it is banned\" },\n    # Wrapper crates can optionally be specified to allow the crate when it\n    # is a direct dependency of the otherwise banned crate\n    #{ crate = \"ansi_term@0.11.0\", wrappers = [\"this-crate-directly-depends-on-ansi_term\"] },\n]\n\n# List of features to allow/deny\n# Each entry the name of a crate and a version range. If version is\n# not specified, all versions will be matched.\n#[[bans.features]]\n#crate = \"reqwest\"\n# Features to not allow\n#deny = [\"json\"]\n# Features to allow\n#allow = [\n#    \"rustls\",\n#    \"__rustls\",\n#    \"__tls\",\n#    \"hyper-rustls\",\n#    \"rustls\",\n#    \"rustls-pemfile\",\n#    \"rustls-tls-webpki-roots\",\n#    \"tokio-rustls\",\n#    \"webpki-roots\",\n#]\n# If true, the allowed features must exactly match the enabled feature set. 
If\n# this is set there is no point setting `deny`\n#exact = true\n\n# Certain crates/versions that will be skipped when doing duplicate detection.\nskip = [\n    #\"ansi_term@0.11.0\",\n    #{ crate = \"ansi_term@0.11.0\", reason = \"you can specify a reason why it can't be updated/removed\" },\n]\n# Similarly to `skip` allows you to skip certain crates during duplicate\n# detection. Unlike skip, it also includes the entire tree of transitive\n# dependencies starting at the specified crate, up to a certain depth, which is\n# by default infinite.\nskip-tree = [\n    #\"ansi_term@0.11.0\", # will be skipped along with _all_ of its direct and transitive dependencies\n    #{ crate = \"ansi_term@0.11.0\", depth = 20 },\n]\n\n# This section is considered when running `cargo deny check sources`.\n# More documentation about the 'sources' section can be found here:\n# https://embarkstudios.github.io/cargo-deny/checks/sources/cfg.html\n[sources]\n# Lint level for what to happen when a crate from a crate registry that is not\n# in the allow list is encountered\nunknown-registry = \"warn\"\n# Lint level for what to happen when a crate from a git repository that is not\n# in the allow list is encountered\nunknown-git = \"warn\"\n# List of URLs for allowed crate registries. Defaults to the crates.io index\n# if not specified. If it is specified but empty, no registries are allowed.\nallow-registry = [\"https://github.com/rust-lang/crates.io-index\"]\n# List of URLs for allowed Git repositories\nallow-git = []\n\n[sources.allow-org]\n# github.com organizations to allow git sources for\ngithub = []\n# gitlab.com organizations to allow git sources for\ngitlab = []\n# bitbucket.org organizations to allow git sources for\nbitbucket = []\n"
  },
  {
    "path": "rust-toolchain.toml",
    "content": "[toolchain]\nchannel = \"1.91\"\ncomponents = [\"rustfmt\", \"clippy\", \"llvm-tools\"]\n"
  },
  {
    "path": "rustfmt.toml",
    "content": "newline_style = \"Unix\"\nunstable_features = true # Cargo fmt now needs to be called with `cargo +nightly fmt`\ngroup_imports = \"StdExternalCrate\" # Create 3 groups: std, external crates, and self.\nimports_granularity = \"Crate\" # Merge imports from the same crate into a single use statement\n"
  },
  {
    "path": "scripts/install-protobuf.sh",
    "content": "#!/bin/bash\n\n# Helper script for Github Actions to install protobuf on different runners.\necho \"OS: $RUNNER_OS\"\n\nif [ \"$RUNNER_OS\" == 'Linux' ]; then\n    # Install on Linux\n    sudo apt-get update\n    sudo apt-get install -y protobuf-compiler\nelif [ \"$RUNNER_OS\" == 'macOS' ]; then\n    # Install on macOS\n    brew install protobuf\nelif [ \"$RUNNER_OS\" == 'Windows' ]; then\n    # Install on Windows\n    choco install -y protoc\nelse\n    echo \"Unsupported OS: $RUNNER_OS\"\n    exit 1\nfi\n\n# Check the installed Protobuf version\nprotoc --version\n"
  },
  {
    "path": "scripts/update-vendor.sh",
    "content": "#!/bin/bash\n\n# A simple bash script to synchronize proto files from containerd to vendor/ directories of\n# each crate.\n#\n# VERSION specified containerd release that script will download to extract protobuf files.\n#\n# For each crate, the script expects a text file named `rsync.txt` in the crate's directory.\n# The file should contain a list of proto files that should be synchronized from containerd.\n\nVERSION=\"v2.3.0\"\n\nset -x\n\n# Download containerd source code.\nwget https://github.com/containerd/containerd/archive/refs/tags/$VERSION.tar.gz -O containerd.tar.gz\nif [ $? -ne 0 ]; then\n    echo \"Error: Failed to download containerd source code.\"\n    exit 1\nfi\n\n# Ensure the file is removed on exit\ntrap 'rm containerd.tar.gz' EXIT\n\n# Extract zip archive to a temporary directory.\nTEMP_DIR=$(mktemp -d)\ntar --extract \\\n    --file containerd.tar.gz \\\n    --strip-components=1 \\\n    --directory $TEMP_DIR\n\nfunction sync_crate() {\n    local crate_name=$1\n    local temp_dir=$2\n\n    rm -rf crates/$crate_name/vendor/github.com/containerd/containerd/\n\n    rsync -avm \\\n        --include='*/' \\\n        --include-from=crates/$crate_name/rsync.txt \\\n        --exclude='*' \\\n        $temp_dir/ \\\n        crates/$crate_name/vendor/github.com/containerd/containerd/\n}\n\nsync_crate \"shim-protos\" $TEMP_DIR\nsync_crate \"snapshots\" $TEMP_DIR\nsync_crate \"client\" $TEMP_DIR\n"
  }
]