Repository: sigstore/sigstore-rs Branch: main Commit: 1485d8b140fc Files: 165 Total size: 814.8 KB Directory structure: gitextract_mgzc87yz/ ├── .cargo/ │ └── audit.toml ├── .github/ │ ├── dependabot.yml │ └── workflows/ │ ├── auto-publish-crates-upon-release.yml │ ├── conformance.yml │ ├── security-audit.yml │ ├── tests.yml │ └── zizmor.yml ├── .gitignore ├── .taplo.toml ├── CHANGELOG.md ├── CODEOWNERS ├── CODE_OF_CONDUCT.md ├── CONTRIBUTORS.md ├── COPYRIGHT.txt ├── Cargo.toml ├── LICENSE ├── Makefile ├── README.md ├── clippy.toml ├── examples/ │ ├── README.md │ ├── bundle/ │ │ ├── README.md │ │ └── main.rs │ ├── cosign/ │ │ ├── sign/ │ │ │ ├── README.md │ │ │ └── main.rs │ │ ├── trustroot/ │ │ │ └── main.rs │ │ ├── verify/ │ │ │ ├── README.md │ │ │ └── main.rs │ │ ├── verify-blob/ │ │ │ ├── .gitignore │ │ │ ├── README.md │ │ │ └── main.rs │ │ └── verify-bundle/ │ │ ├── .gitignore │ │ ├── README.md │ │ ├── main.rs │ │ └── run.sh │ ├── fulcio/ │ │ └── cert/ │ │ └── main.rs │ ├── key_interface/ │ │ ├── README.md │ │ ├── key_pair_gen_and_export/ │ │ │ └── main.rs │ │ ├── key_pair_gen_sign_verify/ │ │ │ └── main.rs │ │ └── key_pair_import/ │ │ ├── ECDSA_P256_ASN1_ENCRYPTED_PRIVATE_PEM.key │ │ ├── ECDSA_P256_ASN1_PRIVATE_DER.key │ │ ├── ECDSA_P256_ASN1_PRIVATE_PEM.key │ │ ├── ECDSA_P256_ASN1_PUBLIC_DER.pub │ │ ├── ECDSA_P256_ASN1_PUBLIC_PEM.pub │ │ └── main.rs │ ├── openidflow/ │ │ ├── README.md │ │ └── openidconnect/ │ │ └── main.rs │ └── rekor/ │ ├── README.md │ ├── create_log_entry/ │ │ └── main.rs │ ├── get_log_entry_by_index/ │ │ └── main.rs │ ├── get_log_entry_by_uuid/ │ │ └── main.rs │ ├── get_log_info/ │ │ └── main.rs │ ├── get_log_proof/ │ │ └── main.rs │ ├── get_public_key/ │ │ └── main.rs │ ├── merkle_proofs/ │ │ ├── consistency.rs │ │ └── inclusion.rs │ ├── search_index/ │ │ └── main.rs │ └── search_log_query/ │ └── main.rs ├── rust-toolchain.toml ├── src/ │ ├── bundle/ │ │ ├── mod.rs │ │ ├── models.rs │ │ ├── sign.rs │ │ └── verify/ │ │ ├── mod.rs │ │ ├── 
models.rs │ │ ├── policy.rs │ │ └── verifier.rs │ ├── cosign/ │ │ ├── bundle.rs │ │ ├── client.rs │ │ ├── client_builder.rs │ │ ├── constants.rs │ │ ├── constraint/ │ │ │ ├── annotation.rs │ │ │ ├── mod.rs │ │ │ └── signature.rs │ │ ├── mod.rs │ │ ├── payload/ │ │ │ ├── mod.rs │ │ │ └── simple_signing.rs │ │ ├── signature_layers.rs │ │ └── verification_constraint/ │ │ ├── annotation_verifier.rs │ │ ├── cert_subject_email_verifier.rs │ │ ├── cert_subject_url_verifier.rs │ │ ├── certificate_verifier.rs │ │ ├── mod.rs │ │ └── public_key_verifier.rs │ ├── crypto/ │ │ ├── certificate.rs │ │ ├── certificate_pool.rs │ │ ├── keyring.rs │ │ ├── merkle/ │ │ │ ├── mod.rs │ │ │ ├── proof_verification.rs │ │ │ └── rfc6962.rs │ │ ├── mod.rs │ │ ├── signing_key/ │ │ │ ├── ecdsa/ │ │ │ │ ├── ec.rs │ │ │ │ └── mod.rs │ │ │ ├── ed25519.rs │ │ │ ├── kdf.rs │ │ │ ├── mod.rs │ │ │ └── rsa/ │ │ │ ├── keypair.rs │ │ │ └── mod.rs │ │ ├── transparency.rs │ │ └── verification_key.rs │ ├── errors.rs │ ├── fulcio/ │ │ ├── mod.rs │ │ ├── models.rs │ │ └── oauth.rs │ ├── lib.rs │ ├── mock_client.rs │ ├── oauth/ │ │ ├── http_client.rs │ │ ├── mod.rs │ │ ├── openidflow.rs │ │ └── token.rs │ ├── registry/ │ │ ├── config.rs │ │ ├── mod.rs │ │ ├── oci_caching_client.rs │ │ ├── oci_client.rs │ │ └── oci_reference.rs │ ├── rekor/ │ │ ├── apis/ │ │ │ ├── configuration.rs │ │ │ ├── entries_api.rs │ │ │ ├── index_api.rs │ │ │ ├── mod.rs │ │ │ ├── pubkey_api.rs │ │ │ └── tlog_api.rs │ │ ├── mod.rs │ │ └── models/ │ │ ├── alpine.rs │ │ ├── alpine_all_of.rs │ │ ├── checkpoint.rs │ │ ├── consistency_proof.rs │ │ ├── error.rs │ │ ├── hashedrekord.rs │ │ ├── hashedrekord_all_of.rs │ │ ├── helm.rs │ │ ├── helm_all_of.rs │ │ ├── inactive_shard_log_info.rs │ │ ├── inclusion_proof.rs │ │ ├── intoto.rs │ │ ├── intoto_all_of.rs │ │ ├── jar.rs │ │ ├── jar_all_of.rs │ │ ├── log_entry.rs │ │ ├── log_info.rs │ │ ├── mod.rs │ │ ├── proposed_entry.rs │ │ ├── rekord.rs │ │ ├── rekord_all_of.rs │ │ ├── rfc3161.rs │ │ ├── 
rfc3161_all_of.rs │ │ ├── rpm.rs │ │ ├── rpm_all_of.rs │ │ ├── search_index.rs │ │ ├── search_index_public_key.rs │ │ ├── search_log_query.rs │ │ ├── tuf.rs │ │ └── tuf_all_of.rs │ └── trust/ │ ├── mod.rs │ └── sigstore/ │ ├── constants.rs │ ├── mod.rs │ └── transport.rs ├── tests/ │ ├── conformance/ │ │ ├── Cargo.toml │ │ └── conformance.rs │ └── data/ │ └── keys/ │ ├── cosign_generated_encrypted_empty_private.key │ ├── ecdsa_encrypted_private.key │ ├── ecdsa_private.key │ ├── ed25519_encrypted_private.key │ ├── ed25519_private.key │ ├── rsa_encrypted_private.key │ └── rsa_private.key └── trust_root/ └── prod/ ├── root.json └── trusted_root.json ================================================ FILE CONTENTS ================================================ ================================================ FILE: .cargo/audit.toml ================================================ [advisories] ignore = [ "RUSTSEC-2023-0071", # "Classic" RSA timing sidechannel attack from non-constant-time implementation. # Okay for local use. # https://rustsec.org/advisories/RUSTSEC-2023-0071.html "RUSTSEC-2024-0370", # This is a warning about `proc-macro-errors` being unmaintained. It's a transitive dependency of `sigstore` and `oci-spec`. ] ================================================ FILE: .github/dependabot.yml ================================================ # # Copyright 2022 The Sigstore Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
version: 2 updates: - package-ecosystem: cargo directory: "/" schedule: interval: weekly open-pull-requests-limit: 10 cooldown: default-days: 7 - package-ecosystem: "github-actions" directory: "/" schedule: interval: "weekly" open-pull-requests-limit: 10 cooldown: default-days: 7 ================================================ FILE: .github/workflows/auto-publish-crates-upon-release.yml ================================================ name: Publish-Crate-Upon-Release on: release: types: [published] permissions: contents: read concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true jobs: publish-automatically: name: Publish crates runs-on: ubuntu-latest permissions: id-token: write # needed to get OpenID Connect token for authentication steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - uses: rust-lang/crates-io-auth-action@bbd81622f20ce9e2dd9622e3218b975523e45bbe # v1.0.4 id: auth - name: Rustup run: | rustup install stable rustup override set stable - name: publish crates env: CARGO_REGISTRY_TOKEN: "${{ steps.auth.outputs.token }}" run: cargo publish ================================================ FILE: .github/workflows/conformance.yml ================================================ on: [workflow_dispatch] name: Conformance Suite permissions: contents: read concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true jobs: conformance: name: Check sigstore conformance runs-on: ubuntu-latest steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - name: Rustup run: | rustup install --profile minimal stable rustup override set stable - uses: Swatinem/rust-cache@c19371144df3bb44fab255c43d04cbc2ab54d1c4 # v2.9.1 - run: | cargo build --manifest-path=tests/conformance/Cargo.toml - uses: 
sigstore/sigstore-conformance@main with: entrypoint: ${{ github.workspace }}/tests/conformance/target/debug/sigstore ================================================ FILE: .github/workflows/security-audit.yml ================================================ name: Security audit on: schedule: - cron: "0 0 * * *" push: paths: - "**/Cargo.toml" - "**/Cargo.lock" permissions: {} concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true jobs: audit: name: Audit for vulnerable crates permissions: checks: write # for rustsec/audit-check to create check contents: read # for actions/checkout to fetch code issues: write # for rustsec/audit-check to create issues runs-on: ubuntu-latest steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - name: Generate lockfile run: cargo generate-lockfile - uses: rustsec/audit-check@69366f33c96575abad1ee0dba8212993eecbe998 # v2.0.0 with: token: ${{ secrets.GITHUB_TOKEN }} ================================================ FILE: .github/workflows/tests.yml ================================================ on: [push, pull_request] name: Continuous integration permissions: contents: read concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true jobs: check: name: Check features runs-on: ubuntu-latest steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - uses: dtolnay/rust-toolchain@e081816240890017053eacbb1bdf337761dc5582 # 1.95.0 - uses: Swatinem/rust-cache@c19371144df3bb44fab255c43d04cbc2ab54d1c4 # v2.9.1 - uses: taiki-e/install-action@481c34c1cf3a84c68b5e46f4eccfc82af798415a # v2.75.23 with: tool: cargo-hack # cosign, full, mock-client, registry will ONLY compile with a *-tls feature - run: | cargo hack check --each-feature --skip cosign --skip full --skip mock-client --skip registry 
check-native-tls: name: Check native-tls features runs-on: ubuntu-latest steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - uses: dtolnay/rust-toolchain@e081816240890017053eacbb1bdf337761dc5582 # 1.95.0 - uses: Swatinem/rust-cache@c19371144df3bb44fab255c43d04cbc2ab54d1c4 # v2.9.1 - uses: taiki-e/install-action@481c34c1cf3a84c68b5e46f4eccfc82af798415a # v2.75.23 with: tool: cargo-hack - run: | cargo hack check --feature-powerset --features native-tls --skip wasm --skip test-registry --skip rustls-tls --skip rustls-tls-native-roots check-rustls-tls: name: Check rusttls-tls features runs-on: ubuntu-latest steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - uses: dtolnay/rust-toolchain@e081816240890017053eacbb1bdf337761dc5582 # 1.95.0 - uses: Swatinem/rust-cache@c19371144df3bb44fab255c43d04cbc2ab54d1c4 # v2.9.1 - uses: taiki-e/install-action@481c34c1cf3a84c68b5e46f4eccfc82af798415a # v2.75.23 with: tool: cargo-hack - run: | cargo hack check --feature-powerset --features rustls-tls --skip wasm --skip test-registry --skip native-tls test-native-tls: name: Test Suite runs-on: ubuntu-latest steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - uses: dtolnay/rust-toolchain@e081816240890017053eacbb1bdf337761dc5582 # 1.95.0 - uses: Swatinem/rust-cache@c19371144df3bb44fab255c43d04cbc2ab54d1c4 # v2.9.1 - run: | cargo test --workspace --no-default-features --features full,native-tls,test-registry test-rustls-tls: name: Test Suite runs-on: ubuntu-latest steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - uses: dtolnay/rust-toolchain@e081816240890017053eacbb1bdf337761dc5582 # 1.95.0 - uses: Swatinem/rust-cache@c19371144df3bb44fab255c43d04cbc2ab54d1c4 # v2.9.1 - run: | cargo test --workspace --no-default-features --features 
full,rustls-tls,test-registry check-wasm: name: Check wasm32-unknown-unknown target runs-on: ubuntu-latest steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - uses: dtolnay/rust-toolchain@e081816240890017053eacbb1bdf337761dc5582 # 1.95.0 with: targets: wasm32-unknown-unknown - uses: Swatinem/rust-cache@c19371144df3bb44fab255c43d04cbc2ab54d1c4 # v2.9.1 - run: | cargo check --no-default-features --features wasm --target wasm32-unknown-unknown doc: name: Build Documentation runs-on: ubuntu-latest steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - uses: dtolnay/rust-toolchain@351f82a4dc29e4159746a068ed925da17341219f # nightly - uses: Swatinem/rust-cache@c19371144df3bb44fab255c43d04cbc2ab54d1c4 # v2.9.1 - run: | make doc toml: name: Toml format runs-on: ubuntu-latest steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - uses: Swatinem/rust-cache@c19371144df3bb44fab255c43d04cbc2ab54d1c4 # v2.9.1 - uses: dtolnay/rust-toolchain@e081816240890017053eacbb1bdf337761dc5582 # 1.95.0 - uses: taiki-e/install-action@481c34c1cf3a84c68b5e46f4eccfc82af798415a # v2.75.23 with: tool: taplo-cli - run: | taplo fmt --check fmt: name: Rustfmt runs-on: ubuntu-latest steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - uses: dtolnay/rust-toolchain@e081816240890017053eacbb1bdf337761dc5582 # 1.95.0 with: components: rustfmt - uses: Swatinem/rust-cache@c19371144df3bb44fab255c43d04cbc2ab54d1c4 # v2.9.1 - run: | cargo fmt --all -- --check clippy: name: Clippy runs-on: ubuntu-latest steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - uses: dtolnay/rust-toolchain@e081816240890017053eacbb1bdf337761dc5582 # 1.95.0 with: components: clippy - uses: 
Swatinem/rust-cache@c19371144df3bb44fab255c43d04cbc2ab54d1c4 # v2.9.1 - run: | cargo clippy --workspace -- -D warnings ================================================ FILE: .github/workflows/zizmor.yml ================================================ name: 'zizmor: GitHub Actions Security Analysis' on: push: branches: ["main"] pull_request: branches: ["**"] permissions: contents: read concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true jobs: zizmor: name: Zizmor runs-on: ubuntu-24.04 permissions: security-events: write # needed to create vulnerability alerts steps: - name: Checkout repository uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 with: persist-credentials: false - name: Run zizmor 🌈 uses: zizmorcore/zizmor-action@b1d7e1fb5de872772f31590499237e7cce841e8e # v0.5.3 with: persona: pedantic ================================================ FILE: .gitignore ================================================ debug/ target/ # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html Cargo.lock # These are backup files generated by rustfmt **/*.rs.bk # MSVC Windows builds of rustc generate these, which store debugging information *.pdb # ide files .idea .vscode ================================================ FILE: .taplo.toml ================================================ exclude = [".cargo/audit.toml"] [[rule]] [rule.formatting] indent_string = " " reorder_arrays = true reorder_keys = true ================================================ FILE: CHANGELOG.md ================================================ # v0.10.0 ## What's Changed - chore(deps): Update oci-distribution requirement from 0.10 to 0.11 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/343 - verify: init by @jleightcap in https://github.com/sigstore/sigstore-rs/pull/311 - 
chore(deps): Update rstest requirement from 0.18.1 to 0.19.0 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/351 - chore(deps): Bump actions/checkout from 4.1.2 to 4.1.5 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/360 - fix linter warning by @flavio in https://github.com/sigstore/sigstore-rs/pull/361 - chore(deps): Update cached requirement from 0.49.2 to 0.51.3 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/362 - chore(deps): Update webbrowser requirement from 0.8.12 to 1.0.1 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/359 - chore(deps): Bump actions/checkout from 4.1.5 to 4.1.6 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/363 - chore(deps): Update testcontainers requirement from 0.15 to 0.16 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/355 - chore(deps): change provider of cargo-audit GH action by @flavio in https://github.com/sigstore/sigstore-rs/pull/364 - fix docs by @flavio in https://github.com/sigstore/sigstore-rs/pull/366 - fix: allow ManualTrustRoot to have multiple rekor keys by @flavio in https://github.com/sigstore/sigstore-rs/pull/365 - build(deps): update testcontainers requirement from 0.16 to 0.17 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/368 - build(deps): update rstest requirement from 0.19.0 to 0.21.0 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/370 - build(deps): bump actions/checkout from 4.1.6 to 4.1.7 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/372 - build(deps): update testcontainers requirement from 0.17 to 0.18 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/371 - Signed Certificate Timestamp verification by @tnytown in https://github.com/sigstore/sigstore-rs/pull/326 - transparency: pull OID constants from `const-oid` by @tnytown in https://github.com/sigstore/sigstore-rs/pull/374 - build(deps): update testcontainers requirement from 0.18 to 0.19 by @dependabot 
in https://github.com/sigstore/sigstore-rs/pull/375 - build(deps): update cached requirement from 0.51.3 to 0.52.0 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/377 - build(deps): update testcontainers requirement from 0.19 to 0.20 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/376 - build(deps): update cached requirement from 0.52.0 to 0.53.1 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/379 - build(deps): update rstest requirement from 0.21.0 to 0.22.0 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/383 - build(deps): update testcontainers requirement from 0.20 to 0.21 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/382 - build(deps): update testcontainers requirement from 0.21 to 0.22 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/386 - fix: Allow empty passwords for encrypted pem files by @gmpinder in https://github.com/sigstore/sigstore-rs/pull/381 - build(deps): update tough requirement from 0.17.1 to 0.18.0 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/389 - dependency cleanup by @flavio in https://github.com/sigstore/sigstore-rs/pull/390 - chore: update cargo audit ignore list by @flavio in https://github.com/sigstore/sigstore-rs/pull/387 ## New Contributors - @tnytown made their first contribution in https://github.com/sigstore/sigstore-rs/pull/326 - @gmpinder made their first contribution in https://github.com/sigstore/sigstore-rs/pull/381 **Full Changelog**: https://github.com/sigstore/sigstore-rs/compare/v0.9.0...v0.10.0 # v0.9.0 ## What's Changed - sign: init by @jleightcap in https://github.com/sigstore/sigstore-rs/pull/310 - cargo audit: ignore RUSTSEC-2023-0071 by @jleightcap in https://github.com/sigstore/sigstore-rs/pull/321 - chore(deps): Update json-syntax requirement from 0.9.6 to 0.10.0 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/319 - chore(deps): Update cached requirement from 0.46.0 to 0.47.0 by @dependabot in 
https://github.com/sigstore/sigstore-rs/pull/323 - chore(deps): Update serial_test requirement from 2.0.0 to 3.0.0 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/322 - dep: update rustls-webpki, fold in pki_types by @jleightcap in https://github.com/sigstore/sigstore-rs/pull/324 - chore(deps): Update cached requirement from 0.47.0 to 0.48.0 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/325 - chore(deps): Update json-syntax requirement from 0.10.0 to 0.11.1 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/327 - chore(deps): Update cached requirement from 0.48.0 to 0.49.2 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/329 - chore(deps): Update json-syntax requirement from 0.11.1 to 0.12.2 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/330 - lint: fix lint error of chrono and tokio by @Xynnn007 in https://github.com/sigstore/sigstore-rs/pull/334 - chore(deps): Update base64 requirement from 0.21.0 to 0.22.0 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/332 - The `Repository` trait and `ManualRepository` struct no longer require a feature flag by @tannaurus in https://github.com/sigstore/sigstore-rs/pull/331 - chore(deps): Bump actions/checkout from 4.1.1 to 4.1.2 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/336 - chore(deps): Update reqwest requirement from 0.11 to 0.12 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/341 - update tough dep by @astoycos in https://github.com/sigstore/sigstore-rs/pull/340 - Tag the 0.9.0 release by @flavio in https://github.com/sigstore/sigstore-rs/pull/342 ## New Contributors - @tannaurus made their first contribution in https://github.com/sigstore/sigstore-rs/pull/331 - @astoycos made their first contribution in https://github.com/sigstore/sigstore-rs/pull/340 **Full Changelog**: https://github.com/sigstore/sigstore-rs/compare/v0.8.0...v0.9.0 # v0.8.0 ## What's Changed - chore(deps): Update rstest 
requirement from 0.17.0 to 0.18.1 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/282 - chore(deps): do not enable default features of chrono by @flavio in https://github.com/sigstore/sigstore-rs/pull/286 - chore(deps): Update pem requirement from 2.0 to 3.0 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/289 - conformance: add conformance CLI and action by @jleightcap in https://github.com/sigstore/sigstore-rs/pull/287 - chore: fix clippy warnings by @flavio in https://github.com/sigstore/sigstore-rs/pull/292 - chore(deps): Bump actions/checkout from 3.5.3 to 3.6.0 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/291 - chore(deps): Update tough requirement from 0.13 to 0.14 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/290 - chore(deps): update to latest version of picky by @flavio in https://github.com/sigstore/sigstore-rs/pull/293 - chore(deps): Bump actions/checkout from 3.6.0 to 4.0.0 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/294 - chore: add repository link to Cargo metadata by @flavio in https://github.com/sigstore/sigstore-rs/pull/297 - chore(deps): Update cached requirement from 0.44.0 to 0.45.1 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/298 - chore(deps): Bump actions/checkout from 4.0.0 to 4.1.0 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/302 - chore(deps): Update cached requirement from 0.45.1 to 0.46.0 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/301 - chore(deps): Update testcontainers requirement from 0.14 to 0.15 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/303 - chore(deps): Bump actions/checkout from 4.1.0 to 4.1.1 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/304 - cosign/tuf: use trustroot by @jleightcap in https://github.com/sigstore/sigstore-rs/pull/305 - Fix broken tests, update deps by @flavio in https://github.com/sigstore/sigstore-rs/pull/313 ## New Contributors - 
@jleightcap made their first contribution in https://github.com/sigstore/sigstore-rs/pull/287 **Full Changelog**: https://github.com/sigstore/sigstore-rs/compare/v0.7.2...v0.8.0 # v0.7.2 ## What's Changed - chore(deps): Update cached requirement from 0.42.0 to 0.44.0 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/277 - chore(deps): Bump actions/checkout from 3.5.2 to 3.5.3 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/278 - chore(deps): update picky dependency by @flavio in https://github.com/sigstore/sigstore-rs/pull/279 **Full Changelog**: https://github.com/sigstore/sigstore-rs/compare/v0.7.1...v0.7.2 # v0.7.1 ## What's Changed - fix: ensure cosign client can be sent between threads by @flavio in https://github.com/sigstore/sigstore-rs/pull/275 **Full Changelog**: https://github.com/sigstore/sigstore-rs/compare/v0.7.0...v0.7.1 # v0.7.0 ## What's Changed - Fix typo in SignatureLayer::new doc comment by @danbev in https://github.com/sigstore/sigstore-rs/pull/170 - feat: replace example dependency docker_credential by @Xynnn007 in https://github.com/sigstore/sigstore-rs/pull/172 - Clean up readme by @lukehinds in https://github.com/sigstore/sigstore-rs/pull/173 - chore(deps): Update rstest requirement from 0.15.0 to 0.16.0 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/174 - Fix typo in simple_signing.rs by @danbev in https://github.com/sigstore/sigstore-rs/pull/175 - Introduce SignedArtifactBundle by @danbev in https://github.com/sigstore/sigstore-rs/pull/171 - chore(deps): Update base64 requirement from 0.13.0 to 0.20.0 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/177 - chore(deps): Bump actions/checkout from 3.1.0 to 3.2.0 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/180 - chore(deps): Update serial_test requirement from 0.9.0 to 0.10.0 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/182 - chore(deps): Update cached requirement from 0.40.0 to 0.41.0 by @dependabot 
in https://github.com/sigstore/sigstore-rs/pull/181 - Fix typo in SecretBoxCipher doc comment by @danbev in https://github.com/sigstore/sigstore-rs/pull/179 - chore(deps): Update cached requirement from 0.41.0 to 0.42.0 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/185 - chore(deps): Bump actions/checkout from 3.2.0 to 3.3.0 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/183 - chore(deps): Update base64 requirement from 0.20.0 to 0.21.0 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/184 - Add cosign verify-bundle example by @danbev in https://github.com/sigstore/sigstore-rs/pull/186 - Fix incorrect base64_signature doc comment by @danbev in https://github.com/sigstore/sigstore-rs/pull/188 - Fix typos in tuf/mod.rs by @danbev in https://github.com/sigstore/sigstore-rs/pull/195 - chore(deps): Update serial_test requirement from 0.10.0 to 1.0.0 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/200 - fix: show actual response status field by @ctron in https://github.com/sigstore/sigstore-rs/pull/197 - Update target -> target_name for consistency by @danbev in https://github.com/sigstore/sigstore-rs/pull/196 - fix: make the fields accessible by @ctron in https://github.com/sigstore/sigstore-rs/pull/202 - Add verify-bundle example to README.md by @danbev in https://github.com/sigstore/sigstore-rs/pull/203 - fix: make fields of hash accessible by @ctron in https://github.com/sigstore/sigstore-rs/pull/205 - Improve public key output and add file output by @Gronner in https://github.com/sigstore/sigstore-rs/pull/194 - Add TokenProvider::Static doc comment by @danbev in https://github.com/sigstore/sigstore-rs/pull/208 - Changed the type of LogEntry.body from String to Body by @Neccolini in https://github.com/sigstore/sigstore-rs/pull/207 - Fix errors/warnings reported by clippy by @danbev in https://github.com/sigstore/sigstore-rs/pull/210 - Add fine-grained features to control the compilation by @Xynnn007 in 
https://github.com/sigstore/sigstore-rs/pull/189 - fix: bring tuf feature out of rekor and add related docs by @Xynnn007 in https://github.com/sigstore/sigstore-rs/pull/211 - chore: update crypto deps by @flavio in https://github.com/sigstore/sigstore-rs/pull/204 - Replace `x509-parser` with `x509-cert` by @Xynnn007 in https://github.com/sigstore/sigstore-rs/pull/212 - Fix: Wrong parameter order inside documentation example. by @vembacher in https://github.com/sigstore/sigstore-rs/pull/215 - Remove lines about timestamp in lib.rs by @naveensrinivasan in https://github.com/sigstore/sigstore-rs/pull/213 - Fix ed25519 version conflict by @vembacher in https://github.com/sigstore/sigstore-rs/pull/223 - Support compiling to wasm32 architectures by @lulf in https://github.com/sigstore/sigstore-rs/pull/221 - Fix link to contributor doc in readme by @oliviacrain in https://github.com/sigstore/sigstore-rs/pull/225 - refactor: derive `Clone` trait by @flavio in https://github.com/sigstore/sigstore-rs/pull/227 - fix: correct typo in verify/main.rs by @danbev in https://github.com/sigstore/sigstore-rs/pull/228 - chore(deps): Update tough requirement from 0.12 to 0.13 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/237 - chore(deps): Bump actions/checkout from 3.3.0 to 3.4.0 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/240 - dep: update picky version to get rid of `ring` by @Xynnn007 in https://github.com/sigstore/sigstore-rs/pull/226 - chore(deps): Bump actions/checkout from 3.4.0 to 3.5.0 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/245 - fix: make LogEntry Body an enum by @danbev in https://github.com/sigstore/sigstore-rs/pull/244 - Add verify-blob example by @danbev in https://github.com/sigstore/sigstore-rs/pull/239 - Introduce Newtype `OciReference` into API for OCI image references. 
by @vembacher in https://github.com/sigstore/sigstore-rs/pull/216 - Swap over to using CDN to fetch TUF metadata by @haydentherapper in https://github.com/sigstore/sigstore-rs/pull/251 - chore(deps): Bump actions/checkout from 3.5.0 to 3.5.2 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/252 - upgrade 'der' to 0.7.5 by @dmitris in https://github.com/sigstore/sigstore-rs/pull/257 - remove unused 'clock' feature for chrono by @dmitris in https://github.com/sigstore/sigstore-rs/pull/258 - update pkcs1 from 0.4.0 to 0.7.5 by @dmitris in https://github.com/sigstore/sigstore-rs/pull/260 - use 2021 Rust edition by @dmitris in https://github.com/sigstore/sigstore-rs/pull/261 - chore(deps): Update serial_test requirement from 1.0.0 to 2.0.0 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/264 - update scrypt to 0.11.0, adapt for API change (fix #231) by @dmitris in https://github.com/sigstore/sigstore-rs/pull/268 - upgrade ed25519-dalek to 2.0.0-rc.2 by @dmitris in https://github.com/sigstore/sigstore-rs/pull/263 - chore(deps): Update openidconnect requirement from 2.3 to 3.0 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/265 - chore(deps): Update rstest requirement from 0.16.0 to 0.17.0 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/271 - Update crypto deps by @flavio in https://github.com/sigstore/sigstore-rs/pull/269 - Update create_log_entry example to create key pair. 
by @jvanz in https://github.com/sigstore/sigstore-rs/pull/206 ## New Contributors - @ctron made their first contribution in https://github.com/sigstore/sigstore-rs/pull/197 - @Gronner made their first contribution in https://github.com/sigstore/sigstore-rs/pull/194 - @Neccolini made their first contribution in https://github.com/sigstore/sigstore-rs/pull/207 - @vembacher made their first contribution in https://github.com/sigstore/sigstore-rs/pull/215 - @naveensrinivasan made their first contribution in https://github.com/sigstore/sigstore-rs/pull/213 - @lulf made their first contribution in https://github.com/sigstore/sigstore-rs/pull/221 - @oliviacrain made their first contribution in https://github.com/sigstore/sigstore-rs/pull/225 - @haydentherapper made their first contribution in https://github.com/sigstore/sigstore-rs/pull/251 - @dmitris made their first contribution in https://github.com/sigstore/sigstore-rs/pull/257 - @jvanz made their first contribution in https://github.com/sigstore/sigstore-rs/pull/206 **Full Changelog**: https://github.com/sigstore/sigstore-rs/compare/v0.6.0...v0.7.0 # v0.6.0 ## Fixes - Fix typo in cosign/mod.rs doc comment by @danbev in https://github.com/sigstore/sigstore-rs/pull/148 - Fix typo in KeyPair trait doc comment by @danbev in https://github.com/sigstore/sigstore-rs/pull/149 - Update cached requirement from 0.39.0 to 0.40.0 by @dependabot in https://github.com/sigstore/sigstore-rs/pull/154 - Fix typos in PublicKeyVerifier doc comments by @danbev in https://github.com/sigstore/sigstore-rs/pull/155 - Fix: CI error for auto deref by @Xynnn007 in https://github.com/sigstore/sigstore-rs/pull/160 - Fix typo and grammar in signature_layers.rs by @danbev in https://github.com/sigstore/sigstore-rs/pull/161 - Remove unused imports in examples/rekor by @danbev in https://github.com/sigstore/sigstore-rs/pull/162 - Update link to verification example by @danbev in https://github.com/sigstore/sigstore-rs/pull/156 - Fix typos in 
from_encrypted_pem doc comments by @danbev in https://github.com/sigstore/sigstore-rs/pull/164 - Fix typos in doc comments by @danbev in https://github.com/sigstore/sigstore-rs/pull/163 - Update path to fulcio-cert in verify example by @danbev in https://github.com/sigstore/sigstore-rs/pull/168 ## Enhancements - Add getter functions for LogEntry fields by @lkatalin in https://github.com/sigstore/sigstore-rs/pull/147 - Add TreeSize alias to Rekor by @avery-blanchard in https://github.com/sigstore/sigstore-rs/pull/151 - Updates for parsing hashedrekord LogEntry by @lkatalin in https://github.com/sigstore/sigstore-rs/pull/152 - Add certificate based verification by @flavio in https://github.com/sigstore/sigstore-rs/pull/159 - Add support for OCI Image signing (spec v1.0) by @Xynnn007 in https://github.com/sigstore/sigstore-rs/pull/158 ## Contributors - Avery Blanchard (@avery-blanchardmade) - Daniel Bevenius (@danbev) - Flavio Castelli (@flavio) - Lily Sturmann (@lkatalin) - Xynnn (@Xynnn007) # v0.5.3 ## Fixes - rustls should not require openssl by (https://github.com/sigstore/sigstore-rs/pull/146) ## Others - Rework Rekor module structure and enable doc tests (https://github.com/sigstore/sigstore-rs/pull/145) ## Contributors - Flavio Castelli (@flavio) - Lily Sturmann (@lkatalin) # v0.5.2 ## Fixes - Address compilation error (https://github.com/sigstore/sigstore-rs/pull/143) ## Contributors - Flavio Castelli (@flavio) # v0.5.1 ## Fixes - fix verification of signatures produced with PKI11 (https://github.com/sigstore/sigstore-rs/pull/142) ## Others - Update rsa dependency to stable version 0.7.0 (https://github.com/sigstore/sigstore-rs/pull/141) - Bump actions/checkout from 3.0.2 to 3.1.0 (https://github.com/sigstore/sigstore-rs/pull/140) ## Contributors - Flavio Castelli (@flavio) - Xynnn (@Xynnn007) # v0.5.0 ## Enhancements - update user-agent value to be specific to sigstore-rs (https://github.com/sigstore/sigstore-rs/pull/122) - remove /api/v1/version from client 
by (https://github.com/sigstore/sigstore-rs/pull/121) - crate async fulcio client (https://github.com/sigstore/sigstore-rs/pull/132) - Removed ring dependency (https://github.com/sigstore/sigstore-rs/pull/127) ## Others - Update dependencies - Refactoring and examples for key interface (https://github.com/sigstore/sigstore-rs/pull/123) - Fix doc test failures (https://github.com/sigstore/sigstore-rs/pull/136) ## Contributors - Bob Callaway (@bobcallaway) - Bob McWhirter (@bobmcwhirter) - Flavio Castelli (@flavio) - Luke Hinds (@lukehinds) - Xynnn (@Xynnn007) # v0.4.0 ## Enhancements - feat: from and to interface for signing and verification keys (https://github.com/sigstore/sigstore-rs/pulls/115) - Refactor examples to support subfolder execution (https://github.com/sigstore/sigstore-rs/pulls/111) - Integrate Rekor with Sigstore-rs (https://github.com/sigstore/sigstore-rs/pulls/88) - feat: add example case and docs for key interface (https://github.com/sigstore/sigstore-rs/pulls/99) - feat: add signing key module (https://github.com/sigstore/sigstore-rs/pulls/87) ## Documention - Update readme to include new features (https://github.com/sigstore/sigstore-rs/pulls/113) ## Others - bump crate version (https://github.com/sigstore/sigstore-rs/pulls/118) - Add RUSTSEC-2021-0139 to audit.toml (https://github.com/sigstore/sigstore-rs/pulls/112) - Update xsalsa20poly1305 requirement from 0.7.1 to 0.9.0 (https://github.com/sigstore/sigstore-rs/pulls/101) - ignore derive_partial_eq_without_eq (https://github.com/sigstore/sigstore-rs/pulls/102) - fix clippy lints (https://github.com/sigstore/sigstore-rs/pulls/98) ## Contributors - Carlos Tadeu Panato Junior (@cpanato) - Flavio Castelli (@flavio) - Jyotsna (@jyotsna-penumaka) - Lily Sturmann (@lkatalin) - Luke Hinds (@lukehinds) - Tony Arcieri (@tarcieri) - Xynnn\_ (@Xynnn007) ================================================ FILE: CODEOWNERS ================================================ # The CODEOWNERS are managed via a 
GitHub team, but the current list is (in alphabetical order): # # flavio # lukehinds # arsa # viccuad # Xynnn007 @sigstore-rs-codeowners ================================================ FILE: CODE_OF_CONDUCT.md ================================================ # Code of Conduct ## Our Pledge In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. ## Our Standards Examples of behavior that contributes to creating a positive environment include: * Using welcoming and inclusive language * Being respectful of differing viewpoints and experiences * Gracefully accepting constructive criticism * Focusing on what is best for the community * Showing empathy towards other community members Examples of unacceptable behavior by participants include: * The use of sexualized language or imagery and unwelcome sexual attention or advances * Trolling, insulting/derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or electronic address, without explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Our Responsibilities Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. 
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. ## Scope This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at . All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] [homepage]: http://contributor-covenant.org [version]: http://contributor-covenant.org/version/1/4/ ================================================ FILE: CONTRIBUTORS.md ================================================ # Contributing When contributing to this repository, please first discuss the change you wish to make via an [issue](https://github.com/sigstore/sigstore-rs/issues). 
## Building and testing The Makefile contains useful targets for editing source code (`make lint`, `make fmt`), building executables (`make build`) and testing (`make test`). Full test suite requires Docker daemon to be running (and the user must have permissions to start a container). ## Pull Request Process 1. Create an [issue](https://github.com/sigstore/sigstore-rs/issues) outlining the fix or feature. 2. Fork the sigstore-rs repository to your own github account and clone it locally. 3. Hack on your changes. 4. Update the README.md with details of changes to any interface, this includes new environment variables, exposed ports, useful file locations, CLI parameters and new or changed configuration values. 5. Correctly format your commit message see [Commit Messages](#commit-message-guidelines) below. 6. Ensure that CI passes, if it fails, fix the failures. 7. Every pull request requires a review from the [core sigstore-rs team](https://github.com/orgs/github.com/sigstore/teams/sigstore-rs-codeowners) before merging. 8. If your pull request consists of more than one commit, please squash your commits as described in [Squash Commits](#squash-commits) ## Commit Message Guidelines We follow the commit formatting recommendations found on [Chris Beams' How to Write a Git Commit Message article]((https://chris.beams.io/posts/git-commit/). Well formed commit messages not only help reviewers understand the nature of the Pull Request, but also assists the release process where commit messages are used to generate release notes. A good example of a commit message would be as follows: ``` Summarize changes in around 50 characters or less More detailed explanatory text, if necessary. Wrap it to about 72 characters or so. In some contexts, the first line is treated as the subject of the commit and the rest of the text as the body. 
The blank line separating the summary from the body is critical (unless you omit the body entirely); various tools like `log`, `shortlog` and `rebase` can get confused if you run the two together. Explain the problem that this commit is solving. Focus on why you are making this change as opposed to how (the code explains that). Are there side effects or other unintuitive consequences of this change? Here's the place to explain them. Further paragraphs come after blank lines. - Bullet points are okay, too - Typically a hyphen or asterisk is used for the bullet, preceded by a single space, with blank lines in between, but conventions vary here If you use an issue tracker, put references to them at the bottom, like this: Resolves: #123 See also: #456, #789 ``` Note the `Resolves #123` tag, this references the issue raised and allows us to ensure issues are associated and closed when a pull request is merged. Please refer to [the github help page on message types](https://help.github.com/articles/closing-issues-using-keywords/) for a complete list of issue references. ## Squash Commits Should your pull request consist of more than one commit (perhaps due to a change being requested during the review cycle), please perform a git squash once a reviewer has approved your pull request. A squash can be performed as follows. 
Let's say you have the following commits: initial commit second commit final commit Run the command below with the number set to the total commits you wish to squash (in our case 3 commits): git rebase -i HEAD~3 You default text editor will then open up and you will see the following:: pick eb36612 initial commit pick 9ac8968 second commit pick a760569 final commit # Rebase eb1429f..a760569 onto eb1429f (3 commands) We want to rebase on top of our first commit, so we change the other two commits to `squash`: pick eb36612 initial commit squash 9ac8968 second commit squash a760569 final commit After this, should you wish to update your commit message to better summarise all of your pull request, run: git commit --amend You will then need to force push (assuming your initial commit(s) were posted to github): git push origin your-branch --force Alternatively, a core member can squash your commits within Github. ## Code of Conduct sigstore-rs adheres to and enforces the [Contributor Covenant](http://contributor-covenant.org/version/1/4/) Code of Conduct. Please take a moment to read the [CODE_OF_CONDUCT.md](https://github.com/sigstore/sigstore-rs/blob/master/CODE_OF_CONDUCT.md) document. ================================================ FILE: COPYRIGHT.txt ================================================ Copyright 2021 The Sigstore Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
================================================ FILE: Cargo.toml ================================================ [package] authors = ["sigstore-rs developers"] description = "An experimental crate to interact with sigstore" edition = "2024" license = "Apache-2.0" name = "sigstore" readme = "README.md" repository = "https://github.com/sigstore/sigstore-rs" version = "0.13.0" [package.metadata.docs.rs] all-features = true [features] default = ["full", "native-tls"] bundle = ["sign", "verify"] cached-client = ["dep:cached"] cert = ["dep:aws-lc-rs", "rustls-webpki/aws-lc-rs"] cosign = [ "cert", "dep:async-trait", "dep:cfg-if", "dep:oci-client", "dep:regex", "dep:serde_json_canonicalizer", "registry", ] fulcio = [ "dep:reqwest", "dep:serde_repr", "dep:serde_with", "dep:webbrowser", "oauth", ] full = [ "bundle", "cached-client", "cosign", "fulcio", "mock-client", "rekor", "sigstore-trust-root", ] mock-client = ["dep:async-trait", "dep:oci-client"] native-tls = ["oci-client?/native-tls", "reqwest?/native-tls"] oauth = ["dep:openidconnect", "dep:reqwest", "reqwest?/blocking"] registry = ["dep:async-trait", "dep:oci-client", "dep:serde_json_canonicalizer"] rekor = [ "dep:hex", "dep:hex-literal", "dep:reqwest", "dep:serde_json_canonicalizer", "reqwest?/query", ] rustls-tls = ["oci-client?/rustls-tls", "reqwest?/rustls"] sign = [ "cert", "dep:hex", "dep:serde_json_canonicalizer", "dep:sigstore_protobuf_specs", "fulcio", "rekor", ] sigstore-trust-root = [ "dep:async-trait", "dep:futures", "dep:futures-util", "dep:hex", "dep:reqwest", "dep:sigstore_protobuf_specs", "dep:tough", "reqwest?/stream", "tokio/sync", ] test-registry = [] # used for testing against a test registry verify = [ "cert", "dep:hex", "dep:serde_json_canonicalizer", "dep:sigstore_protobuf_specs", "fulcio", "rekor", ] wasm = ["chrono/wasmbind", "ring/wasm32_unknown_unknown_js"] [dependencies] async-trait = { version = "0.1", optional = true, default-features = false } base64 = { version = "0.22", 
default-features = false, features = ["std"] } cached = { version = "0.59", optional = true, features = ["async"] } cfg-if = { version = "1.0.0", optional = true, default-features = false } chrono = { version = "0.4", default-features = false, features = [ "now", "serde", ] } const-oid = { version = "0.9", default-features = false, features = ["db"] } crypto_secretbox = { version = "0.1", default-features = false, features = [ "alloc", "salsa20", ] } digest = { version = "0.10", default-features = false } ecdsa = { version = "0.16", default-features = false, features = [ "der", "digest", "pkcs8", "signing", "std", ] } ed25519 = { version = "2.2", default-features = false, features = ["alloc"] } ed25519-dalek = { version = "2.1", default-features = false, features = [ "alloc", "pkcs8", "rand_core", ] } elliptic-curve = { version = "0.13", default-features = false, features = [ "arithmetic", "pem", "std", ] } futures = { version = "0.3", default-features = false, optional = true } futures-util = { version = "0.3", default-features = false, optional = true } hex = { version = "0.4", default-features = false, optional = true, features = [ "std", ] } hex-literal = { version = "1.1", optional = true } oci-client = { version = "0.16", default-features = false, optional = true } openidconnect = { version = "4.0", default-features = false, optional = true } p256 = { version = "0.13", default-features = false, features = [ "arithmetic", "ecdsa", "pem", "std", ] } p384 = { version = "0.13", default-features = false, features = [ "arithmetic", "ecdsa", "pem", "std", ] } pem = { version = "3.0", default-features = false, features = ["serde", "std"] } pkcs1 = { version = "0.7", default-features = false, features = ["std"] } pkcs8 = { version = "0.10", default-features = false, features = [ "encryption", "pem", "pkcs5", "std", ] } pki-types = { package = "rustls-pki-types", version = "1.11", default-features = false } rand = { version = "0.8", default-features = false, features = 
["std"] } regex = { version = "1.10", default-features = false, optional = true } reqwest = { version = "0.13", default-features = false, features = [ "json", "multipart", ], optional = true } # used only by the wasm feature ring = { version = "0", default-features = false, optional = true } rsa = { version = "0.9", default-features = false, features = ["std"] } rustls-webpki = { version = "0.103", default-features = false, features = [ "std", ] } scrypt = { version = "0.11", default-features = false, features = [ "simple", "std", ] } serde = { version = "1.0", default-features = false, features = ["derive"] } serde_json = { version = "1.0", default-features = false, features = ["std"] } serde_json_canonicalizer = { version = "0.3", optional = true } serde_repr = { version = "0.1", default-features = false, optional = true } serde_with = { version = "3.9", default-features = false, features = [ "base64", "json", ], optional = true } sha2 = { version = "0.10", default-features = false, features = ["oid"] } signature = { version = "2.2", default-features = false } sigstore_protobuf_specs = { version = "0.5", default-features = false, optional = true } thiserror = { version = "2.0", default-features = false, features = ["std"] } tls_codec = { version = "0.4", default-features = false, features = ["derive"] } tokio = { version = "1", default-features = false, features = ["rt"] } tokio-util = { version = "0.7", default-features = false, features = [ "io-util", ] } tough = { version = "0.22", default-features = false, optional = true } tracing = { version = "0.1", default-features = false } url = { version = "2.5", default-features = false } webbrowser = { version = "1.0", default-features = false, optional = true } x509-cert = { version = "0.2", default-features = false, features = [ "builder", "pem", "sct", "std", ] } zeroize = { version = "1.8", default-features = false } [dev-dependencies] anyhow = { version = "1.0", default-features = false, features = [ 
"backtrace", "std", ] } assert-json-diff = { version = "2.0", default-features = false } clap = { version = "4.5", default-features = false, features = [ "color", "derive", "error-context", "help", "std", "suggestions", "usage", ] } docker_credential = { version = "1.3", default-features = false } openssl = { version = "0.10", default-features = false } rstest = { version = "0.26", default-features = false } serial_test = { version = "3.1", default-features = false } tempfile = { version = "3.12", default-features = false } testcontainers = { version = "0.27", default-features = false, features = [ "aws-lc-rs", ] } tracing-subscriber = { version = "0.3", default-features = false, features = [ "ansi", "env-filter", "fmt", "smallvec", "std", "tracing-log", ] } [target.'cfg(not(target_arch = "powerpc64"))'.dependencies] aws-lc-rs = { version = "1", optional = true } [target.'cfg(target_arch = "powerpc64")'.dependencies] aws-lc-rs = { version = "1", optional = true, features = ["bindgen"] } # cosign example mappings [[example]] name = "verify" path = "examples/cosign/verify/main.rs" [[example]] name = "verify-blob" path = "examples/cosign/verify-blob/main.rs" [[example]] name = "verify-bundle" path = "examples/cosign/verify-bundle/main.rs" [[example]] name = "sign" path = "examples/cosign/sign/main.rs" # openidconnect example mappings [[example]] name = "openidconnect" path = "examples/openidflow/openidconnect/main.rs" # key interface mappings [[example]] name = "key_pair_gen_sign_verify" path = "examples/key_interface/key_pair_gen_sign_verify/main.rs" [[example]] name = "key_pair_gen_and_export" path = "examples/key_interface/key_pair_gen_and_export/main.rs" [[example]] name = "key_pair_import" path = "examples/key_interface/key_pair_import/main.rs" # rekor example mappings [[example]] name = "create_log_entry" path = "examples/rekor/create_log_entry/main.rs" [[example]] name = "get_log_entry_by_index" path = "examples/rekor/get_log_entry_by_index/main.rs" [[example]] 
name = "get_log_entry_by_uuid" path = "examples/rekor/get_log_entry_by_uuid/main.rs" [[example]] name = "get_log_info" path = "examples/rekor/get_log_info/main.rs" [[example]] name = "get_log_proof" path = "examples/rekor/get_log_proof/main.rs" [[example]] name = "get_public_key" path = "examples/rekor/get_public_key/main.rs" [[example]] name = "search_index" path = "examples/rekor/search_index/main.rs" [[example]] name = "search_log_query" path = "examples/rekor/search_log_query/main.rs" [[example]] name = "fulcio_cert" path = "examples/fulcio/cert/main.rs" [[example]] name = "inclusion_proof" path = "examples/rekor/merkle_proofs/inclusion.rs" [[example]] name = "consistency_proof" path = "examples/rekor/merkle_proofs/consistency.rs" ================================================ FILE: LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. 
Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: Makefile ================================================ .PHONY: build build: cargo build --release .PHONY: fmt fmt: cargo fmt --all -- --check taplo fmt --check .PHONY: lint lint: cargo clippy --all-targets -- -D warnings .PHONY: doc doc: RUSTDOCFLAGS="--cfg docsrs -D warnings" cargo +nightly doc --all-features --no-deps .PHONY: check-features check-features: cargo hack check --each-feature --skip cosign --skip full --skip mock-client --skip registry .PHONY: check-features-native-tls check-features-native-tls: cargo hack check --feature-powerset --features native-tls --skip wasm --skip test-registry --skip rustls-tls --skip rustls-tls-native-roots .PHONY: check-features-rustls-tls check-features-rustls-tls: cargo hack check --feature-powerset --features rustls-tls --skip wasm --skip test-registry --skip native-tls --skip rustls-tls-native-roots .PHONY: check-wasm check-wasm: cargo check --no-default-features --features wasm --target wasm32-unknown-unknown .PHONY: test test: fmt lint doc cargo test --workspace --no-default-features --features full,native-tls,test-registry cargo test --workspace --no-default-features --features full,rustls-tls,test-registry .PHONY: clean clean: cargo clean .PHONY: coverage coverage: cargo tarpaulin -o Html ================================================ FILE: README.md ================================================ 
Continuous integration | Docs | License | Crate version | Crate downloads ----------------------|------|---------|---------------|----------------- [![Continuous integration](https://github.com/sigstore/sigstore-rs/actions/workflows/tests.yml/badge.svg)](https://github.com/sigstore/sigstore-rs/actions/workflows/tests.yml) | [![Docs](https://img.shields.io/badge/docs-%20-blue)](https://docs.rs/sigstore/latest/sigstore) | [![License: Apache 2.0](https://img.shields.io/badge/License-Apache2.0-brightgreen.svg)](https://opensource.org/licenses/Apache-2.0) | [![Crate version](https://img.shields.io/crates/v/sigstore?style=flat-square)](https://crates.io/crates/sigstore) | [![Crate downloads](https://img.shields.io/crates/d/sigstore?style=flat-square)](https://crates.io/crates/sigstore) A crate to interact with [sigstore](https://sigstore.dev/). This crate is under active development and will not be considered stable until the 1.0 release. ## Features ### Cosign Sign and Verify The crate implements the following verification mechanisms: * Sign using a cosign key and store the signature in a registry * Verify using a given key * Verify bundle produced by transparency log (Rekor) * Verify signature produced in keyless mode, using Fulcio Web-PKI Signature annotations and certificate email can be provided at verification time. ### Fulcio Integration For use with Fulcio ephemeral key signing, an OpenID connect API is available, along with a fulcio client implementation. ### Rekor Client All rekor client APIs can be leveraged to interact with the transparency log. ### Key Interface Cryptographic key management with the following key interfaces: * Generate a key pair * Sign data * Verify signature * Export public / (encrypted) private key in PEM / DER format * Import public / (encrypted) private key in PEM / DER format #### Known limitations * The crate does not handle verification of attestations yet. 
## Examples The `examples` directory contains demo programs using the library. * [`openidflow`](examples/openidflow/README.md) * [`key_interface`](examples/key_interface/README.md) * [`rekor`](examples/rekor/README.md) * [`cosign/verify`](examples/cosign/verify/README.md) * [`cosign/verify-blob`](examples/cosign/verify-blob/README.md) * [`cosign/verify-bundle`](examples/cosign/verify-bundle/README.md) * [`cosign/sign`](examples/cosign/sign/README.md) Each example can be executed with the `cargo run --example <example-name>` command. For example, `openidconnect` can be run with the following command: ```bash cargo run --example openidconnect ``` ## WebAssembly/WASM support To embed this crate in WASM modules, build it using the `wasm` cargo feature: ```bash cargo build --no-default-features --features wasm --target wasm32-unknown-unknown ``` NOTE: The wasm32-wasi target architecture is not yet supported. ## Contributing Contributions are welcome! Please see the [contributing guidelines](CONTRIBUTORS.md) for more information. ## Security Should you discover any security issues, please refer to sigstore's [security process](https://github.com/sigstore/community/security/policy) ================================================ FILE: clippy.toml ================================================ allow-panic-in-tests = true allow-unwrap-in-tests = true ================================================ FILE: examples/README.md ================================================ # sigstore-rs code examples This folder contains executable examples of the sigstore-rs library. To run any given example, simply provide the subfolder name as an argument to the `cargo run` command. ```bash cargo run --example <example-name> --all-features ``` e.g. ```bash cargo run --example create_log_entry --all-features ``` ================================================ FILE: examples/bundle/README.md ================================================ This example shows how to sign and verify Sigstore signature bundles. 
The bundle format used here is supported by most Sigstore clients but notably cosign requires `--new-bundle-format` to do so. This example uses `sigstore::bundle` for signing and verification. The sign subcommand uses `sigstore::oauth` for interactive OIDC authorization. In addition to the bundle format, a notable difference compared to the "cosign" examples is that `sigstore::bundle` also handles the Sigstore trust root update before signing or verifying. ### Sign README.md ```console cargo run --example bundle \ sign README.md ``` A browser window will be opened to authorize signing with an OIDC identity. After the authorization the signature bundle is created in `README.md.sigstore.json`. ### Verify README.md using the signature bundle ```console cargo run --example bundle \ verify --identity <EMAIL> --issuer <URI> README.md ``` `EMAIL` is the email address of the OIDC account and `URI` is the OIDC issuer URI that were used during signing. As an example `cargo run --example bundle verify --identity name@example.com --issuer https://github.com/login/oauth README.md` verifies that the bundle `README.md.sigstore.json` was signed by "name@example.com" as authenticated by GitHub. 
================================================ FILE: examples/bundle/main.rs ================================================ use clap::{Parser, Subcommand}; use std::fs; use std::path::PathBuf; use tracing::debug; use tracing_subscriber::prelude::*; use tracing_subscriber::{EnvFilter, fmt}; use sigstore::bundle::sign::SigningContext; use sigstore::bundle::verify::{blocking::Verifier, policy}; use sigstore::oauth; #[derive(Parser, Debug)] #[clap(about = "Signing and verification example for sigstore::bundle module")] struct Cli { /// Enable verbose mode #[arg(short, long)] verbose: bool, #[command(subcommand)] command: Commands, } #[derive(Subcommand, Debug)] enum Commands { /// Verify a signature for an artifact Verify(VerifyArgs), /// Create a signature for an artifact Sign(SignArgs), } #[derive(Parser, Debug)] struct SignArgs { /// Path to the artifact to sign artifact: PathBuf, } #[derive(Parser, Debug)] struct VerifyArgs { /// Path to the artifact to verify artifact: PathBuf, /// expected signing identity (email) #[arg(long, value_name = "EMAIL")] identity: String, /// expected signing identity issuer (URI) #[arg(long, value_name = "URI")] issuer: String, } pub fn main() { let cli = Cli::parse(); // setup logging let level_filter = if cli.verbose { "debug" } else { "info" }; let filter_layer = EnvFilter::new(level_filter); tracing_subscriber::registry() .with(filter_layer) .with(fmt::layer().with_writer(std::io::stderr)) .init(); match cli.command { Commands::Sign(args) => sign(&args.artifact), Commands::Verify(args) => verify(&args.artifact, &args.identity, &args.issuer), } } fn sign(artifact_path: &PathBuf) { let filename = artifact_path .file_name() .and_then(|s| s.to_str()) .expect("Failed to parse artifact filename"); let mut artifact = fs::File::open(artifact_path) .unwrap_or_else(|_| panic!("Failed to read artifact {}", artifact_path.display())); let mut bundle_path = artifact_path.clone(); bundle_path.set_file_name(format!("{}.sigstore.json", 
filename)); let bundle = fs::File::create_new(&bundle_path).unwrap_or_else(|e| { println!( "Failed to create signature bundle {}: {}", bundle_path.display(), e ); std::process::exit(1); }); let token = authorize(); let email = &token.unverified_claims().email.clone(); debug!("Signing with {}", email); let signing_artifact = SigningContext::production().and_then(|ctx| { ctx.blocking_signer(token) .and_then(|session| session.sign(&mut artifact)) }); match signing_artifact { Ok(signing_artifact) => { serde_json::to_writer(bundle, &signing_artifact.to_bundle()) .expect("Failed to write bundle to file"); } Err(e) => { panic!("Failed to sign: {}", e); } } println!( "Created signature bundle {} with identity {}", bundle_path.display(), email ); } fn verify(artifact_path: &PathBuf, identity: &str, issuer: &str) { let filename = artifact_path .file_name() .and_then(|s| s.to_str()) .expect("Failed to parse artifact filename"); let mut bundle_path = artifact_path.clone(); bundle_path.set_file_name(format!("{}.sigstore.json", filename)); let bundle = fs::File::open(&bundle_path) .unwrap_or_else(|_| panic!("Failed to open signature bundle {}", &bundle_path.display())); let mut artifact = fs::File::open(artifact_path) .unwrap_or_else(|_| panic!("Failed to read artifact {}", artifact_path.display())); let bundle: sigstore::bundle::Bundle = serde_json::from_reader(bundle).expect("Failed to parse the bundle"); let verifier = Verifier::production().expect("Failed to create a verifier"); debug!("Verifying with {} (issuer {})", identity, issuer); let id_policy = policy::Identity::new(identity, issuer); if let Err(e) = verifier.verify(&mut artifact, bundle, &id_policy, true) { println!("Failed to verify: {}", e); std::process::exit(1); } println!("Verified") } fn authorize() -> oauth::IdentityToken { let oidc_url = oauth::openidflow::OpenIDAuthorize::new( "sigstore", "", "https://oauth2.sigstore.dev/auth", "http://localhost:8080", ) .auth_url() .expect("Failed to start OIDC 
authorization"); webbrowser::open(oidc_url.0.as_ref()).expect("Failed to open browser"); println!("Please authorize signing in web browser."); let listener = oauth::openidflow::RedirectListener::new( "127.0.0.1:8080", oidc_url.1, // client oidc_url.2, // nonce oidc_url.3, // pkce_verifier ); let (_, token) = listener .redirect_listener() .expect("Failed to receive a token"); oauth::IdentityToken::from(token) } ================================================ FILE: examples/cosign/sign/README.md ================================================ This is a simple example program that shows how perform cosign signing. The program allows also to use annotation, in the same way as `cosign sign -a key=value` does. The program prints to the standard output all the Simple Signing objects that have been successfully pushed. # Key based Signing The implementation is in [main.rs](./main.rs). Create a keypair using the official cosign client: ```console cosign generate-key-pair ``` Because the default key pair generated by cosign is `ECDSA_P256` key, so we choose to use `ECDSA_P256_SHA256_ASN1` as the signing scheme. Suppose the password used to encrypt the private key is `123`, and the target image to be signed is `172.17.0.2:5000/ubuntu` Also, let us the annotation `a=1`. Sign a container image: ```console cargo run --example sign \ --all-features \ -- \ --key cosign.key \ --image 172.17.0.2:5000/ubuntu \ --signing-scheme ECDSA_P256_SHA256_ASN1 \ --password 123 \ --verbose \ --http \ --annotations a=1 ``` Then the image will be signed. Let us then verify it. 1. Using `cosign` (golang version) ```console cosign verify --key cosign.pub \ -a a=1 \ 172.17.0.2:5000/ubuntu ``` 2. 
Or use `sigstore-rs` ```console cargo run --example verify -- \ --key cosign.pub \ --annotations a=1 \ --http \ 172.17.0.2:5000/ubuntu ``` ================================================ FILE: examples/cosign/sign/main.rs ================================================ // // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use docker_credential::{CredentialRetrievalError, DockerCredential}; use sigstore::cosign::constraint::{AnnotationMarker, PrivateKeySigner}; use sigstore::cosign::{Constraint, CosignCapabilities, SignatureLayer}; use sigstore::crypto::SigningScheme; use sigstore::registry::{Auth, ClientConfig, ClientProtocol, OciReference}; use tracing::{debug, warn}; use zeroize::Zeroizing; extern crate anyhow; use anyhow::anyhow; extern crate clap; use clap::Parser; use std::{collections::HashMap, fs}; extern crate tracing_subscriber; use tracing_subscriber::prelude::*; use tracing_subscriber::{EnvFilter, fmt}; #[derive(Parser, Debug)] #[clap(author, version, about, long_about = None)] struct Cli { /// Verification key #[clap(short, long, required(false))] key: String, /// Signing scheme when signing and verifying #[clap(long, required(false))] signing_scheme: Option, /// Password used to decrypt private key #[clap(long, required(false))] password: Option, /// Annotations that have to be satisfied #[clap(short, long, required(false))] annotations: Vec, /// Enable verbose mode #[clap(short, long)] verbose: bool, /// Name of the 
image to verify #[clap(short, long)] image: OciReference, /// Whether the registry uses HTTP #[clap(long)] http: bool, } async fn run_app(cli: &Cli) -> anyhow::Result<()> { let auth = &sigstore::registry::Auth::Anonymous; let mut oci_client_config = ClientConfig::default(); match cli.http { false => oci_client_config.protocol = ClientProtocol::Https, true => oci_client_config.protocol = ClientProtocol::Http, } let client_builder = sigstore::cosign::ClientBuilder::default().with_oci_client_config(oci_client_config); let mut client = client_builder.build()?; let image = &cli.image; let (cosign_signature_image, source_image_digest) = client.triangulate(image, auth).await?; debug!(cosign_signature_image= ?cosign_signature_image, source_image_digest= ?source_image_digest); let mut signature_layer = SignatureLayer::new_unsigned(image, &source_image_digest)?; let auth = build_auth(&cosign_signature_image); debug!(auth = ?auth, "use auth"); if !cli.annotations.is_empty() { let mut values: HashMap = HashMap::new(); for annotation in &cli.annotations { let tmp: Vec<_> = annotation.splitn(2, '=').collect(); if tmp.len() == 2 { values.insert(String::from(tmp[0]), String::from(tmp[1])); } } if !values.is_empty() { let annotations_marker = AnnotationMarker { annotations: values, }; annotations_marker .add_constraint(&mut signature_layer) .expect("add annotations failed"); } } let key = Zeroizing::new(fs::read(&cli.key).map_err(|e| anyhow!("Cannot read key: {:?}", e))?); let signing_scheme = if let Some(ss) = &cli.signing_scheme { &ss[..] 
} else { "ECDSA_P256_SHA256_ASN1" }; let signing_scheme = SigningScheme::try_from(signing_scheme).map_err(anyhow::Error::msg)?; let password = Zeroizing::new(cli.password.clone().unwrap_or_default().as_bytes().to_vec()); let signer = PrivateKeySigner::new_with_raw(key, password, &signing_scheme) .map_err(|e| anyhow!("Cannot create private key signer: {}", e))?; signer .add_constraint(&mut signature_layer) .expect("sign image failed"); // Suppose there is only one SignatureLayer in the cosign image client .push_signature(None, &auth, &cosign_signature_image, vec![signature_layer]) .await?; Ok(()) } /// This function helps to get the auth of the given image reference. /// Now only `UsernamePassword` and `Anonymous` is supported. If an /// `IdentityToken` is found, this function will return an `Anonymous` /// auth. /// /// Any error will return an `Anonymous`. fn build_auth(reference: &OciReference) -> Auth { let server = reference .resolve_registry() .strip_suffix('/') .unwrap_or_else(|| reference.resolve_registry()); match docker_credential::get_credential(server) { Err(CredentialRetrievalError::ConfigNotFound) => Auth::Anonymous, Err(CredentialRetrievalError::NoCredentialConfigured) => Auth::Anonymous, Err(e) => { warn!("Error handling docker configuration file: {}", e); Auth::Anonymous } Ok(DockerCredential::UsernamePassword(username, password)) => { debug!("Found docker credentials"); Auth::Basic(username, password) } Ok(DockerCredential::IdentityToken(_)) => { warn!( "Cannot use contents of docker config, identity token not supported. 
Using anonymous auth" ); Auth::Anonymous } } } #[tokio::main] pub async fn main() { let cli = Cli::parse(); // setup logging let level_filter = if cli.verbose { "debug" } else { "info" }; let filter_layer = EnvFilter::new(level_filter); tracing_subscriber::registry() .with(filter_layer) .with(fmt::layer().with_writer(std::io::stderr)) .init(); match run_app(&cli).await { Ok(_) => println!("Costraints successfully applied"), Err(err) => { eprintln!("Image signing failed: {:?}", err); } } } ================================================ FILE: examples/cosign/trustroot/main.rs ================================================ use anyhow::Result; use sigstore::trust::{TrustRoot, sigstore::SigstoreTrustRoot}; #[tokio::main] pub async fn main() -> Result<()> { let root = SigstoreTrustRoot::new(None).await?; let fulcio_certs = root.fulcio_certs()?; println!("Fulcio Certificates found: {}", fulcio_certs.len()); let rekor_keys = root.rekor_keys()?; println!("Rekor Public Keys found: {}", rekor_keys.len()); Ok(()) } ================================================ FILE: examples/cosign/verify/README.md ================================================ This is a simple example program that shows how perform cosign verification. The program allows also to use annotation, in the same way as `cosign verify -a key=value` does. The program prints to the standard output all the Simple Signing objects that have been successfully verified. 
# Key based verification Create a keypair using the official cosign client: ```console cosign generate-key-pair ``` Sign a container image: ```console cosign sign --key cosign.key registry-testing.svc.lan/busybox ``` Verify the image signature using the example program defined in [main.rs](./main.rs): ```console cargo run --example verify \ --all-features \ -- \ -k cosign.pub \ --rekor-pub-key ~/.sigstore/root/targets/rekor.pub \ --fulcio-cert ~/.sigstore/root/targets/fulcio.crt.pem \ registry-testing.svc.lan/busybox ``` ================================================ FILE: examples/cosign/verify/main.rs ================================================ // // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
extern crate sigstore; use sigstore::cosign::verification_constraint::cert_subject_email_verifier::StringVerifier; use sigstore::cosign::verification_constraint::{ AnnotationVerifier, CertSubjectEmailVerifier, CertSubjectUrlVerifier, CertificateVerifier, PublicKeyVerifier, VerificationConstraintVec, }; use sigstore::cosign::{CosignCapabilities, SignatureLayer}; use sigstore::crypto::SigningScheme; use sigstore::errors::SigstoreVerifyConstraintsError; use sigstore::registry::{ClientConfig, ClientProtocol, OciReference}; use sigstore::trust::sigstore::SigstoreTrustRoot; use std::time::Instant; use std::{collections::BTreeMap, fs}; extern crate anyhow; use anyhow::{Result, anyhow}; extern crate clap; use clap::Parser; extern crate tracing_subscriber; use tracing::{info, warn}; use tracing_subscriber::prelude::*; use tracing_subscriber::{EnvFilter, fmt}; #[derive(Parser, Debug)] #[clap(author, version, about, long_about = None)] struct Cli { /// Verification key #[clap(short, long, required(false))] key: Option, /// Path to verification certificate #[clap(long, required(false))] cert: Option, /// Path to certificate chain bundle file #[clap(long, required(false))] cert_chain: Option, /// Signing scheme when signing and verifying #[clap(long, required(false))] signing_scheme: Option, /// Fetch Rekor and Fulcio data from Sigstore's TUF repository" #[clap(long)] use_sigstore_tuf_data: bool, /// Rekor's public key ID (in hex format) and path to the public key, separated by the ':' symbol (e.g.: c0d23d6ad406973f9559f3ba2d1ca01f84147d8ffc5b8445c224f98b9591801d:~/.sigstore/root/targets/rekor.pub) #[clap(long, required(false))] rekor_pub_keys: Vec, /// File containing Fulcio's certificate (e.g.: ~/.sigstore/root/targets/fulcio.crt.pem) #[clap(long, required(false))] fulcio_certs: Vec, /// The issuer of the OIDC token used by the user to authenticate against Fulcio #[clap(long, required(false))] cert_issuer: Option, /// The email expected in a valid fulcio cert #[clap(long, 
required(false))] cert_email: Option, /// The URL expected in a valid fulcio cert #[clap(long, required(false))] cert_url: Option, /// Annotations that have to be satisfied #[clap(short, long, required(false))] annotations: Vec, /// Enable verbose mode #[clap(short, long)] verbose: bool, /// Enable caching of registry operations #[clap(long)] enable_registry_caching: bool, /// Number of loops to be done. Useful only for testing `enable-registry-caching` #[clap(long, default_value = "1")] loops: u32, /// Name of the image to verify image: OciReference, /// Whether the registry uses HTTP #[clap(long)] http: bool, } async fn run_app( cli: &Cli, frd: &dyn sigstore::trust::TrustRoot, ) -> anyhow::Result<(Vec, VerificationConstraintVec)> { // Note well: this a limitation deliberately introduced by this example. if cli.cert_email.is_some() && cli.cert_url.is_some() { return Err(anyhow!( "The 'cert-email' and 'cert-url' flags cannot be used at the same time" )); } if cli.key.is_some() && cli.cert.is_some() { return Err(anyhow!("'key' and 'cert' cannot be used at the same time")); } let auth = &sigstore::registry::Auth::Anonymous; let mut oci_client_config = ClientConfig::default(); match cli.http { false => oci_client_config.protocol = ClientProtocol::Https, true => oci_client_config.protocol = ClientProtocol::Http, } let mut client_builder = sigstore::cosign::ClientBuilder::default().with_oci_client_config(oci_client_config); client_builder = client_builder.with_trust_repository(frd)?; let cert_chain: Option> = match cli.cert_chain.as_ref() { None => None, Some(cert_chain_path) => Some(parse_cert_bundle(cert_chain_path)?), }; if cli.enable_registry_caching { client_builder = client_builder.enable_registry_caching(); } let mut client = client_builder.build()?; // Build verification constraints let mut verification_constraints: VerificationConstraintVec = Vec::new(); if let Some(cert_email) = cli.cert_email.as_ref() { let issuer = cli .cert_issuer .as_ref() .map(|i| 
StringVerifier::ExactMatch(i.to_string())); verification_constraints.push(Box::new(CertSubjectEmailVerifier { email: StringVerifier::ExactMatch(cert_email.to_string()), issuer, })); } if let Some(cert_url) = cli.cert_url.as_ref() { let issuer = cli.cert_issuer.as_ref().map(|i| i.to_string()); if issuer.is_none() { return Err(anyhow!( "'cert-issuer' is required when 'cert-url' is specified" )); } verification_constraints.push(Box::new(CertSubjectUrlVerifier { url: cert_url.to_string(), issuer: issuer.unwrap(), })); } if let Some(path_to_key) = cli.key.as_ref() { let key = fs::read(path_to_key).map_err(|e| anyhow!("Cannot read key: {:?}", e))?; let verifier = match &cli.signing_scheme { Some(scheme) => { let signing_scheme = SigningScheme::try_from(&scheme[..]).map_err(anyhow::Error::msg)?; PublicKeyVerifier::new(&key, &signing_scheme) .map_err(|e| anyhow!("Cannot create public key verifier: {}", e))? } None => PublicKeyVerifier::try_from(&key) .map_err(|e| anyhow!("Cannot create public key verifier: {}", e))?, }; verification_constraints.push(Box::new(verifier)); } if let Some(path_to_cert) = cli.cert.as_ref() { let cert = fs::read(path_to_cert).map_err(|e| anyhow!("Cannot read cert: {:?}", e))?; let require_rekor_bundle = if !frd.rekor_keys()?.is_empty() { true } else { warn!("certificate based verification is weaker when Rekor integration is disabled"); false }; let verifier = CertificateVerifier::from_pem(&cert, require_rekor_bundle, cert_chain.as_deref()) .map_err(|e| anyhow!("Cannot create certificate verifier: {}", e))?; verification_constraints.push(Box::new(verifier)); } if !cli.annotations.is_empty() { let mut values: BTreeMap = BTreeMap::new(); for annotation in &cli.annotations { let tmp: Vec<_> = annotation.splitn(2, '=').collect(); if tmp.len() == 2 { values.insert(String::from(tmp[0]), String::from(tmp[1])); } } if !values.is_empty() { let annotations_verifier = AnnotationVerifier { annotations: values, }; 
verification_constraints.push(Box::new(annotations_verifier)); } } let image = &cli.image; let (cosign_signature_image, source_image_digest) = client.triangulate(image, auth).await?; let trusted_layers = client .trusted_signature_layers(auth, &source_image_digest, &cosign_signature_image) .await?; Ok((trusted_layers, verification_constraints)) } async fn fulcio_and_rekor_data(cli: &Cli) -> anyhow::Result> { if cli.use_sigstore_tuf_data { info!("Downloading data from Sigstore TUF repository"); let trust_root: sigstore::errors::Result = SigstoreTrustRoot::new(None).await; return Ok(Box::new(trust_root?)); }; let mut trust_root = sigstore::trust::ManualTrustRoot::default(); for id_and_path in cli.rekor_pub_keys.iter() { let (id, path) = id_and_path .split_once(':') .ok_or_else(|| anyhow!("Invalid format for rekor public key"))?; trust_root.rekor_keys.insert( id.to_string(), fs::read(path) .map_err(|e| anyhow!("Error reading rekor public key from disk: {}", e))?, ); } for path in cli.fulcio_certs.iter() { let cert_data = fs::read(path) .map_err(|e| anyhow!("Error reading fulcio certificate from disk: {}", e))?; let certificate = sigstore::registry::Certificate { encoding: sigstore::registry::CertificateEncoding::Pem, data: cert_data, }; trust_root.fulcio_certs.push(certificate.try_into()?); } Ok(Box::new(trust_root)) } #[tokio::main] pub async fn main() { let cli = Cli::parse(); // setup logging let level_filter = if cli.verbose { "debug" } else { "info" }; let filter_layer = EnvFilter::new(level_filter); tracing_subscriber::registry() .with(filter_layer) .with(fmt::layer().with_writer(std::io::stderr)) .init(); let frd = match fulcio_and_rekor_data(&cli).await { Ok(sr) => sr, Err(e) => { eprintln!("Cannot build sigstore repo data: {}", e); std::process::exit(1); } }; for n in 0..(cli.loops) { let now = Instant::now(); if cli.loops != 1 { println!("Loop {}/{}", n + 1, cli.loops); } match run_app(&cli, frd.as_ref()).await { Ok((trusted_layers, verification_constraints)) 
=> { let filter_result = sigstore::cosign::verify_constraints( &trusted_layers, verification_constraints.iter(), ); match filter_result { Ok(()) => { println!("Image successfully verified"); } Err(SigstoreVerifyConstraintsError { unsatisfied_constraints, }) => { eprintln!("Image verification failed: not all constraints satisfied."); eprintln!("{:?}", unsatisfied_constraints); } } } Err(err) => { eprintln!("Image verification failed: {:?}", err); } } let elapsed = now.elapsed(); if cli.loops != 1 { println!("Elapsed: {:.2?}", elapsed); println!("------"); } } } fn parse_cert_bundle(bundle_path: &str) -> Result> { let data = fs::read(bundle_path).map_err(|e| anyhow!("Error reading {}: {}", bundle_path, e))?; let pems = pem::parse_many(data)?; Ok(pems .iter() .map(|pem| sigstore::registry::Certificate { encoding: sigstore::registry::CertificateEncoding::Der, data: pem.contents().to_vec(), }) .collect()) } ================================================ FILE: examples/cosign/verify-blob/.gitignore ================================================ signature certificate artifact.txt ================================================ FILE: examples/cosign/verify-blob/README.md ================================================ This example shows how to verify a blob signature that was created by the `cosign sign-blob` command. ### Create the artifact to be signed. ```console cd examples/cosign/verify-blob echo something > artifact.txt ``` ### Sign the artifact.txt file using cosign ``` cosign sign-blob \ --output-signature signature \ --output-certificate certificate \ artifact.txt Using payload from: artifact.txt Generating ephemeral keys... Retrieving signed certificate... Note that there may be personally identifiable information associated with this signed artifact. This may include the email address associated with the account with which you authenticate. 
This information will be used for signing this artifact and will be stored in public transparency logs and cannot be removed later. By typing 'y', you attest that you grant (or have permission to grant) and agree to have this information stored permanently in transparency logs. Are you sure you want to continue? (y/[N]): y Your browser will now be opened to: https://oauth2.sigstore.dev/auth/auth?access_type=online&client_id=sigstore&code_challenge=o2zGqxFdnIMy2n31excKZGDd25nj9bRocuCK_oSTKDk&code_challenge_method=S256&nonce=2MxS5IYq7wviqRPvAKMeSUcQiBS&redirect_uri=http%3A%2F%2Flocalhost%3A36653%2Fauth%2Fcallback&response_type=code&scope=openid+email&state=2MxS5NQBiv0oTvB0oU88qRbaKEk Successfully verified SCT... using ephemeral certificate: -----BEGIN CERTIFICATE----- MIICqTCCAi6gAwIBAgIUc4soYChsRq4lWUu990I7GrErO9IwCgYIKoZIzj0EAwMw NzEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MR4wHAYDVQQDExVzaWdzdG9yZS1pbnRl cm1lZGlhdGUwHhcNMjMwMzEzMTEzOTIwWhcNMjMwMzEzMTE0OTIwWjAAMFkwEwYH KoZIzj0CAQYIKoZIzj0DAQcDQgAE7zhgP7vhI8QzXm0nMC6wvj1c/82sRx4ozvIB 6od9xfiNofmjlDJtdG+IrObrxONhAXffZWDB2N8SmjAcHVz85qOCAU0wggFJMA4G A1UdDwEB/wQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDAzAdBgNVHQ4EFgQU3U2j b80jcAEZIXWnZgIjGEJ39EcwHwYDVR0jBBgwFoAU39Ppz1YkEZb5qNjpKFWixi4Y ZD8wJwYDVR0RAQH/BB0wG4EZZGFuaWVsLmJldmVuaXVzQGdtYWlsLmNvbTAsBgor BgEEAYO/MAEBBB5odHRwczovL2dpdGh1Yi5jb20vbG9naW4vb2F1dGgwgYoGCisG AQQB1nkCBAIEfAR6AHgAdgDdPTBqxscRMmMZHhyZZzcCokpeuN48rf+HinKALynu jgAAAYbaxJHJAAAEAwBHMEUCIFsrHqZF6pqZotjvHvvSPxk7jdtWkAPLn55APKmj lD72AiEA3807EnFi2HLZcoP+85fmCH5awXDX1KLPUW7kibOwKpAwCgYIKoZIzj0E AwMDaQAwZgIxAOf6C68qm6r7Rovurc7j+JQkki8hsoWd68vC+VvSazSFMpCxrvm7 HlrW7oMAzjlCzwIxANQWgC60eNi7QNeqlMlo/UraZz8xFho2d0Fr5fa0ZfALBE82 I9TvCXsVua7/ERp+eQ== -----END CERTIFICATE----- tlog entry created with index: 15311440 MEYCIQDSsR/enheXGrFNLtgEVNLvLFTYPa1cWOTBZBqNYv/kQQIhALFxLx27ECqtVyM3jGedhharRngiHJ4EMdfvA6Bl3+pm ``` The above command will have saved two files, one containing the signature (which can also be seen as the last line of the 
output above), and one which contains the certificate. ### Verify using sigstore-rs: To verify the blob using this example use the following command: ```console cd examples/cosign/verify-blob cargo run --example verify-blob -- \ --certificate certificate \ --signature signature \ artifact.txt Verification succeeded ``` ### Verify using cosign To verify the blob using `cosign verify-blob` we need to specify a `--certificate-oidc-issuer` which currently can be one of the following: * https://github.com/login/oauth * https://accounts.google.com * https://login.microsoftonline.com And we also have to specify the email address we used as the `--certificate-identity`: ```console cosign verify-blob \ --cert certificate \ --signature signature \ --certificate-identity <EMAIL> \ --certificate-oidc-issuer https://github.com/login/oauth \ artifact.txt Verified OK ``` ================================================ FILE: examples/cosign/verify-blob/main.rs ================================================ // // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
extern crate clap; extern crate sigstore; use base64::{Engine as _, engine::general_purpose::STANDARD as BASE64_STD_ENGINE}; use clap::Parser; use sigstore::cosign::CosignCapabilities; use sigstore::cosign::client::Client; extern crate tracing_subscriber; use std::fs; use std::path::PathBuf; use tracing_subscriber::prelude::*; use tracing_subscriber::{EnvFilter, fmt}; #[derive(Parser, Debug)] #[clap(author, version, about, long_about = None)] struct Cli { /// The certificate generate from the `cosign sign-blob` command #[clap(short, long)] certificate: PathBuf, /// The signature generated from the `cosign sign-blob` command #[clap(long, required(false))] signature: PathBuf, /// The blob to verify blob: String, /// Enable verbose mode #[clap(short, long)] verbose: bool, } #[tokio::main] pub async fn main() { let cli = Cli::parse(); // setup logging let level_filter = if cli.verbose { "debug" } else { "info" }; let filter_layer = EnvFilter::new(level_filter); tracing_subscriber::registry() .with(filter_layer) .with(fmt::layer().with_writer(std::io::stderr)) .init(); // certificate may be PEM or "double base64 encoded PEM" (cosign). 
let cert_input = fs::read_to_string(&cli.certificate).expect("error reading certificate"); let certificate = match BASE64_STD_ENGINE.decode(cert_input.clone()) { Ok(res) => String::from_utf8(res).expect("error stringifying PEM certificate"), Err(_) => cert_input, }; let signature = fs::read_to_string(&cli.signature).expect("error reading signature"); let blob = fs::read(cli.blob.as_str()).expect("error reading blob file"); match Client::verify_blob(&certificate, signature.trim(), &blob) { Ok(_) => println!("Verification succeeded"), Err(e) => eprintln!("Verification failed {:?}", e), } } ================================================ FILE: examples/cosign/verify-bundle/.gitignore ================================================ artifact.bundle artifact.txt ================================================ FILE: examples/cosign/verify-bundle/README.md ================================================ This example shows how to verify a blob, using a bundle that was created by the `cosign sign-blob` command. ### Sign README.md file using cosign ``` cd examples/cosign/verify-bundle cosign sign-blob --bundle=artifact.bundle README.md ``` ### Verify using sigstore-rs: ```console cargo run --example verify-bundle -- \ --rekor-pub-key ~/.sigstore/root/targets/rekor.pub \ --bundle artifact.bundle \ README.md ``` ================================================ FILE: examples/cosign/verify-bundle/main.rs ================================================ // // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. use base64::{Engine as _, engine::general_purpose::STANDARD as BASE64_STD_ENGINE}; use clap::Parser; use sigstore::cosign::CosignCapabilities; use sigstore::cosign::bundle::SignedArtifactBundle; use sigstore::cosign::client::Client; use sigstore::crypto::{CosignVerificationKey, SigningScheme}; use std::fs; use tracing_subscriber::prelude::*; use tracing_subscriber::{EnvFilter, fmt}; #[derive(Parser, Debug)] #[clap(author, version, about, long_about = None)] struct Cli { /// Path to bundle file #[clap(short, long)] bundle: String, /// Path to artifact to be verified blob: String, /// File containing Rekor's public key (e.g.: ~/.sigstore/root/targets/rekor.pub) #[clap(long, required = true)] rekor_pub_key: String, /// Rekor public key ID #[clap(long, required = true)] rekor_pub_key_id: String, /// Enable verbose mode #[clap(short, long)] verbose: bool, } #[tokio::main] pub async fn main() { let cli = Cli::parse(); // setup logging let level_filter = if cli.verbose { "debug" } else { "info" }; let filter_layer = EnvFilter::new(level_filter); tracing_subscriber::registry() .with(filter_layer) .with(fmt::layer().with_writer(std::io::stderr)) .init(); let rekor_pub_pem = fs::read_to_string(&cli.rekor_pub_key).expect("error reading rekor's public key"); let rekor_pub_key = CosignVerificationKey::from_pem(rekor_pub_pem.as_bytes(), &SigningScheme::default()) .expect("Cannot create Rekor verification key"); let bundle_json = fs::read_to_string(&cli.bundle).expect("error reading bundle json file"); let blob = fs::read(cli.blob.as_str()).expect("error reading blob file"); let rekor_pub_keys = [(cli.rekor_pub_key_id, rekor_pub_key)] .into_iter() .collect(); let bundle = SignedArtifactBundle::new_verified(&bundle_json, &rekor_pub_keys).unwrap(); // certificate in bundle is double base64 encoded, remove one layer: let cert_data = BASE64_STD_ENGINE .decode(bundle.cert) 
.expect("Error decoding base64 certificate"); let cert = String::from_utf8(cert_data).expect("error stringifying PEM certificate"); match Client::verify_blob(&cert, &bundle.base64_signature, &blob) { Ok(_) => println!("Verification succeeded"), Err(e) => eprintln!("Verification failed: {}", e), } } ================================================ FILE: examples/cosign/verify-bundle/run.sh ================================================ BLOB="README.md" BUNDLE="artifact.bundle" echo -e "\nSign README.md file using sign-blob" cosign sign-blob --bundle=$BUNDLE $BLOB echo -e "\nRun examples/cosign/verify-bundle" cargo run --example verify-bundle -- \ --rekor-pub-key ~/.sigstore/root/targets/rekor.pub \ --bundle $BUNDLE \ $BLOB ================================================ FILE: examples/fulcio/cert/main.rs ================================================ use pkcs8::der::Decode; use sigstore::crypto::SigningScheme; use sigstore::fulcio::oauth::OauthTokenProvider; use sigstore::fulcio::{FULCIO_ROOT, FulcioClient, TokenProvider}; use url::Url; use x509_cert::Certificate; use x509_cert::ext::pkix::SubjectAltName; #[tokio::main] async fn main() { let fulcio = FulcioClient::new( Url::parse(FULCIO_ROOT).unwrap(), TokenProvider::Oauth(OauthTokenProvider::default()), ); if let Ok((_signer, cert)) = fulcio .request_cert(SigningScheme::ECDSA_P256_SHA256_ASN1) .await { println!("Received certificate chain"); let pems = pem::parse_many(cert.as_ref()).expect("parse pem failed"); for pem in &pems { let cert = Certificate::from_der(pem.contents()).expect("parse certificate from der"); let (_, san) = cert .tbs_certificate .get::() .expect("get SAN failed") .expect("No SAN found"); for name in &san.0 { println!("SAN: {name:?}"); } } } } ================================================ FILE: examples/key_interface/README.md ================================================ # Example Key Interface This is a simple example program that shows how to use the key interfaces. 
The key interfaces covers: * Generating Asymmetric encryption key pair * Signing with private key * Exporting the (encrypted) private/public key * Importing the (encrypted) private/public key * Verifying signature with public key The basic implementation for key-interface can be shown in the following diagram ![key_interface](key_interface.drawio.svg) The exposed interfaces (marked as `pub`) include: * `SigStoreSigner` enum: wrapper for `Signer`s of different kinds of signing algorithm. * `SigStoreKeyPair` enum: wrapper for `KeyPair`s of different kinds of asymmetric encryption algorithm. * `SigningScheme` enum: Different kinds of signing algorithm. * `CosignVerificationKey` struct: Public key types to verify signatures for different signing algorithm. To show the different usages for them, there will be three typical scenarios. ## Key Pair Generation, Signing and Verification This example shows the following operations * Generating Asymmetric encryption key pair due to given `SigningScheme`. * Signing the given test data using private key. The signature will be printed in hex. * Verifying the signature generated. The signing process is performed by `SigStoreSigner`. The verifying process is performed by `CosignVerificationKey`. ### Run the example case The following example will create a ECDSA_P256_ASN1 keypair and sign the given data. ```bash cargo run --example key_pair_gen_sign_verify ``` This example includes the following steps: * Randomly generate an `ECDSA_P256_ASN1` key pair, which is represented as `signer` of type `SigStoreSigner` and includes a private key and a public key. Here, the type of the key pair is influenced by the given `SigningScheme`. * Sign the given data `DATA_TO_BE_SIGNED` using the `signer`'s private key. * Derive [`verification_key`](../../src/crypto/verification_key.rs) from the `signer`. * Verify the signature generated before using the `verification_key`. 
## Key Pair Generation and Exporting This example shows the following operations * Generating Asymmetric encryption key pair due to given `SigningScheme`. * Export the public key in both DER and PEM format. * Export the private key in both DER and PEM format. * Export the encrypted private key in PEM format. The key-related operations are performed by `SigStoreKeyPair`. ### Run the example case The following example will create a ECDSA_P256_ASN1 keypair and sign the given data. ```bash cargo run --example key_pair_gen_and_export ``` This example includes the following steps: * Randomly generate an `ECDSA_P256_ASN1` key pair, which is represented as `signer` of type `SigStoreSigner` and includes a private key and a public key. Here, the type of the key pair is influenced by the given `SigningScheme`. * Export the public key in PEM format and DER format. The result will be printed (PEM as string, DER as hex). * Export the private key in PEM format and DER format. The result will be printed (PEM as string, DER as hex). * Export the encrypted private key in PEM format. The result will be printed. ## Key Pair Importing This example shows the following operations * Import the public key in both DER and PEM format to `CosignVerificationKey`. * Import the private key in both DER and PEM format to `SigStoreKeyPair/ECDSAKeys`. * Import the encrypted private key in PEM format to `SigStoreKeyPair/ECDSAKeys`. * Convert the `SigStoreKeyPair` to `SigStoreSigner`. ### Run the example case The following example will create a ECDSA_P256_ASN1 keypair and sign the given data. ```bash cargo run --example key_pair_import ``` This example includes the following steps: * Import the public key `ECDSA_P256_ASN1_PUBLIC_PEM.pub` as `CosignVerificationKey`. * Import the public key `ECDSA_P256_ASN1_PUBLIC_DER.pub` as `CosignVerificationKey`. * Import the private key `ECDSA_P256_ASN1_PRIVATE_PEM.key` as `SigStoreKeyPair`. * Import the private key `ECDSA_P256_ASN1_PRIVATE_PEM.key` as `ECDSAKeys`. 
* Import the private key `ECDSA_P256_ASN1_PRIVATE_DER.key` as `SigStoreKeyPair`. * Import the private key `ECDSA_P256_ASN1_PRIVATE_DER.key` as `ECDSAKeys`. * Import the encrypted private key `ECDSA_P256_ASN1_ENCRYPTED_PRIVATE_PEM.key` as `SigStoreKeyPair`. * Import the encrypted private key `ECDSA_P256_ASN1_ENCRYPTED_PRIVATE_PEM.key` as `ECDSAKeys`. * Convert the last `SigStoreKeyPair` to `SigStoreSigner`. ================================================ FILE: examples/key_interface/key_pair_gen_and_export/main.rs ================================================ // // Copyright 2022 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
use anyhow::Result; use sigstore::crypto::SigningScheme; const PASSWORD: &str = "example password"; fn main() -> Result<()> { let signer = SigningScheme::ECDSA_P256_SHA256_ASN1.create_signer()?; println!("Created a new key pair for ECDSA_P256_SHA256_ASN1.\n"); let key_pair = signer.to_sigstore_keypair()?; println!("Derived `SigStoreKeyPair` from the `SigStoreSigner`.\n"); let pub_pem = key_pair.public_key_to_pem()?; println!("Exported the public key in PEM format."); println!("public key:\n {}", pub_pem); let pub_der = key_pair.public_key_to_der()?; println!("Exported the public key in DER format."); println!("public key:\n {:x?}", pub_der); let pri_pem = key_pair.private_key_to_pem()?; println!("Exported the private key in PEM format."); println!("private key:\n {}", *pri_pem); let pri_der = key_pair.private_key_to_der()?; println!("Exported the private key in DER format."); println!("private key:\n {:x?}", *pri_der); let encrypted_pri_pem = key_pair.private_key_to_encrypted_pem(PASSWORD.as_bytes())?; println!("Exported the encrypted private key in PEM format."); println!("private key:\n {}", *encrypted_pri_pem); Ok(()) } ================================================ FILE: examples/key_interface/key_pair_gen_sign_verify/main.rs ================================================ // // Copyright 2022 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
use anyhow::{Result, anyhow}; use sigstore::crypto::{Signature, SigningScheme}; const DATA_TO_BE_SIGNED: &str = "this is an example data to be signed"; fn main() -> Result<()> { let signer = SigningScheme::ECDSA_P256_SHA256_ASN1.create_signer()?; println!("Created a new key pair for ECDSA_P256_SHA256_ASN1.\n"); let signature_data = signer.sign(DATA_TO_BE_SIGNED.as_bytes())?; println!("Signed the example data."); println!("Data: {}", DATA_TO_BE_SIGNED); println!("Signature: {:x?}\n", &signature_data); let verification_key = signer.to_verification_key()?; println!("Derive verification key from the signer.\n"); println!("Verifying the signature of the example data..."); match verification_key.verify_signature( Signature::Raw(&signature_data), DATA_TO_BE_SIGNED.as_bytes(), ) { Ok(_) => { println!("Verification Succeeded."); Ok(()) } Err(e) => Err(anyhow!("Verifycation failed: {}", e)), } } ================================================ FILE: examples/key_interface/key_pair_import/ECDSA_P256_ASN1_ENCRYPTED_PRIVATE_PEM.key ================================================ -----BEGIN ENCRYPTED COSIGN PRIVATE KEY----- eyJrZGYiOnsibmFtZSI6InNjcnlwdCIsInBhcmFtcyI6eyJOIjozMjc2OCwiciI6 OCwicCI6MX0sInNhbHQiOiI3eWtwR1NRNVBpVWJZb1B0eXZ4cnhHWjcrM2NtZmdM WmwwQnhDQlVHRUdZPSJ9LCJjaXBoZXIiOnsibmFtZSI6Im5hY2wvc2VjcmV0Ym94 Iiwibm9uY2UiOiI0N2dPOVNCd0FueXVOZlQxVzlzcFVYOGxCREVnckgwZyJ9LCJj aXBoZXJ0ZXh0IjoiRHhmenJlWis5eXNLdVlHOU02L0F1dVl1bVVFa2tHcFdWUmhK V242dGp0VW1RTzBqNTJSa1RXZTYwVTk2bFMzelVldm9BUGU2MjhPMHFQODlkdkwv MEJObjljdWZYUVZObnAxVWJsUVBaQm9tRFRsUFR6NnZFa3doMS9XTFhTa2NKekdW ZnpqN3ZQSDRaVFhqRjBVRnRqYzZ6QmhqZFk5ZjhnRklzVERiYSs3S013eWxPUGhW ditpakl3R0R5Zk43RGNlWTJzYTdNZHM5Q2c9PSJ9 -----END ENCRYPTED COSIGN PRIVATE KEY----- ================================================ FILE: examples/key_interface/key_pair_import/ECDSA_P256_ASN1_PRIVATE_PEM.key ================================================ -----BEGIN PRIVATE KEY----- 
MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQg5qt7YAIL9zSg38Pi 5DX7rHjEcQAZkDk5MulVsr6x3QehRANCAASXe5tGHMHmug4BWmGl2HtIJlG8AIEV pWZ895mqN6Yv2X6HA1n7yxjDQdqJMmFmvQm9C7Z8HGR3kbj1LQyi+DaY -----END PRIVATE KEY----- ================================================ FILE: examples/key_interface/key_pair_import/ECDSA_P256_ASN1_PUBLIC_PEM.pub ================================================ -----BEGIN PUBLIC KEY----- MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE10MJqZ6tgxCxOvANHgfKMY90bso8 H+Iq3rPfT6GrFbYAgckw24H69hgnTHrujYAtjhK6csqLXkgwFzYh2Hdckw== -----END PUBLIC KEY----- ================================================ FILE: examples/key_interface/key_pair_import/main.rs ================================================ // // Copyright 2022 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
use anyhow::{Result, bail}; use sigstore::crypto::{ CosignVerificationKey, SigningScheme, signing_key::{SigStoreKeyPair, ecdsa::ECDSAKeys}, }; const PASSWORD: &str = "password"; const ECDSA_P256_ASN1_PUBLIC_PEM: &[u8] = include_bytes!("./ECDSA_P256_ASN1_PUBLIC_PEM.pub"); const ECDSA_P256_ASN1_PUBLIC_DER: &[u8] = include_bytes!("./ECDSA_P256_ASN1_PUBLIC_DER.pub"); const ECDSA_P256_ASN1_PRIVATE_PEM: &[u8] = include_bytes!("./ECDSA_P256_ASN1_PRIVATE_PEM.key"); const ECDSA_P256_ASN1_PRIVATE_DER: &[u8] = include_bytes!("./ECDSA_P256_ASN1_PRIVATE_DER.key"); const ECDSA_P256_ASN1_ENCRYPTED_PRIVATE_PEM: &[u8] = include_bytes!("./ECDSA_P256_ASN1_ENCRYPTED_PRIVATE_PEM.key"); fn main() -> Result<()> { let _ = CosignVerificationKey::from_pem(ECDSA_P256_ASN1_PUBLIC_PEM, &SigningScheme::default())?; println!( "Imported PEM encoded public key as CosignVerificationKey using ECDSA_P256_ASN1_PUBLIC_PEM as verification algorithm." ); let _ = CosignVerificationKey::from_der(ECDSA_P256_ASN1_PUBLIC_DER, &SigningScheme::default())?; println!( "Imported DER encoded public key as CosignVerificationKey using ECDSA_P256_ASN1_PUBLIC_PEM as verification algorithm." 
); let _ = CosignVerificationKey::try_from_pem(ECDSA_P256_ASN1_PUBLIC_PEM)?; println!("Imported PEM encoded public key as CosignVerificationKey."); let _ = CosignVerificationKey::try_from_der(ECDSA_P256_ASN1_PUBLIC_DER)?; println!("Imported DER encoded public key as CosignVerificationKey."); let _ = SigStoreKeyPair::from_pem(ECDSA_P256_ASN1_PRIVATE_PEM)?; println!("Imported PEM encoded private key as SigStoreKeyPair."); let _ = ECDSAKeys::from_pem(ECDSA_P256_ASN1_PRIVATE_PEM)?; println!("Imported PEM encoded private key as ECDSAKeys."); let _ = SigStoreKeyPair::from_der(ECDSA_P256_ASN1_PRIVATE_DER)?; println!("Imported DER encoded private key as SigStoreKeyPair."); let _ = ECDSAKeys::from_der(ECDSA_P256_ASN1_PRIVATE_DER)?; println!("Imported DER encoded private key as ECDSAKeys."); let key_pair = SigStoreKeyPair::from_encrypted_pem( ECDSA_P256_ASN1_ENCRYPTED_PRIVATE_PEM, PASSWORD.as_bytes(), )?; println!("Imported encrypted PEM encoded private key as SigStoreKeyPair."); let ecdsa_key_pair = ECDSAKeys::from_encrypted_pem(ECDSA_P256_ASN1_ENCRYPTED_PRIVATE_PEM, PASSWORD.as_bytes())?; println!("Imported encrypted PEM encoded private key as ECDSAKeys."); let _ = ecdsa_key_pair.to_sigstore_signer()?; println!("Converted ECDSAKeys to SigStoreSigner."); match key_pair { SigStoreKeyPair::ECDSA(inner) => { inner.to_sigstore_signer()?; println!("Converted SigStoreKeyPair to SigStoreSigner."); } _ => bail!("Wrong key pair type."), } Ok(()) } ================================================ FILE: examples/openidflow/README.md ================================================ # Open ID Connect Flow for Fulcio Signing Certificates This is an example of the fulcio OpenID connect flow. The general idea is to return an access_token and the email via a scope. 
Both values can then be made to form a POST request to fulcio for a software signing certificate `cargo run --example openidconnect --all-features` The implementation contains a `redirect_listener` function that will create a local listening server to incept the ID token and scopes returned from sigstores OIDC service. However should you prefer, you can implement your own redirect service and simply pass along the required values: * client: CoreClient, * nonce: Nonce, * pkce_verifier: PkceCodeVerifier ================================================ FILE: examples/openidflow/openidconnect/main.rs ================================================ // // Copyright 2022 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
use anyhow::Result; use sigstore::oauth; fn main() -> Result<(), anyhow::Error> { let oidc_url = oauth::openidflow::OpenIDAuthorize::new( "sigstore", "", "https://oauth2.sigstore.dev/auth", "http://localhost:8080", ) .auth_url(); match oidc_url.as_ref() { Ok(url) => { webbrowser::open(url.0.as_ref())?; println!( "Open this URL in a browser if it does not automatically open for you:\n{}\n", url.0 ); } Err(e) => println!("{}", e), } let oidc_url = oidc_url?; let result = oauth::openidflow::RedirectListener::new( "127.0.0.1:8080", oidc_url.1, // client oidc_url.2, // nonce oidc_url.3, // pkce_verifier ) .redirect_listener(); match result { Ok((token_response, id_token)) => { println!("Email {:?}", token_response.email().unwrap()); println!( "Access Token:{:?}", token_response.access_token_hash().unwrap() ); println!("id_token: {:?}", id_token.to_string()); } Err(err) => { println!("{}", err); } } anyhow::Ok(()) } ================================================ FILE: examples/rekor/README.md ================================================ # Rekor Transparency Log Client The following examples all interface with the Rekor Transparency Log Client. ================================================ FILE: examples/rekor/create_log_entry/main.rs ================================================ // // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
use base64::{Engine as _, engine::general_purpose}; use sha2::Digest; use sha2::Sha256; use sigstore::crypto::SigningScheme; use sigstore::crypto::signing_key::SigStoreSigner; use sigstore::rekor::apis::{configuration::Configuration, entries_api}; use sigstore::rekor::models::{ ProposedEntry, hashedrekord::{AlgorithmKind, Data, Hash, PublicKey, Signature, Spec}, }; use clap::{Arg, Command}; async fn create_signer() -> SigStoreSigner { SigningScheme::ECDSA_P256_SHA256_ASN1 .create_signer() .expect("cannot create sigstore signer") } // function to fetch data and generate the hash of it to be signed and upload to the transparency log async fn get_file_sha256sum(url: String) -> Result<(Vec, String), reqwest::Error> { let body = reqwest::get(&url).await?.bytes().await?; let mut digester = Sha256::new(); digester.update(body.clone()); let digest = format!("{:x}", digester.finalize()); Ok((body.to_vec(), digest)) } #[tokio::main] async fn main() { /* Creates an entry in the transparency log. If no command line arguments is provided, the program will generate a key pair, download the file available at URL constant, sign it and create an entry in the transparency log. In the other hand, if the user sets the command line flags, the program will use that info to create the entry. Therefore, if the user use information of an entry already present in the transparency log, this program can print an error. 
See an example: Example command : cargo run --example create_log_entry -- \ --hash c7ead87fa5c82d2b17feece1c2ee1bda8e94788f4b208de5057b3617a42b7413\ --url https://raw.githubusercontent.com/jyotsna-penumaka/rekor-rs/rekor-functionality/test_data/data\ --public_key LS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0KTUZrd0V3WUhLb1pJemowQ0FRWUlLb1pJemowREFRY0RRZ0FFeEhUTWRSQk80ZThCcGZ3cG5KMlozT2JMRlVrVQpaUVp6WGxtKzdyd1lZKzhSMUZpRWhmS0JZclZraGpHL2lCUjZac2s3Z01iYWZPOG9FM01lUEVvWU93PT0KLS0tLS1FTkQgUFVCTElDIEtFWS0tLS0tCg==\ --signature MEUCIHWACbBnw+YkJCy2tVQd5i7VH6HgkdVBdP7HRV1IEsDuAiEA19iJNvmkE6We7iZGjHsTkjXV8QhK9iXu0ArUxvJF1N8=\ --api_version 0.0.1 When the example code is run with the default values, the following error message gets returned: Err( ResponseError( ResponseContent { status: 409, content: "{\"code\":409,\"message\":\"An equivalent entry already exists in the transparency log with UUID 1377da9d9dbad451a5a8acdd28add750815d34e8205f1b8a35a67b8a27dae9bf\"}\n", entity: Some( Status400( Error { code: Some( 409, ), message: Some( "An equivalent entry already exists in the transparency log with UUID 1377da9d9dbad451a5a8acdd28add750815d34e8205f1b8a35a67b8a27dae9bf", ), }, ), ), }, ), ) This is because an equivalent entry with the provided meta data already exists in the transparency log. When you use the example code to create a new entry with fresh set of input values or leaving the program to generate the required data, you should be able to run the code without any errors. 
See an example: Example command : cargo run --example create_log_entry -- The expected output will be something similar to: Ok( LogEntry { uuid: "24296fb24b8ad77afa01e2c1f5555326e4fc32a942b40a2d798ae72a8f10c801f6e8dee771dfbacc", attestation: None, body: "eyJhcGlWZXJzaW9uIjoiMC4wLjEiLCJraW5kIjoiaGFzaGVkcmVrb3JkIiwic3BlYyI6eyJkYXRhIjp7Imhhc2giOnsiYWxnb3JpdGhtIjoic2hhMjU2IiwidmFsdWUiOiJjN2VhZDg3ZmE1YzgyZDJiMTdmZWVjZTFjMmVlMWJkYThlOTQ3ODhmNGIyMDhkZTUwNTdiMzYxN2E0MmI3NDEzIn19LCJzaWduYXR1cmUiOnsiY29udGVudCI6Ik1FVUNJQWh4elhWZnRyMWpyS0k3dEluWW5iR1pNMDZybFhpQ1lUMTRJbFdFazF4QkFpRUE0SGllM2l4cTRyOG9tVVgwclRDV2o3UmducVhqUEFZTmlkaDlQVllrQXFVPSIsInB1YmxpY0tleSI6eyJjb250ZW50IjoiTFMwdExTMUNSVWRKVGlCUVZVSk1TVU1nUzBWWkxTMHRMUzBLVFVacmQwVjNXVWhMYjFwSmVtb3dRMEZSV1VsTGIxcEplbW93UkVGUlkwUlJaMEZGYjNCRmJGTlJlbGRTTUM5Sk5raEJZbm9yV21sVmFsVlhWR051WlFvdlUwUndWV1ZOVUhGR04wUXlZbU5xV2tKRlYweGhiak5XTjB3cmVHNW5jVFJHYW1wRGVtdHlLMFkwYlc5bFNEaFJTbWhNYUV0SlQzWlJQVDBLTFMwdExTMUZUa1FnVUZWQ1RFbERJRXRGV1MwdExTMHRDZz09In19fX0=", integrated_time: 1675277501, log_i_d: "c0d23d6ad406973f9559f3ba2d1ca01f84147d8ffc5b8445c224f98b9591801d", log_index: 12425816, verification: Verification { inclusion_proof: Some( InclusionProof { hashes: [ "a0b75928818e5fa302c3690a895e0385803a079391a79cf9f25b08a51eebc338", "3a39532ac61bf4d3f9a982e38f1b3166a3222e9d8a081d31f67d0da745117dc5", "96ff049c2d122233d7e44b49d5df16b0901dbf85523d90bd739a3a25d26a974c", "09805d1e21e395d8b82e7269c7ddff2941564f925145196273a993c452059a85", "37ca98bdb80bdc45768539d15117b2b57531b3ad1f051aaa4f58d030f868f86e", "e81e08afa83961c36e2a6961f66859620c9ee4ed9be5631ace9f3c27c72f66fb", "2f01e165e3758aba5fd53d0c03c88b84ccbed7334173d9159d87fed5930bfe03", "d0509b1c0bde3ba0517efcc5d3e8d2007fe86e5e055cebd5e94307cd0394c22d", "8a84151f9b8fbbf7b3e77cb658535ec46d27c1cdd1cab714558dc51114922e7a", "5eb43eca7a763e2eaafb2bc2fc963e7802283cf1a9076638242177b1669942c0", "5523cd019fea93d01834fc429f708b700aeb72c835a73161cdb9003f8f4e8072", 
"4b6df664d9552bc24d48a4c7d5659a8270065e1fedbc39103b010ab235a87850", "616429db6c7d20c5b0eff1a6e512ea57a0734b94ae0bc7c914679463e01a7fba", "5a4ad1534b1e770f02bfde0de15008a6971cf1ffbfa963fc9c2a644973a8d2d1", ], log_index: 8262385, root_hash: "41b3e1294d122b2190396de7de92731a378378ac2d7f620eb01d653838e88219", tree_size: 8262387, }, ), signed_entry_timestamp: "MEUCIG/vIwjuQoiVZtxw48KSMYyxXlpHA/y8kxYTJh46qbejAiEAyFAP5oQjxT6xFK7wKYW33sa/5wFQvqtKsdTLnitrzWA=", }, }, ) */ const URL: &str = "https://raw.githubusercontent.com/jyotsna-penumaka/rekor-rs/rekor-functionality/test_data/data"; const API_VERSION: &str = "0.0.1"; let data_job = get_file_sha256sum(URL.to_string()); let signer_job = create_signer(); let matches = Command::new("cmd") .arg(Arg::new("hash") .long("hash") .value_name("HASH") .help("hash of the artifact")) .arg(Arg::new("url") .long("url") .value_name("URL") .help("url containing the contents of the artifact (raw github url)")) .arg(Arg::new("public_key") .long("public_key") .value_name("PUBLIC_KEY") .help("base64 encoded public_key. Look at https://raw.githubusercontent.com/jyotsna-penumaka/rekor-rs/rekor-functionality/test_data/create_log_entry.md for more details on generating keys.")) .arg(Arg::new("signature") .long("signature") .value_name("SIGNATURE") .help("base64 encoded signature of the artifact. 
Look at https://raw.githubusercontent.com/jyotsna-penumaka/rekor-rs/rekor-functionality/test_data/create_log_entry.md for more details on generating keys.")) .arg(Arg::new("api_version") .long("api_version") .value_name("API_VERSION") .help("Rekor-rs open api version")); let flags = matches.get_matches(); let configuration = Configuration::default(); let (data_bytes, digest) = data_job.await.expect("Cannot get data digest"); let signer = signer_job.await; let public_key_base64 = general_purpose::STANDARD.encode( signer .to_sigstore_keypair() .expect("Cannot get sigstore keypair") .public_key_to_pem() .expect("Cannot set public key"), ); let sig = general_purpose::STANDARD.encode(signer.sign(&data_bytes).expect("Cannot sign data")); let hash = Hash::new( AlgorithmKind::sha256, flags .get_one::("hash") .unwrap_or(&digest) .to_owned(), ); let data = Data::new(hash); let public_key = PublicKey::new( flags .get_one::("public_key") .unwrap_or(&public_key_base64) .to_owned(), ); let signature = Signature::new( flags.get_one("signature").unwrap_or(&sig).to_owned(), public_key, ); let spec = Spec::new(signature, data); let proposed_entry = ProposedEntry::Hashedrekord { api_version: flags .get_one::("api_version") .unwrap_or(&API_VERSION.to_string()) .to_owned(), spec, }; let log_entry = entries_api::create_log_entry(&configuration, proposed_entry).await; println!("{:#?}", log_entry); } ================================================ FILE: examples/rekor/get_log_entry_by_index/main.rs ================================================ // // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use clap::{Arg, Command}; use sigstore::rekor::apis::{configuration::Configuration, entries_api}; use sigstore::rekor::models::log_entry::LogEntry; use std::str::FromStr; #[tokio::main] async fn main() { /* Retrieves an entry and inclusion proof from the transparency log (if it exists) by index Example command : cargo run --example get_log_entry_by_index -- --log_index 99 */ let matches = Command::new("cmd").arg( Arg::new("log_index") .long("log_index") .value_name("LOG_INDEX") .help("log_index of the artifact"), ); // The following default value will be used if the user does not input values using cli flags const LOG_INDEX: &str = "1"; let flags = matches.get_matches(); let index = i32::from_str( flags .get_one::("log_index") .unwrap_or(&LOG_INDEX.to_string()), ) .unwrap(); let configuration = Configuration::default(); let message: LogEntry = entries_api::get_log_entry_by_index(&configuration, index) .await .unwrap(); println!("{:#?}", message); } ================================================ FILE: examples/rekor/get_log_entry_by_uuid/main.rs ================================================ // // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use clap::{Arg, Command}; use sigstore::rekor::apis::{configuration::Configuration, entries_api}; use sigstore::rekor::models::log_entry::LogEntry; #[tokio::main] async fn main() { /* Get log entry and information required to generate an inclusion proof for the entry in the transparency log Example command : cargo run --example get_log_entry_by_uuid -- --uuid 073970a07c978b7a9ff15b69fe15d87dfb58fd5756086e3d1fb671c2d0bd95c0 */ let matches = Command::new("cmd").arg( Arg::new("uuid") .long("uuid") .value_name("UUID") .help("uuid of the artifact"), ); // The following default value will be used if the user does not input values using cli flags const UUID: &str = "073970a07c978b7a9ff15b69fe15d87dfb58fd5756086e3d1fb671c2d0bd95c0"; let flags = matches.get_matches(); let uuid = flags .get_one::("uuid") .unwrap_or(&UUID.to_string()) .to_owned(); let configuration = Configuration::default(); let message: LogEntry = entries_api::get_log_entry_by_uuid(&configuration, &uuid) .await .unwrap(); println!("{:#?}", message); } ================================================ FILE: examples/rekor/get_log_info/main.rs ================================================ // // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use sigstore::rekor::apis::{configuration::Configuration, tlog_api}; use sigstore::rekor::models::LogInfo; #[tokio::main] async fn main() { /* Gets information about the current state of the transparency log. Returns the current root hash and size of the merkle tree used to store the log entries. Example command : cargo run --example get_log_info The server might return an error sometimes, this is because the result depends on the kind of rekor object that gets returned. */ let configuration = Configuration::default(); let log_info: LogInfo = tlog_api::get_log_info(&configuration).await.unwrap(); println!("{:#?}", log_info); } ================================================ FILE: examples/rekor/get_log_proof/main.rs ================================================ // // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
use clap::{Arg, Command}; use sigstore::rekor::apis::{configuration::Configuration, tlog_api}; use sigstore::rekor::models::ConsistencyProof; use std::str::FromStr; #[tokio::main] async fn main() { /* Get information required to generate a consistency proof for the transparency log. Returns a list of hashes for specified tree sizes that can be used to confirm the consistency of the transparency log. Example command : cargo run --example get_log_proof -- --last_size 10 cargo run --example get_log_proof -- --last_size 10 --first_size 1 */ let matches = Command::new("cmd") .arg(Arg::new("last_size") .long("last_size") .value_name("LAST_SIZE") .help("The size of the tree that you wish to prove consistency to")) .arg(Arg::new("first_size") .long("first_size") .value_name("FIRST_SIZE") .help("The size of the tree that you wish to prove consistency from (1 means the beginning of the log). Defaults to 1. To use the default value, do not input any value")) .arg(Arg::new("tree_id") .long("tree_id") .value_name("TREE_ID") .help("The tree ID of the tree that you wish to prove consistency for. To use the default value, do not input any value.")); let configuration = Configuration::default(); let flags = matches.get_matches(); // The following default value will be used if the user does not input values using cli flags const LAST_SIZE: &str = "10"; let log_proof: ConsistencyProof = tlog_api::get_log_proof( &configuration, i32::from_str( flags .get_one::("last_size") .unwrap_or(&LAST_SIZE.to_string()), ) .unwrap(), flags.get_one::("first_size").map(|s| s.as_str()), flags.get_one::("tree_id").map(|s| s.as_str()), ) .await .unwrap(); println!("{:#?}", log_proof); } ================================================ FILE: examples/rekor/get_public_key/main.rs ================================================ // // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::{fs::write, process}; use clap::{Arg, Command}; use sigstore::rekor::apis::{configuration::Configuration, pubkey_api}; #[tokio::main] async fn main() { /* Returns the public key that can be used to validate the signed tree head Example command : cargo run --example get_public_key */ let matches = Command::new("cmd") .arg(Arg::new("tree_id") .long("tree_id") .value_name("TREE_ID") .help("The tree ID of the tree that you wish to prove consistency for. To use the default value, do not input any value.")) .arg(Arg::new("output") .short('o') .long("output") .value_name("OUTPUT_FILE") .num_args(0..=1) .require_equals(true) .default_missing_value("key.pub") .help("The path to the output file that you wish to store the public key in. 
To use the default value (pub.key), do not provide OUTPUT_FILE.")); let flags = matches.get_matches(); let configuration = Configuration::default(); let pubkey = pubkey_api::get_public_key( &configuration, flags.get_one::("tree_id").map(|s| s.as_str()), ) .await .expect("Unable to retrieve public key"); if let Some(out_path) = flags.get_one::("output") { match write(out_path, pubkey) { Ok(_) => (), Err(e) => { eprintln!("Could not write to {out_path}: {e}"); process::exit(1); } } } else { print!("{}", pubkey); } } ================================================ FILE: examples/rekor/merkle_proofs/consistency.rs ================================================ use clap::Parser; use sigstore::crypto::CosignVerificationKey; use sigstore::rekor::apis::configuration::Configuration; use sigstore::rekor::apis::tlog_api::{get_log_info, get_log_proof}; use std::fs::read_to_string; use std::path::PathBuf; #[derive(Parser)] struct Args { #[arg(long, value_name = "REKOR PUBLIC KEY")] rekor_key: PathBuf, #[arg(long, value_name = "HEX ENCODED HASH")] old_root: String, #[arg(long)] old_size: u64, #[arg(long, value_name = "TREE ID")] tree_id: Option, } #[tokio::main] async fn main() -> Result<(), Box> { let args = Args::parse(); let tree_id = args.tree_id.as_deref(); // read verification key let rekor_key = read_to_string(&args.rekor_key) .map_err(Into::into) .and_then(|k| CosignVerificationKey::from_pem(k.as_bytes(), &Default::default()))?; // fetch log info let rekor_config = Configuration::default(); let log_info = get_log_info(&rekor_config).await?; let proof = get_log_proof( &rekor_config, log_info.tree_size as _, Some(&args.old_size.to_string()), tree_id, ) .await?; let old_root_bytes: [u8; 32] = hex::FromHex::from_hex(&args.old_root)?; log_info .verify_consistency(args.old_size, &old_root_bytes, &proof, &rekor_key) .expect("failed to verify log consistency"); println!("Successfully verified consistency"); Ok(()) } ================================================ FILE: 
examples/rekor/merkle_proofs/inclusion.rs ================================================ use clap::Parser; use sigstore::crypto::CosignVerificationKey; use sigstore::rekor::apis::configuration::Configuration; use sigstore::rekor::apis::entries_api::get_log_entry_by_index; use std::fs::read_to_string; use std::path::PathBuf; #[derive(Parser)] struct Args { #[arg(long, value_name = "INDEX")] log_index: usize, #[arg(long, value_name = "REKOR PUBLIC KEY")] rekor_key: PathBuf, } #[tokio::main] async fn main() -> Result<(), Box> { let args = Args::parse(); // read verification key let rekor_key = read_to_string(&args.rekor_key) .map_err(Into::into) .and_then(|k| CosignVerificationKey::from_pem(k.as_bytes(), &Default::default()))?; // fetch entry from log let rekor_config = Configuration::default(); let log_entry = get_log_entry_by_index(&rekor_config, args.log_index as i32).await?; // verify inclusion with key log_entry .verify_inclusion(&rekor_key) .expect("failed to verify log inclusion"); println!("Successfully verified inclusion."); Ok(()) } ================================================ FILE: examples/rekor/search_index/main.rs ================================================ // // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
use clap::{Arg, Command}; use sigstore::rekor::apis::{configuration::Configuration, index_api}; use sigstore::rekor::models::{ SearchIndex, search_index_public_key, search_index_public_key::Format, }; #[tokio::main] async fn main() { /* Searches index by entry metadata Example command: cargo run --example search_index -- \ --hash e2535d638859bb63ea9ea5cf467562cba63b007eae1acd0d73a3f259c582561f \ --public_key c3NoLWVkMjU1MTkgQUFBQUMzTnphQzFsWkRJMU5URTVBQUFBSVA3M2tuT0tKYVNyVEtEa2U2OEgvRlJoODRZWU5CU0tBN1hPVWRpWmJjeG8gdGVzdEByZWtvci5kZXYK \ --key_format ssh \ --email jpenumak@redhat.com cargo run --example search_index -- \ --public_key c3NoLWVkMjU1MTkgQUFBQUMzTnphQzFsWkRJMU5URTVBQUFBSVA3M2tuT0tKYVNyVEtEa2U2OEgvRlJoODRZWU5CU0tBN1hPVWRpWmJjeG8gdGVzdEByZWtvci5kZXYK \ --key_format ssh \ --email jpenumak@redhat.com The server might return an error sometimes, this is because the result depends on the kind of rekor object that gets returned. */ let matches = Command::new("cmd") .arg(Arg::new("hash") .long("hash") .value_name("HASH") .help("hash of the artifact")) .arg(Arg::new("url") .long("url") .value_name("URL") .help("url containing the contents of the artifact (raw github url)")) .arg(Arg::new("public_key") .long("public_key") .value_name("PUBLIC_KEY") .help("base64 encoded public_key. 
Look at https://raw.githubusercontent.com/jyotsna-penumaka/rekor-rs/rekor-functionality/test_data/create_log_entry.md for more details on generating keys.")) .arg(Arg::new("key_format") .long("key_format") .value_name("KEY_FORMAT") .help("Accepted formats are : pgp / x509 / minsign / ssh / tuf")) .arg(Arg::new("email") .long("email") .value_name("EMAIL") .help("Author's email")); let flags = matches.get_matches(); // The following default values will be used if the user does not input values using cli flags const HASH: &str = "c7ead87fa5c82d2b17feece1c2ee1bda8e94788f4b208de5057b3617a42b7413"; const PUBLIC_KEY: &str = "LS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0KTUZrd0V3WUhLb1pJemowQ0FRWUlLb1pJemowREFRY0RRZ0FFeEhUTWRSQk80ZThCcGZ3cG5KMlozT2JMRlVrVQpaUVp6WGxtKzdyd1lZKzhSMUZpRWhmS0JZclZraGpHL2lCUjZac2s3Z01iYWZPOG9FM01lUEVvWU93PT0KLS0tLS1FTkQgUFVCTElDIEtFWS0tLS0tCg=="; const KEY_FORMAT: &str = "x509"; const EMAIL: &str = "jpenumak@redhat.com"; let key_format = match flags .get_one::("key_format") .unwrap_or(&KEY_FORMAT.to_string()) .as_str() { "pgp" => Format::Pgp, "x509" => Format::X509, "minisign" => Format::Minisign, "ssh" => Format::Ssh, _ => Format::Tuf, }; let public_key = search_index_public_key::SearchIndexPublicKey { format: key_format, content: Some( flags .get_one::("public_key") .unwrap_or(&PUBLIC_KEY.to_string()) .to_owned(), ), url: None, }; let query = SearchIndex { email: Some( flags .get_one::("email") .unwrap_or(&EMAIL.to_string()) .to_owned(), ), public_key: Some(public_key), hash: Some( flags .get_one("hash") .unwrap_or(&HASH.to_string()) .to_owned(), ), }; let configuration = Configuration::default(); let uuid_vec = index_api::search_index(&configuration, query).await; println!("{:#?}", uuid_vec); } ================================================ FILE: examples/rekor/search_log_query/main.rs ================================================ // // Copyright 2021 The Sigstore Authors. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use clap::{Arg, Command}; use sigstore::rekor::apis::{configuration::Configuration, entries_api}; use sigstore::rekor::models::{ ProposedEntry, SearchLogQuery, hashedrekord::{AlgorithmKind, Data, Hash, PublicKey, Signature, Spec}, }; use std::str::FromStr; #[tokio::main] async fn main() { /* Searches transparency log for one or more log entries. Returns zero or more entries from the transparency log, according to how many were included in request query. 
Example command : cargo run --example search_log_query -- \ --hash c7ead87fa5c82d2b17feece1c2ee1bda8e94788f4b208de5057b3617a42b7413\ --url https://raw.githubusercontent.com/jyotsna-penumaka/rekor-rs/rekor-functionality/test_data/data\ --public_key LS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0KTUZrd0V3WUhLb1pJemowQ0FRWUlLb1pJemowREFRY0RRZ0FFeEhUTWRSQk80ZThCcGZ3cG5KMlozT2JMRlVrVQpaUVp6WGxtKzdyd1lZKzhSMUZpRWhmS0JZclZraGpHL2lCUjZac2s3Z01iYWZPOG9FM01lUEVvWU93PT0KLS0tLS1FTkQgUFVCTElDIEtFWS0tLS0tCg==\ --signature MEUCIHWACbBnw+YkJCy2tVQd5i7VH6HgkdVBdP7HRV1IEsDuAiEA19iJNvmkE6We7iZGjHsTkjXV8QhK9iXu0ArUxvJF1N8=\ --key_format x509\ --api_version 0.0.1\ --entry_uuids 1377da9d9dbad451a5a8acdd28add750815d34e8205f1b8a35a67b8a27dae9bf\ --log_indexes 2922253 */ // The following default values will be used if the user does not input values using cli flags const HASH: &str = "c7ead87fa5c82d2b17feece1c2ee1bda8e94788f4b208de5057b3617a42b7413"; const PUBLIC_KEY: &str = "LS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0KTUZrd0V3WUhLb1pJemowQ0FRWUlLb1pJemowREFRY0RRZ0FFeEhUTWRSQk80ZThCcGZ3cG5KMlozT2JMRlVrVQpaUVp6WGxtKzdyd1lZKzhSMUZpRWhmS0JZclZraGpHL2lCUjZac2s3Z01iYWZPOG9FM01lUEVvWU93PT0KLS0tLS1FTkQgUFVCTElDIEtFWS0tLS0tCg=="; const SIGNATURE: &str = "MEUCIHWACbBnw+YkJCy2tVQd5i7VH6HgkdVBdP7HRV1IEsDuAiEA19iJNvmkE6We7iZGjHsTkjXV8QhK9iXu0ArUxvJF1N8="; const API_VERSION: &str = "0.0.1"; const ENTRY_UUIDS: &str = "1377da9d9dbad451a5a8acdd28add750815d34e8205f1b8a35a67b8a27dae9bf"; const LOG_INDEXES: &str = "2922253"; let matches = Command::new("cmd") .arg(Arg::new("hash") .long("hash") .value_name("HASH") .help("hash of the artifact")) .arg(Arg::new("url") .long("url") .value_name("URL") .help("url containing the contents of the artifact (raw github url)")) .arg(Arg::new("public_key") .long("public_key") .value_name("PUBLIC_KEY") .help("base64 encoded public_key. 
Look at https://raw.githubusercontent.com/jyotsna-penumaka/rekor-rs/rekor-functionality/test_data/create_log_entry.md for more details on generating keys.")) .arg(Arg::new("key_format") .long("key_format") .value_name("KEY_FORMAT") .help("Accepted formats are : pgp / x509 / minsign / ssh / tuf")) .arg(Arg::new("signature") .long("signature") .value_name("SIGNATURE") .help("base64 encoded signature of the artifact. Look at https://raw.githubusercontent.com/jyotsna-penumaka/rekor-rs/rekor-functionality/test_data/create_log_entry.md for more details on generating keys.")) .arg(Arg::new("api_version") .long("api_version") .value_name("API_VERSION") .help("Rekor-rs open api version")) .arg(Arg::new("entry_uuids") .long("entry_uuids") .value_name("ENTRY_UUIDS") .help("the uuids of the entries to search for")) .arg(Arg::new("log_indexes") .long("log_indexes") .value_name("LOG_INDEXES") .help("the log_indexes of the entries to search for")); let flags = matches.get_matches(); let hash = Hash::new( AlgorithmKind::sha256, flags .get_one::("hash") .unwrap_or(&HASH.to_string()) .to_owned(), ); let data = Data::new(hash); let public_key = PublicKey::new( flags .get_one::("public_key") .unwrap_or(&PUBLIC_KEY.to_string()) .to_owned(), ); let signature = Signature::new( flags .get_one::("signature") .unwrap_or(&SIGNATURE.to_string()) .to_owned(), public_key, ); let spec = Spec::new(signature, data); let proposed_entry = ProposedEntry::Hashedrekord { api_version: flags .get_one::("api_version") .unwrap_or(&API_VERSION.to_string()) .to_owned(), spec, }; let query = SearchLogQuery { entry_uuids: Some(vec![ flags .get_one::("entry_uuids") .unwrap_or(&ENTRY_UUIDS.to_string()) .to_owned(), ]), log_indexes: Some(vec![ i32::from_str( flags .get_one::("log_indexes") .unwrap_or(&LOG_INDEXES.to_string()), ) .unwrap(), ]), entries: Some(vec![proposed_entry]), }; let configuration = Configuration::default(); let message = entries_api::search_log_query(&configuration, query) .await .unwrap(); 
println!("{}", message); } ================================================ FILE: rust-toolchain.toml ================================================ [toolchain] # Pin to a specific stable version for consistent clippy/fmt across contributors # Update this deliberately when ready to adopt new Rust features or fix new warnings # Minimum 1.89.0 required for edition 2024 support channel = "1.95.0" components = ["clippy", "rustfmt"] ================================================ FILE: src/bundle/mod.rs ================================================ // Copyright 2023 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Sigstore bundle support. pub use sigstore_protobuf_specs::dev::sigstore::bundle::v1::Bundle; mod models; #[cfg_attr(docsrs, doc(cfg(feature = "sign")))] #[cfg(feature = "sign")] pub mod sign; #[cfg_attr(docsrs, doc(cfg(feature = "verify")))] #[cfg(feature = "verify")] pub mod verify; ================================================ FILE: src/bundle/models.rs ================================================ use std::fmt::Display; use std::str::FromStr; use base64::{Engine as _, engine::general_purpose::STANDARD as base64}; use sigstore_protobuf_specs::dev::sigstore::{ common::v1::LogId, rekor::v1::{Checkpoint, InclusionPromise, InclusionProof, KindVersion, TransparencyLogEntry}, }; use crate::rekor::models::{LogEntry as RekorLogEntry, log_entry::RekorInclusionProof}; // Known Sigstore bundle media types. 
#[derive(Clone, Copy, Debug)] pub enum Version { Bundle0_1, Bundle0_2, } impl Display for Version { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str(match &self { Version::Bundle0_1 => "application/vnd.dev.sigstore.bundle+json;version=0.1", Version::Bundle0_2 => "application/vnd.dev.sigstore.bundle+json;version=0.2", }) } } impl FromStr for Version { type Err = (); fn from_str(s: &str) -> Result { match s { "application/vnd.dev.sigstore.bundle+json;version=0.1" => Ok(Version::Bundle0_1), "application/vnd.dev.sigstore.bundle+json;version=0.2" => Ok(Version::Bundle0_2), _ => Err(()), } } } #[inline] fn decode_hex>(hex: S) -> Result, ()> { hex::decode(hex.as_ref()).or(Err(())) } impl TryFrom for InclusionProof { type Error = (); fn try_from(value: RekorInclusionProof) -> Result { let hashes = value .hashes .iter() .map(decode_hex) .collect::, _>>()?; Ok(InclusionProof { checkpoint: Some(Checkpoint { envelope: value.checkpoint, }), hashes, log_index: value.log_index, root_hash: decode_hex(value.root_hash)?, tree_size: value.tree_size as i64, }) } } /// Convert log entries returned from Rekor into Sigstore Bundle format entries. impl TryFrom for TransparencyLogEntry { type Error = (); fn try_from(value: RekorLogEntry) -> Result { let canonicalized_body = serde_json_canonicalizer::to_string(&value.body) .map_err(|_| ())? 
.into_bytes(); let inclusion_promise = Some(InclusionPromise { signed_entry_timestamp: base64 .decode(value.verification.signed_entry_timestamp) .or(Err(()))?, }); let inclusion_proof = value .verification .inclusion_proof .map(|p| p.try_into()) .transpose()?; Ok(TransparencyLogEntry { canonicalized_body, inclusion_promise, inclusion_proof, integrated_time: value.integrated_time, kind_version: Some(KindVersion { kind: "hashedrekord".to_owned(), version: "0.0.1".to_owned(), }), log_id: Some(LogId { key_id: decode_hex(value.log_i_d)?, }), log_index: value.log_index, }) } } ================================================ FILE: src/bundle/sign.rs ================================================ // Copyright 2023 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Types for signing artifacts and producing Sigstore bundles. 
use std::{ io::{self, Read}, time::SystemTime, }; use base64::{Engine as _, engine::general_purpose::STANDARD as base64}; use hex; use p256::NistP256; use pkcs8::der::{Encode, EncodePem}; use sha2::{Digest, Sha256}; use signature::DigestSigner; use sigstore_protobuf_specs::dev::sigstore::bundle::v1::bundle; use sigstore_protobuf_specs::dev::sigstore::bundle::v1::{ Bundle, VerificationMaterial, verification_material, }; use sigstore_protobuf_specs::dev::sigstore::common::v1::{ HashAlgorithm, HashOutput, MessageSignature, X509Certificate, X509CertificateChain, }; use sigstore_protobuf_specs::dev::sigstore::rekor::v1::TransparencyLogEntry; use tokio::io::AsyncRead; use tokio_util::io::SyncIoBridge; use url::Url; use x509_cert::attr::{AttributeTypeAndValue, AttributeValue}; use x509_cert::builder::{Builder, RequestBuilder as CertRequestBuilder}; use x509_cert::ext::pkix as x509_ext; use crate::bundle::models::Version; use crate::crypto::keyring::Keyring; use crate::crypto::transparency::{CertificateEmbeddedSCT, verify_sct}; use crate::errors::{Result as SigstoreResult, SigstoreError}; use crate::fulcio::oauth::OauthTokenProvider; use crate::fulcio::{self, FULCIO_ROOT, FulcioClient}; use crate::oauth::IdentityToken; use crate::rekor::apis::configuration::Configuration as RekorConfiguration; use crate::rekor::apis::entries_api::create_log_entry; use crate::rekor::models::{hashedrekord, proposed_entry::ProposedEntry as ProposedLogEntry}; use crate::trust::TrustRoot; #[cfg(feature = "sigstore-trust-root")] use crate::trust::sigstore::SigstoreTrustRoot; /// An asynchronous Sigstore signing session. /// /// Sessions hold a provided user identity and key materials tied to that identity. A single /// session may be used to sign multiple items. For more information, see [`SigningSession::sign`]. /// /// This signing session operates asynchronously. To construct a synchronous [`blocking::SigningSession`], /// use [`SigningContext::blocking_signer()`]. 
pub struct SigningSession<'ctx> { context: &'ctx SigningContext, identity_token: IdentityToken, private_key: ecdsa::SigningKey, certs: fulcio::CertificateResponse, } impl<'ctx> SigningSession<'ctx> { async fn new( context: &'ctx SigningContext, identity_token: IdentityToken, ) -> SigstoreResult> { let (private_key, certs) = Self::materials(&context.fulcio, &identity_token).await?; Ok(Self { context, identity_token, private_key, certs, }) } async fn materials( fulcio: &FulcioClient, token: &IdentityToken, ) -> SigstoreResult<(ecdsa::SigningKey, fulcio::CertificateResponse)> { let subject = // SEQUENCE OF RelativeDistinguishedName vec![ // SET OF AttributeTypeAndValue vec![ // AttributeTypeAndValue, `emailAddress=...` AttributeTypeAndValue { oid: const_oid::db::rfc3280::EMAIL_ADDRESS, value: AttributeValue::new( pkcs8::der::Tag::Utf8String, token.unverified_claims().email.as_ref(), )?, } ].try_into()? ].into(); let mut rng = rand::thread_rng(); let private_key = ecdsa::SigningKey::from(p256::SecretKey::random(&mut rng)); let mut builder = CertRequestBuilder::new(subject, &private_key)?; builder.add_extension(&x509_ext::BasicConstraints { ca: false, path_len_constraint: None, })?; let cert_req = builder.build::()?; Ok((private_key, fulcio.request_cert_v2(cert_req, token).await?)) } /// Check if the session's identity token or key material is expired. /// /// If the session is expired, it cannot be used for signing operations, and a new session /// must be created with a fresh identity token. 
pub fn is_expired(&self) -> bool { let not_after = self .certs .cert .tbs_certificate .validity .not_after .to_system_time(); !self.identity_token.in_validity_period() || SystemTime::now() > not_after } async fn sign_digest(&self, hasher: Sha256) -> SigstoreResult { if self.is_expired() { return Err(SigstoreError::ExpiredSigningSession()); } if let Some(detached_sct) = &self.certs.detached_sct { verify_sct(detached_sct, &self.context.ctfe_keyring)?; } else { let sct = CertificateEmbeddedSCT::new(&self.certs.cert, &self.certs.chain)?; verify_sct(&sct, &self.context.ctfe_keyring)?; } // Sign artifact. let input_hash: &[u8] = &hasher.clone().finalize(); let artifact_signature: p256::ecdsa::Signature = self.private_key.sign_digest(hasher); let signature_bytes = artifact_signature.to_der().as_bytes().to_owned(); let cert = &self.certs.cert; // Create the transparency log entry. let proposed_entry = ProposedLogEntry::Hashedrekord { api_version: "0.0.1".to_owned(), spec: hashedrekord::Spec { signature: hashedrekord::Signature { content: base64.encode(&signature_bytes), public_key: hashedrekord::PublicKey::new( base64.encode(cert.to_pem(pkcs8::LineEnding::LF)?), ), }, data: hashedrekord::Data { hash: hashedrekord::Hash { algorithm: hashedrekord::AlgorithmKind::sha256, value: hex::encode(input_hash), }, }, }, }; let log_entry = create_log_entry(&self.context.rekor_config, proposed_entry) .await .map_err(|err| SigstoreError::RekorClientError(err.to_string()))?; let log_entry = log_entry .try_into() .or(Err(SigstoreError::RekorClientError( "Rekor returned malformed LogEntry".into(), )))?; // TODO(tnytown): Maybe run through the verification flow here? See sigstore-rs#296. Ok(SigningArtifact { input_digest: input_hash.to_owned(), cert: cert.to_der()?, signature: signature_bytes, log_entry, }) } /// Signs for the input with the session's identity. If the identity is expired, /// [`SigstoreError::ExpiredSigningSession`] is returned. 
pub async fn sign( &self, input: R, ) -> SigstoreResult { if self.is_expired() { return Err(SigstoreError::ExpiredSigningSession()); } let mut sync_input = SyncIoBridge::new(input); let hasher = tokio::task::spawn_blocking(move || -> SigstoreResult<_> { let mut hasher = Sha256::new(); io::copy(&mut sync_input, &mut hasher)?; Ok(hasher) }) .await??; self.sign_digest(hasher).await } } pub mod blocking { use super::{SigningSession as AsyncSigningSession, *}; /// A synchronous Sigstore signing session. /// /// Sessions hold a provided user identity and key materials tied to that identity. A single /// session may be used to sign multiple items. For more information, see [`SigningSession::sign`]. /// /// This signing session operates synchronously, thus it cannot be used in an asynchronous context. /// To construct an asynchronous [`SigningSession`], use [`SigningContext::signer()`]. pub struct SigningSession<'ctx> { inner: AsyncSigningSession<'ctx>, rt: tokio::runtime::Runtime, } impl<'ctx> SigningSession<'ctx> { pub(crate) fn new(ctx: &'ctx SigningContext, token: IdentityToken) -> SigstoreResult { let rt = tokio::runtime::Builder::new_current_thread() .enable_all() .build()?; let inner = rt.block_on(AsyncSigningSession::new(ctx, token))?; Ok(Self { inner, rt }) } /// Check if the session's identity token or key material is expired. /// /// If the session is expired, it cannot be used for signing operations, and a new session /// must be created with a fresh identity token. pub fn is_expired(&self) -> bool { self.inner.is_expired() } /// Signs for the input with the session's identity. If the identity is expired, /// [`SigstoreError::ExpiredSigningSession`] is returned. pub fn sign(&self, mut input: R) -> SigstoreResult { let mut hasher = Sha256::new(); io::copy(&mut input, &mut hasher)?; self.rt.block_on(self.inner.sign_digest(hasher)) } } } /// A Sigstore signing context. 
/// /// Contexts hold Fulcio (CA) and Rekor (CT) configurations which signing sessions can be /// constructed against. Use [`SigningContext::production`] to create a context against /// the public-good Sigstore infrastructure. pub struct SigningContext { fulcio: FulcioClient, rekor_config: RekorConfiguration, ctfe_keyring: Keyring, } impl SigningContext { /// Manually constructs a [`SigningContext`] from its constituent data. pub fn new( fulcio: FulcioClient, rekor_config: RekorConfiguration, ctfe_keyring: Keyring, ) -> Self { Self { fulcio, rekor_config, ctfe_keyring, } } /// Returns a [`SigningContext`] configured against the public-good production Sigstore /// infrastructure. #[cfg_attr(docsrs, doc(cfg(feature = "sigstore-trust-root")))] #[cfg(feature = "sigstore-trust-root")] pub async fn async_production() -> SigstoreResult { let trust_root = SigstoreTrustRoot::new(None).await?; Ok(Self::new( FulcioClient::new( Url::parse(FULCIO_ROOT).expect("constant FULCIO root fails to parse!"), crate::fulcio::TokenProvider::Oauth(OauthTokenProvider::default()), ), Default::default(), Keyring::new(trust_root.ctfe_keys()?.values().copied())?, )) } /// Returns a [`SigningContext`] configured against the public-good production Sigstore /// infrastructure. /// /// Async callers should use [`SigningContext::async_production`]. #[cfg_attr(docsrs, doc(cfg(feature = "sigstore-trust-root")))] #[cfg(feature = "sigstore-trust-root")] pub fn production() -> SigstoreResult { let rt = tokio::runtime::Builder::new_current_thread() .enable_all() .build()?; rt.block_on(Self::async_production()) } /// Configures and returns a [`SigningSession`] with the held context. pub async fn signer( &self, identity_token: IdentityToken, ) -> SigstoreResult> { SigningSession::new(self, identity_token).await } /// Configures and returns a [`blocking::SigningSession`] with the held context. /// /// Async contexts must use [`SigningContext::signer`]. 
pub fn blocking_signer( &self, identity_token: IdentityToken, ) -> SigstoreResult> { blocking::SigningSession::new(self, identity_token) } } /// A signature and its associated metadata. pub struct SigningArtifact { input_digest: Vec, cert: Vec, signature: Vec, log_entry: TransparencyLogEntry, } impl SigningArtifact { /// Consumes the signing artifact and produces a Sigstore [`Bundle`]. /// /// The resulting bundle can be serialized with [`serde_json`]. pub fn to_bundle(self) -> Bundle { // NOTE: We explicitly only include the leaf certificate in the bundle's "chain" // here: the specs explicitly forbid the inclusion of the root certificate, // and discourage inclusion of any intermediates (since they're in the root of // trust already). let x509_certificate_chain = X509CertificateChain { certificates: vec![X509Certificate { raw_bytes: self.cert, }], }; let verification_material = Some(VerificationMaterial { timestamp_verification_data: None, tlog_entries: vec![self.log_entry], content: Some(verification_material::Content::X509CertificateChain( x509_certificate_chain, )), }); let message_signature = MessageSignature { message_digest: Some(HashOutput { algorithm: HashAlgorithm::Sha2256.into(), digest: self.input_digest, }), signature: self.signature, }; Bundle { media_type: Version::Bundle0_2.to_string(), verification_material, content: Some(bundle::Content::MessageSignature(message_signature)), } } } ================================================ FILE: src/bundle/verify/mod.rs ================================================ // // Copyright 2023 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Types for verifying Sigstore bundles with policies. mod models; pub use models::{VerificationError, VerificationResult}; pub mod policy; pub use policy::{PolicyError, VerificationPolicy}; mod verifier; pub use verifier::*; ================================================ FILE: src/bundle/verify/models.rs ================================================ // // Copyright 2023 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
use std::str::FromStr;

use crate::{
    bundle::{Bundle, models::Version as BundleVersion},
    crypto::certificate::{CertificateValidationError, is_leaf, is_root_ca},
    rekor::models as rekor,
};
use base64::{Engine as _, engine::general_purpose::STANDARD as base64};
use sigstore_protobuf_specs::dev::sigstore::{
    bundle::v1::{bundle, verification_material},
    rekor::v1::{InclusionProof, TransparencyLogEntry},
};
use thiserror::Error;
use tracing::{debug, error, warn};
use x509_cert::{
    Certificate,
    der::{Decode, EncodePem},
};

use super::policy::PolicyError;

/// Errors specific to the "0.1" bundle media-type profile.
#[derive(Error, Debug)]
pub enum Bundle01ProfileErrorKind {
    /// 0.1 bundles carry a Rekor inclusion promise instead of a full proof.
    #[error("bundle must contain inclusion promise")]
    InclusionPromiseMissing,
}

/// Errors specific to the "0.2" bundle media-type profile.
#[derive(Error, Debug)]
pub enum Bundle02ProfileErrorKind {
    /// 0.2 bundles must carry a full Rekor inclusion proof.
    #[error("bundle must contain inclusion proof")]
    InclusionProofMissing,

    /// 0.2 bundles must carry a signed checkpoint alongside the proof.
    #[error("bundle must contain checkpoint")]
    CheckpointMissing,
}

/// Profile-validation errors, dispatched by bundle media type.
#[derive(Error, Debug)]
#[error(transparent)]
pub enum BundleProfileErrorKind {
    Bundle01Profile(#[from] Bundle01ProfileErrorKind),

    Bundle02Profile(#[from] Bundle02ProfileErrorKind),

    /// The bundle's `media_type` matched no known profile version.
    #[error("unknown bundle profile {0}")]
    Unknown(String),
}

/// Structural errors found while checking a [`Bundle`]'s contents.
#[derive(Error, Debug)]
pub enum BundleErrorKind {
    #[error("bundle missing VerificationMaterial")]
    VerificationMaterialMissing,

    #[error("bundle includes unsupported VerificationMaterial::Content")]
    VerificationMaterialContentUnsupported,

    #[error("bundle's certificate(s) are malformed")]
    CertificateMalformed(#[source] x509_cert::der::Error),

    /// Roots must come from an independent root of trust, never the bundle.
    #[error("bundle contains a root certificate")]
    RootInChain,

    #[error("bundle does not contain the signing (leaf) certificate")]
    NoLeaf(#[source] CertificateValidationError),

    #[error("bundle does not contain any certificates")]
    CertificatesMissing,

    #[error("bundle does not contain signature")]
    SignatureMissing,

    #[error("bundle includes unsupported DSSE signature")]
    DsseUnsupported,

    /// Exactly one transparency log entry is expected per bundle.
    #[error("bundle needs 1 tlog entry, got {0}")]
    TlogEntry(usize),

    #[error(transparent)]
    BundleProfile(#[from] BundleProfileErrorKind),
}
#[derive(Error, Debug)] pub enum CertificateErrorKind { #[error("certificate malformed")] Malformed(#[source] webpki::Error), #[error("certificate expired before time of signing")] Expired, #[error("certificate SCT verification failed")] Sct(#[source] crate::crypto::transparency::SCTError), #[error("certificate verification failed")] VerificationFailed(#[source] webpki::Error), } #[derive(Error, Debug)] pub enum SignatureErrorKind { #[error("unsupported signature algorithm")] AlgoUnsupported(#[source] crate::errors::SigstoreError), #[error("signature verification failed")] VerificationFailed(#[source] crate::errors::SigstoreError), #[error("signature transparency materials are inconsistent")] Transparency, } #[derive(Error, Debug)] #[error(transparent)] pub enum VerificationError { #[error("unable to read input")] Input(#[source] std::io::Error), Bundle(#[from] BundleErrorKind), Certificate(#[from] CertificateErrorKind), Signature(#[from] SignatureErrorKind), Policy(#[from] PolicyError), } pub type VerificationResult = Result<(), VerificationError>; pub struct CheckedBundle { pub(crate) certificate: Certificate, pub(crate) signature: Vec, tlog_entry: TransparencyLogEntry, } impl TryFrom for CheckedBundle { type Error = BundleErrorKind; fn try_from(input: Bundle) -> Result { let (content, mut tlog_entries) = match input.verification_material { Some(m) => (m.content, m.tlog_entries), _ => return Err(BundleErrorKind::VerificationMaterialMissing), }; // Parse the certificates. The first entry in the chain MUST be a leaf certificate, and the // rest of the chain MUST NOT include a root CA or any intermediate CAs that appear in an // independent root of trust. 
let certs = match content { Some(verification_material::Content::X509CertificateChain(ch)) => ch.certificates, Some(verification_material::Content::Certificate(cert)) => { vec![cert] } _ => return Err(BundleErrorKind::VerificationMaterialContentUnsupported), }; let certs = certs .iter() .map(|c| c.raw_bytes.as_slice()) .map(Certificate::from_der) .collect::, _>>() .map_err(BundleErrorKind::CertificateMalformed)?; let [leaf_cert, chain_certs @ ..] = &certs[..] else { return Err(BundleErrorKind::CertificatesMissing); }; is_leaf(leaf_cert).map_err(BundleErrorKind::NoLeaf)?; for chain_cert in chain_certs { if is_root_ca(chain_cert).is_ok() { return Err(BundleErrorKind::RootInChain); } } let signature = match input.content.ok_or(BundleErrorKind::SignatureMissing)? { bundle::Content::MessageSignature(s) => s.signature, _ => return Err(BundleErrorKind::DsseUnsupported), }; if tlog_entries.len() != 1 { return Err(BundleErrorKind::TlogEntry(tlog_entries.len())); } let tlog_entry = tlog_entries.remove(0); let (inclusion_promise, inclusion_proof) = (&tlog_entry.inclusion_promise, &tlog_entry.inclusion_proof); // `inclusion_proof` is a required field in the current protobuf spec, // but older versions of Rekor didn't provide it. Check invariants // here and selectively allow for this case. // // https://github.com/sigstore/sigstore-python/pull/634#discussion_r1182769140 let check_01_bundle = || -> Result<(), BundleProfileErrorKind> { if inclusion_promise.is_none() { return Err(Bundle01ProfileErrorKind::InclusionPromiseMissing)?; } if matches!( inclusion_proof, Some(InclusionProof { checkpoint: None, .. }) ) { debug!("0.1 bundle contains inclusion proof without checkpoint"); } Ok(()) }; let check_02_bundle = || -> Result<(), BundleProfileErrorKind> { if inclusion_proof.is_none() { error!("bundle must contain inclusion proof"); return Err(Bundle02ProfileErrorKind::InclusionProofMissing)?; } if matches!( inclusion_proof, Some(InclusionProof { checkpoint: None, .. 
}) ) { error!("bundle must contain checkpoint"); return Err(Bundle02ProfileErrorKind::CheckpointMissing)?; } Ok(()) }; match BundleVersion::from_str(&input.media_type) { Ok(BundleVersion::Bundle0_1) => check_01_bundle()?, Ok(BundleVersion::Bundle0_2) => check_02_bundle()?, Err(_) => return Err(BundleProfileErrorKind::Unknown(input.media_type))?, } Ok(Self { certificate: leaf_cert.clone(), signature, tlog_entry, }) } } impl CheckedBundle { /// Retrieves and checks consistency of the bundle's [TransparencyLogEntry]. pub fn tlog_entry(&self, offline: bool, input_digest: &[u8]) -> Option<&TransparencyLogEntry> { let base64_pem_certificate = base64.encode(self.certificate.to_pem(pkcs8::LineEnding::LF).ok()?); let expected_entry = rekor::Hashedrekord { kind: "hashedrekord".to_owned(), api_version: "0.0.1".to_owned(), spec: rekor::hashedrekord::Spec { signature: rekor::hashedrekord::Signature { content: base64.encode(&self.signature), public_key: rekor::hashedrekord::PublicKey::new(base64_pem_certificate), }, data: rekor::hashedrekord::Data { hash: rekor::hashedrekord::Hash { algorithm: rekor::hashedrekord::AlgorithmKind::sha256, value: hex::encode(input_digest), }, }, }, }; let entry = if !offline && self.tlog_entry.inclusion_proof.is_none() { warn!("online rekor fetching is not implemented yet, but is necessary for this bundle"); return None; } else { &self.tlog_entry }; let actual: serde_json::Value = serde_json::from_slice(&self.tlog_entry.canonicalized_body).ok()?; let expected: serde_json::Value = serde_json::to_value(expected_entry).ok()?; if actual != expected { return None; } Some(entry) } } ================================================ FILE: src/bundle/verify/policy.rs ================================================ // Copyright 2023 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Verification constraints for certificate metadata. //! //! use const_oid::ObjectIdentifier; use thiserror::Error; use tracing::warn; use x509_cert::ext::pkix::{SubjectAltName, name::GeneralName}; macro_rules! oids { ($($name:ident = $value:literal),+) => { $(const $name: ObjectIdentifier = ObjectIdentifier::new_unwrap($value);)+ }; } macro_rules! impl_policy { ($policy:ident, $oid:expr, $doc:literal) => { #[doc = $doc] pub struct $policy(pub String); impl const_oid::AssociatedOid for $policy { const OID: ObjectIdentifier = $oid; } impl SingleX509ExtPolicy for $policy { fn new>(val: S) -> Self { Self(val.as_ref().to_owned()) } fn name() -> &'static str { stringify!($policy) } fn value(&self) -> &str { &self.0 } } }; } oids! 
{ OIDC_ISSUER_OID = "1.3.6.1.4.1.57264.1.1", OIDC_GITHUB_WORKFLOW_TRIGGER_OID = "1.3.6.1.4.1.57264.1.2", OIDC_GITHUB_WORKFLOW_SHA_OID = "1.3.6.1.4.1.57264.1.3", OIDC_GITHUB_WORKFLOW_NAME_OID = "1.3.6.1.4.1.57264.1.4", OIDC_GITHUB_WORKFLOW_REPOSITORY_OID = "1.3.6.1.4.1.57264.1.5", OIDC_GITHUB_WORKFLOW_REF_OID = "1.3.6.1.4.1.57264.1.6", OTHERNAME_OID = "1.3.6.1.4.1.57264.1.7" } #[derive(Error, Debug)] pub enum PolicyError { #[error("did not find exactly 1 of the required extension in the certificate")] ExtensionNotFound, #[error("certificate's {extension} does not match (got {actual}, expected {expected})")] ExtensionCheckFailed { extension: String, expected: String, actual: String, }, #[error("{0} of {total} policies failed: {1}\n- ", errors.len(), errors.iter().map(|e| e.to_string()).collect::>().join("\n- ") )] AllOf { total: usize, errors: Vec, }, #[error("0 of {total} policies succeeded")] AnyOf { total: usize }, } pub type PolicyResult = Result<(), PolicyError>; /// A policy that checks a single textual value against a X.509 extension. pub trait SingleX509ExtPolicy { fn new>(val: S) -> Self; fn name() -> &'static str; fn value(&self) -> &str; } impl VerificationPolicy for T { fn verify(&self, cert: &x509_cert::Certificate) -> PolicyResult { let extensions = cert.tbs_certificate.extensions.as_deref().unwrap_or(&[]); let mut extensions = extensions.iter().filter(|ext| ext.extn_id == T::OID); // Check for exactly one extension. let (Some(ext), None) = (extensions.next(), extensions.next()) else { return Err(PolicyError::ExtensionNotFound); }; // Parse raw string without DER encoding. let val = std::str::from_utf8(ext.extn_value.as_bytes()) .or(Err(PolicyError::ExtensionNotFound))?; if val != self.value() { return Err(PolicyError::ExtensionCheckFailed { extension: T::name().to_owned(), expected: self.value().to_owned(), actual: val.to_owned(), }); } Ok(()) } } impl_policy!( OIDCIssuer, OIDC_ISSUER_OID, "Checks the certificate's OIDC issuer." 
); impl_policy!( GitHubWorkflowTrigger, OIDC_GITHUB_WORKFLOW_TRIGGER_OID, "Checks the certificate's GitHub Actions workflow trigger." ); impl_policy!( GitHubWorkflowSHA, OIDC_GITHUB_WORKFLOW_SHA_OID, "Checks the certificate's GitHub Actions workflow commit SHA." ); impl_policy!( GitHubWorkflowName, OIDC_GITHUB_WORKFLOW_NAME_OID, "Checks the certificate's GitHub Actions workflow name." ); impl_policy!( GitHubWorkflowRepository, OIDC_GITHUB_WORKFLOW_REPOSITORY_OID, "Checks the certificate's GitHub Actions workflow repository." ); impl_policy!( GitHubWorkflowRef, OIDC_GITHUB_WORKFLOW_REF_OID, "Checks the certificate's GitHub Actions workflow ref." ); /// An interface that all policies must conform to. pub trait VerificationPolicy { fn verify(&self, cert: &x509_cert::Certificate) -> PolicyResult; } /// The "any of" policy, corresponding to a logical OR between child policies. /// /// An empty list of child policies is considered trivially invalid. pub struct AnyOf<'a> { children: Vec<&'a dyn VerificationPolicy>, } impl<'a> AnyOf<'a> { pub fn new(policies: I) -> Self where I: IntoIterator, { Self { children: policies.into_iter().collect(), } } } impl VerificationPolicy for AnyOf<'_> { fn verify(&self, cert: &x509_cert::Certificate) -> PolicyResult { self.children .iter() .find(|policy| policy.verify(cert).is_err()) .map_or(Ok(()), |_| { Err(PolicyError::AnyOf { total: self.children.len(), }) }) } } /// The "all of" policy, corresponding to a logical AND between child policies. /// /// An empty list of child policies is considered trivially invalid. pub struct AllOf<'a> { children: Vec<&'a dyn VerificationPolicy>, } impl<'a> AllOf<'a> { pub fn new(policies: I) -> Option where I: IntoIterator, { let children: Vec<_> = policies.into_iter().collect(); // Without this, we'd be able to construct an `AllOf` containing an empty list of child // policies. This is almost certainly not what the user wants and is a potential source // of API misuse, so we explicitly disallow it. 
if children.is_empty() { warn!("attempted to construct an AllOf with an empty list of child policies"); return None; } Some(Self { children }) } } impl VerificationPolicy for AllOf<'_> { fn verify(&self, cert: &x509_cert::Certificate) -> PolicyResult { let results = self.children.iter().map(|policy| policy.verify(cert).err()); let failures: Vec<_> = results.flatten().collect(); if !failures.is_empty() { return Err(PolicyError::AllOf { total: self.children.len(), errors: failures, }); } Ok(()) } } /// Verifies the certificate's "identity", corresponding to the X.509v3 SAN. /// Identities are verified modulo an OIDC issuer, so the issuer's URI /// is also required. /// /// Supported SAN types include emails, URIs, and Sigstore-specific "other names". pub struct Identity { identity: String, issuer: OIDCIssuer, } impl Identity { pub fn new(identity: A, issuer: B) -> Self where A: AsRef, B: AsRef, { Self { identity: identity.as_ref().to_owned(), issuer: OIDCIssuer::new(issuer), } } } impl VerificationPolicy for Identity { fn verify(&self, cert: &x509_cert::Certificate) -> PolicyResult { self.issuer.verify(cert)?; let (_, san): (bool, SubjectAltName) = match cert.tbs_certificate.get() { Ok(Some(result)) => result, _ => return Err(PolicyError::ExtensionNotFound), }; let names: Vec<_> = san .0 .iter() .filter_map(|name| match name { GeneralName::Rfc822Name(name) => Some(name.as_str()), GeneralName::UniformResourceIdentifier(name) => Some(name.as_str()), GeneralName::OtherName(name) if name.type_id == OTHERNAME_OID => { std::str::from_utf8(name.value.value()).ok() } _ => None, }) .collect(); if !names.contains(&self.identity.as_str()) { return Err(PolicyError::ExtensionCheckFailed { extension: "SubjectAltName".to_owned(), expected: self.identity.clone(), actual: names.join(", "), }); } Ok(()) } } ================================================ FILE: src/bundle/verify/verifier.rs ================================================ // Copyright 2023 The Sigstore Authors. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Verifiers: async and blocking. use std::io::{self, Read}; use pki_types::{CertificateDer, UnixTime}; use sha2::{Digest, Sha256}; use tokio::io::{AsyncRead, AsyncReadExt}; use tracing::debug; use x509_cert::der::Encode; use crate::{ bundle::Bundle, crypto::{ CertificatePool, CosignVerificationKey, Signature, keyring::Keyring, transparency::{CertificateEmbeddedSCT, verify_sct}, }, errors::Result as SigstoreResult, rekor::apis::configuration::Configuration as RekorConfiguration, trust::TrustRoot, }; #[cfg(feature = "sigstore-trust-root")] use crate::trust::sigstore::SigstoreTrustRoot; use super::{ VerificationError, VerificationResult, models::{CertificateErrorKind, CheckedBundle, SignatureErrorKind}, policy::VerificationPolicy, }; /// An asynchronous Sigstore verifier. /// /// For synchronous usage, see [`Verifier`]. pub struct Verifier { #[allow(dead_code)] rekor_config: RekorConfiguration, cert_pool: CertificatePool, ctfe_keyring: Keyring, } impl Verifier { /// Constructs a [`Verifier`]. /// /// For verifications against the public-good trust root, use [`Verifier::production()`]. 
pub fn new( rekor_config: RekorConfiguration, trust_repo: R, ) -> SigstoreResult { let cert_pool = CertificatePool::from_certificates(trust_repo.fulcio_certs()?, [])?; let ctfe_keyring = Keyring::new(trust_repo.ctfe_keys()?.values().copied())?; Ok(Self { rekor_config, cert_pool, ctfe_keyring, }) } /// Verifies an input digest against the given Sigstore Bundle, ensuring conformance to the /// provided [`VerificationPolicy`]. pub async fn verify_digest

( &self, input_digest: Sha256, bundle: Bundle, policy: &P, offline: bool, ) -> VerificationResult where P: VerificationPolicy, { let input_digest = input_digest.finalize(); let materials: CheckedBundle = bundle.try_into()?; // In order to verify an artifact, we need to achieve the following: // // 1) Verify that the signing certificate is signed by the certificate // chain and that the signing certificate was valid at the time // of signing. // 2) Verify that the signing certificate belongs to the signer. // 3) Verify that the artifact signature was signed by the public key in the // signing certificate. // 4) Verify that the Rekor entry is consistent with the other signing // materials (preventing CVE-2022-36056) // 5) Verify the inclusion proof supplied by Rekor for this artifact, // if we're doing online verification. // 6) Verify the Signed Entry Timestamp (SET) supplied by Rekor for this // artifact. // 7) Verify that the signing certificate was valid at the time of // signing by comparing the expiry against the integrated timestamp. // 1) Verify that the signing certificate is signed by the certificate // chain and that the signing certificate was valid at the time // of signing. 
let tbs_certificate = &materials.certificate.tbs_certificate; let issued_at = tbs_certificate.validity.not_before.to_unix_duration(); let cert_der: CertificateDer = materials .certificate .to_der() .expect("failed to DER-encode constructed Certificate!") .into(); let ee_cert = (&cert_der) .try_into() .map_err(CertificateErrorKind::Malformed)?; let trusted_chain = self .cert_pool .verify_cert_with_time(&ee_cert, UnixTime::since_unix_epoch(issued_at)) .map_err(CertificateErrorKind::VerificationFailed)?; debug!("signing certificate chains back to trusted root"); let sct_context = CertificateEmbeddedSCT::new_with_verified_path(&materials.certificate, &trusted_chain) .map_err(CertificateErrorKind::Sct)?; verify_sct(&sct_context, &self.ctfe_keyring).map_err(CertificateErrorKind::Sct)?; debug!("signing certificate's SCT is valid"); // 2) Verify that the signing certificate belongs to the signer. policy.verify(&materials.certificate)?; debug!("signing certificate conforms to policy"); // 3) Verify that the signature was signed by the public key in the signing certificate let signing_key: CosignVerificationKey = (&tbs_certificate.subject_public_key_info) .try_into() .map_err(SignatureErrorKind::AlgoUnsupported)?; let verify_sig = signing_key.verify_prehash(Signature::Raw(&materials.signature), &input_digest); verify_sig.map_err(SignatureErrorKind::VerificationFailed)?; debug!("signature corresponds to public key"); // 4) Verify that the Rekor entry is consistent with the other signing // materials let log_entry = materials .tlog_entry(offline, &input_digest) .ok_or(SignatureErrorKind::Transparency)?; debug!("log entry is consistent with other materials"); // 5) Verify the inclusion proof supplied by Rekor for this artifact, // if we're doing online verification. // TODO(tnytown): Merkle inclusion; sigstore-rs#285 // 6) Verify the Signed Entry Timestamp (SET) supplied by Rekor for this // artifact. 
// TODO(tnytown) SET verification; sigstore-rs#285 // 7) Verify that the signing certificate was valid at the time of // signing by comparing the expiry against the integrated timestamp. let integrated_time = log_entry.integrated_time as u64; let not_before = tbs_certificate .validity .not_before .to_unix_duration() .as_secs(); let not_after = tbs_certificate .validity .not_after .to_unix_duration() .as_secs(); if integrated_time < not_before || integrated_time > not_after { return Err(CertificateErrorKind::Expired)?; } debug!("data signed during validity period"); debug!("successfully verified!"); Ok(()) } /// Verifies an input against the given Sigstore Bundle, ensuring conformance to the provided /// [`VerificationPolicy`]. pub async fn verify( &self, mut input: R, bundle: Bundle, policy: &P, offline: bool, ) -> VerificationResult where R: AsyncRead + Unpin + Send, P: VerificationPolicy, { // arbitrary buffer size, chosen to be a multiple of the digest size. let mut buf = [0u8; 1024]; let mut hasher = Sha256::new(); loop { match input .read(&mut buf) .await .map_err(VerificationError::Input)? { 0 => break, n => hasher.update(&buf[..n]), } } self.verify_digest(hasher, bundle, policy, offline).await } } impl Verifier { /// Constructs an [`Verifier`] against the public-good trust root. #[cfg(feature = "sigstore-trust-root")] #[cfg_attr(docsrs, doc(cfg(feature = "sigstore-trust-root")))] pub async fn production() -> SigstoreResult { let updater = SigstoreTrustRoot::new(None).await?; Verifier::new(Default::default(), updater) } } pub mod blocking { use super::{Verifier as AsyncVerifier, *}; /// A synchronous Sigstore verifier. pub struct Verifier { inner: AsyncVerifier, rt: tokio::runtime::Runtime, } impl Verifier { /// Constructs a synchronous Sigstore verifier. /// /// For verifications against the public-good trust root, use [`Verifier::production()`]. 
pub fn new( rekor_config: RekorConfiguration, trust_repo: R, ) -> SigstoreResult { let rt = tokio::runtime::Builder::new_current_thread() .enable_all() .build()?; let inner = AsyncVerifier::new(rekor_config, trust_repo)?; Ok(Self { rt, inner }) } /// Verifies an input digest against the given Sigstore Bundle, ensuring conformance to the /// provided [`VerificationPolicy`]. pub fn verify_digest

( &self, input_digest: Sha256, bundle: Bundle, policy: &P, offline: bool, ) -> VerificationResult where P: VerificationPolicy, { self.rt.block_on( self.inner .verify_digest(input_digest, bundle, policy, offline), ) } /// Verifies an input against the given Sigstore Bundle, ensuring conformance to the provided /// [`VerificationPolicy`]. pub fn verify( &self, mut input: R, bundle: Bundle, policy: &P, offline: bool, ) -> VerificationResult where R: Read, P: VerificationPolicy, { let mut hasher = Sha256::new(); io::copy(&mut input, &mut hasher).map_err(VerificationError::Input)?; self.verify_digest(hasher, bundle, policy, offline) } } impl Verifier { /// Constructs a synchronous [`Verifier`] against the public-good trust root. #[cfg(feature = "sigstore-trust-root")] #[cfg_attr(docsrs, doc(cfg(feature = "sigstore-trust-root")))] pub fn production() -> SigstoreResult { let rt = tokio::runtime::Builder::new_current_thread() .enable_all() .build()?; let inner = rt.block_on(AsyncVerifier::production())?; Ok(Verifier { inner, rt }) } } } ================================================ FILE: src/cosign/bundle.rs ================================================ // // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
use std::{cmp::PartialEq, collections::BTreeMap}; use serde::{Deserialize, Serialize}; use crate::{ crypto::{CosignVerificationKey, Signature}, errors::{Result, SigstoreError}, }; /// Struct that represents the signature bundle as generated by running a /// command that accepts the '--bundle' option. For example: /// /// ```sh,ignore,no_run /// cosign sign-blob --bundle=artifact.bundle artifact.txt /// ``` #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] #[serde(rename_all = "camelCase")] pub struct SignedArtifactBundle { /// Represents the `base64Signature' field which is the signature of the /// of the blob. pub base64_signature: String, /// Represents the 'cert' field which is a PEM encoded certificate. pub cert: String, /// Represents the 'rekorBundle' field. pub rekor_bundle: Bundle, } impl SignedArtifactBundle { /// Create a new verified `SignedArtifactBundle`. /// /// **Note well:** The bundle will be returned only if it can be verified /// using the supplied `rekor_pub_key` public key. pub fn new_verified( raw: &str, rekor_pub_keys: &BTreeMap, ) -> Result { let bundle: SignedArtifactBundle = serde_json::from_str(raw).map_err(|e| { SigstoreError::UnexpectedError(format!("Cannot parse bundle |{raw}|: {e:?}")) })?; Bundle::verify_bundle(&bundle.rekor_bundle, rekor_pub_keys).map(|_| bundle) } } #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] #[serde(rename_all = "PascalCase")] pub struct Bundle { pub signed_entry_timestamp: String, pub payload: Payload, } impl Bundle { /// Create a new verified `Bundle` /// /// **Note well:** The bundle will be returned only if it can be verified /// using the supplied `rekor_pub_key` public key. pub(crate) fn new_verified( raw: &str, rekor_pub_keys: &BTreeMap, ) -> Result { let bundle: Bundle = serde_json::from_str(raw).map_err(|e| { SigstoreError::UnexpectedError(format!("Cannot parse bundle |{raw}|: {e:?}")) })?; Self::verify_bundle(&bundle, rekor_pub_keys).map(|_| bundle) } /// Verify a `Bundle`. 
/// /// **Note well:** The bundle will be returned only if it can be verified /// using the supplied `rekor_pub_key` public key. pub(crate) fn verify_bundle( bundle: &Bundle, rekor_pub_keys: &BTreeMap, ) -> Result<()> { let buf = serde_json_canonicalizer::to_vec(&bundle.payload).map_err(|e| { SigstoreError::UnexpectedError(format!( "Cannot create canonical JSON representation of bundle: {e:?}" )) })?; let rekor_pub_key = rekor_pub_keys.get(&bundle.payload.log_id).ok_or_else(|| { SigstoreError::RekorPublicKeyNotFoundError(bundle.payload.log_id.clone()) })?; rekor_pub_key.verify_signature( Signature::Base64Encoded(bundle.signed_entry_timestamp.as_bytes()), &buf, )?; Ok(()) } } #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] #[serde(rename_all = "camelCase")] pub struct Payload { pub body: String, pub integrated_time: i64, pub log_index: i64, #[serde(rename = "logID")] pub log_id: String, } #[cfg(test)] mod tests { use super::*; use serde_json::json; use crate::cosign::tests::get_rekor_public_key; use crate::crypto::SigningScheme; fn build_correct_bundle() -> String { let bundle_json = json!({ "SignedEntryTimestamp": "MEUCIDx9M+yRpD0O47/Mzm8NAPCbtqy4uiTkLWWexW0bo4jZAiEA1wwueIW8XzJWNkut5y9snYj7UOfbMmUXp7fH3CzJmWg=", "Payload": { "body": 
"eyJhcGlWZXJzaW9uIjoiMC4wLjEiLCJraW5kIjoicmVrb3JkIiwic3BlYyI6eyJkYXRhIjp7Imhhc2giOnsiYWxnb3JpdGhtIjoic2hhMjU2IiwidmFsdWUiOiIzYWY0NDE0ZDIwYzllMWNiNzZjY2M3MmFhZThiMjQyMTY2ZGFiZTZhZjUzMWE0YTc5MGRiOGUyZjBlNWVlN2M5In19LCJzaWduYXR1cmUiOnsiY29udGVudCI6Ik1FWUNJUURXV3hQUWEzWEZVc1BieVRZK24rYlp1LzZQd2hnNVd3eVlEUXRFZlFobzl3SWhBUGtLVzdldWI4YjdCWCtZYmJSYWM4VHd3SXJLNUt4dmR0UTZOdW9EK2l2VyIsImZvcm1hdCI6Ing1MDkiLCJwdWJsaWNLZXkiOnsiY29udGVudCI6IkxTMHRMUzFDUlVkSlRpQlFWVUpNU1VNZ1MwVlpMUzB0TFMwS1RVWnJkMFYzV1VoTGIxcEplbW93UTBGUldVbExiMXBKZW1vd1JFRlJZMFJSWjBGRlRFdG9SRGRHTlU5TGVUYzNXalU0TWxrMmFEQjFNVW96UjA1Qkt3cHJkbFZ6YURSbFMzQmtNV3gzYTBSQmVtWkdSSE0zZVZoRlJYaHpSV3RRVUhWcFVVcENaV3hFVkRZNGJqZFFSRWxYUWk5UlJWazNiWEpCUFQwS0xTMHRMUzFGVGtRZ1VGVkNURWxESUV0RldTMHRMUzB0Q2c9PSJ9fX19", "integratedTime": 1634714179, "logIndex": 783606, "logID": "c0d23d6ad406973f9559f3ba2d1ca01f84147d8ffc5b8445c224f98b9591801d" } }); serde_json::to_string(&bundle_json).unwrap() } #[test] fn bundle_new_verified_success() { let (key_id, key) = get_rekor_public_key(); let rekor_pub_keys = BTreeMap::from([(key_id, key)]); let bundle_json = build_correct_bundle(); let bundle = Bundle::new_verified(&bundle_json, &rekor_pub_keys); assert!(bundle.is_ok()); } #[test] fn bundle_new_verified_failure_because_different_key_signed_the_bundle() { let public_key = r#"-----BEGIN PUBLIC KEY----- MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAENptdY/l3nB0yqkXLBWkZWQwo6+cu OSWS1X9vPavpiQOoTTGC0xX57OojUadxF1cdQmrsiReWg2Wn4FneJfa8xw== -----END PUBLIC KEY-----"#; let not_rekor_pub_key = CosignVerificationKey::from_pem(public_key.as_bytes(), &SigningScheme::default()) .expect("Cannot create CosignVerificationKey"); let (key_id, _) = get_rekor_public_key(); let rekor_pub_keys = BTreeMap::from([(key_id, not_rekor_pub_key)]); let bundle_json = build_correct_bundle(); let bundle = Bundle::new_verified(&bundle_json, &rekor_pub_keys); assert!(bundle.is_err()); } #[test] fn bundle_new_verified_failure_because_rekor_key_id_is_unknown() { let (_, key) = 
get_rekor_public_key(); let key_id = "cf1199155bddd051268d1f16ac5c0c75c009f6fb5a63f4177f8e18d7051e3fa0".to_string(); let rekor_pub_keys = BTreeMap::from([(key_id, key)]); let bundle_json = build_correct_bundle(); let bundle = Bundle::new_verified(&bundle_json, &rekor_pub_keys); assert!(matches!( bundle, Err(SigstoreError::RekorPublicKeyNotFoundError(_)) )); } #[test] fn signedartifactbundle_new_verified_success() { // Bundle as generated by running the following command, and taking the // content from the generated 'artifact.bundle` file: // cosign sign-blob --bundle=artifact.bundle artifact.txt let bundle_raw = r#" {"base64Signature":"MEQCIGp1XZP5zaImosrBhDPCdXn3f8xI9FHGLsGVx6UeRPCgAiAt5GrsdQhOKnZcA3EWecvgJSHzCIjWifFBQkD7Hdsymg==","cert":"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNxRENDQWkrZ0F3SUJBZ0lVVFBXVGZPLzFOUmFTRmRlY2FBUS9wQkRHSnA4d0NnWUlLb1pJemowRUF3TXcKTnpFVk1CTUdBMVVFQ2hNTWMybG5jM1J2Y21VdVpHVjJNUjR3SEFZRFZRUURFeFZ6YVdkemRHOXlaUzFwYm5SbApjbTFsWkdsaGRHVXdIaGNOTWpJeE1USTFNRGN6TnpFeVdoY05Nakl4TVRJMU1EYzBOekV5V2pBQU1Ga3dFd1lICktvWkl6ajBDQVFZSUtvWkl6ajBEQVFjRFFnQUVKUVE0Vy81WFA5bTRZYldSQlF0SEdXd245dVVoYWUzOFVwY0oKcEVNM0RPczR6VzRNSXJNZlc0V1FEMGZ3cDhQVVVSRFh2UTM5NHBvcWdHRW1Ta3J1THFPQ0FVNHdnZ0ZLTUE0RwpBMVVkRHdFQi93UUVBd0lIZ0RBVEJnTlZIU1VFRERBS0JnZ3JCZ0VGQlFjREF6QWRCZ05WSFE0RUZnUVVvM0tuCmpKUVowWGZpZ2JENWIwT1ZOTjB4cVNvd0h3WURWUjBqQkJnd0ZvQVUzOVBwejFZa0VaYjVxTmpwS0ZXaXhpNFkKWkQ4d0p3WURWUjBSQVFIL0JCMHdHNEVaWkdGdWFXVnNMbUpsZG1WdWFYVnpRR2R0WVdsc0xtTnZiVEFzQmdvcgpCZ0VFQVlPL01BRUJCQjVvZEhSd2N6b3ZMMmRwZEdoMVlpNWpiMjB2Ykc5bmFXNHZiMkYxZEdnd2dZc0dDaXNHCkFRUUIxbmtDQkFJRWZRUjdBSGtBZHdEZFBUQnF4c2NSTW1NWkhoeVpaemNDb2twZXVONDhyZitIaW5LQUx5bnUKamdBQUFZU3R1Qkh5QUFBRUF3QklNRVlDSVFETTVZU1EvR0w2S0k1UjlPZGNuL3BTaytxVkQ2YnNMODMrRXA5UgoyaFdUYXdJaEFLMWppMWxaNTZEc2Z1TGZYN2JCQzluYlIzRWx4YWxCaHYxelFYTVU3dGx3TUFvR0NDcUdTTTQ5CkJBTURBMmNBTUdRQ01CSzh0c2dIZWd1aCtZaGVsM1BpakhRbHlKMVE1SzY0cDB4cURkbzdXNGZ4Zm9BUzl4clAKczJQS1FjZG9EOWJYd2dJd1g2ekxqeWJaa05IUDV4dEJwN3ZLMkZZZVp0ME9XTFJsVWxsY1VETDNULzdKUWZ3Yw
pHU3E2dlZCTndKMDB3OUhSCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K","rekorBundle":{"SignedEntryTimestamp":"MEUCIC3c+21v9pk6o4BpB/dRAM9lGnyWLi3Xnc+i8LmnNJmeAiEAiqZJbZHx3Idnw+zXv6yM0ipPw/p16R28YGuCJFQ1u8U=","Payload":{"body":"eyJhcGlWZXJzaW9uIjoiMC4wLjEiLCJraW5kIjoiaGFzaGVkcmVrb3JkIiwic3BlYyI6eyJkYXRhIjp7Imhhc2giOnsiYWxnb3JpdGhtIjoic2hhMjU2IiwidmFsdWUiOiI0YmM0NTNiNTNjYjNkOTE0YjQ1ZjRiMjUwMjk0MjM2YWRiYTJjMGUwOWZmNmYwMzc5Mzk0OWU3ZTM5ZmQ0Y2MxIn19LCJzaWduYXR1cmUiOnsiY29udGVudCI6Ik1FUUNJR3AxWFpQNXphSW1vc3JCaERQQ2RYbjNmOHhJOUZIR0xzR1Z4NlVlUlBDZ0FpQXQ1R3JzZFFoT0tuWmNBM0VXZWN2Z0pTSHpDSWpXaWZGQlFrRDdIZHN5bWc9PSIsInB1YmxpY0tleSI6eyJjb250ZW50IjoiTFMwdExTMUNSVWRKVGlCRFJWSlVTVVpKUTBGVVJTMHRMUzB0Q2sxSlNVTnhSRU5EUVdrclowRjNTVUpCWjBsVlZGQlhWR1pQTHpGT1VtRlRSbVJsWTJGQlVTOXdRa1JIU25BNGQwTm5XVWxMYjFwSmVtb3dSVUYzVFhjS1RucEZWazFDVFVkQk1WVkZRMmhOVFdNeWJHNWpNMUoyWTIxVmRWcEhWakpOVWpSM1NFRlpSRlpSVVVSRmVGWjZZVmRrZW1SSE9YbGFVekZ3WW01U2JBcGpiVEZzV2tkc2FHUkhWWGRJYUdOT1RXcEplRTFVU1RGTlJHTjZUbnBGZVZkb1kwNU5ha2w0VFZSSk1VMUVZekJPZWtWNVYycEJRVTFHYTNkRmQxbElDa3R2V2tsNmFqQkRRVkZaU1V0dldrbDZhakJFUVZGalJGRm5RVVZLVVZFMFZ5ODFXRkE1YlRSWllsZFNRbEYwU0VkWGQyNDVkVlZvWVdVek9GVndZMG9LY0VWTk0wUlBjelI2VnpSTlNYSk5abGMwVjFGRU1HWjNjRGhRVlZWU1JGaDJVVE01TkhCdmNXZEhSVzFUYTNKMVRIRlBRMEZWTkhkblowWkxUVUUwUndwQk1WVmtSSGRGUWk5M1VVVkJkMGxJWjBSQlZFSm5UbFpJVTFWRlJFUkJTMEpuWjNKQ1owVkdRbEZqUkVGNlFXUkNaMDVXU0ZFMFJVWm5VVlZ2TTB0dUNtcEtVVm93V0dacFoySkVOV0l3VDFaT1RqQjRjVk52ZDBoM1dVUldVakJxUWtKbmQwWnZRVlV6T1ZCd2VqRlphMFZhWWpWeFRtcHdTMFpYYVhocE5Ga0tXa1E0ZDBwM1dVUldVakJTUVZGSUwwSkNNSGRITkVWYVdrZEdkV0ZYVm5OTWJVcHNaRzFXZFdGWVZucFJSMlIwV1Zkc2MweHRUblppVkVGelFtZHZjZ3BDWjBWRlFWbFBMMDFCUlVKQ1FqVnZaRWhTZDJONmIzWk1NbVJ3WkVkb01WbHBOV3BpTWpCMllrYzVibUZYTkhaaU1rWXhaRWRuZDJkWmMwZERhWE5IQ2tGUlVVSXhibXREUWtGSlJXWlJVamRCU0d0QlpIZEVaRkJVUW5GNGMyTlNUVzFOV2tob2VWcGFlbU5EYjJ0d1pYVk9ORGh5Wml0SWFXNUxRVXg1Ym5VS2FtZEJRVUZaVTNSMVFraDVRVUZCUlVGM1FrbE5SVmxEU1ZGRVRUVlpVMUV2UjB3MlMwazFVamxQWkdOdUwzQlRheXR4VmtRMlluTk1PRE1yUlhBNVVnb3lhRmRVWVhkSmFFRkxNV3BwTVd4YU5UWkVjMloxVEdaWU4y
SkNRemx1WWxJelJXeDRZV3hDYUhZeGVsRllUVlUzZEd4M1RVRnZSME5EY1VkVFRUUTVDa0pCVFVSQk1tTkJUVWRSUTAxQ1N6aDBjMmRJWldkMWFDdFphR1ZzTTFCcGFraFJiSGxLTVZFMVN6WTBjREI0Y1VSa2J6ZFhOR1o0Wm05QlV6bDRjbEFLY3pKUVMxRmpaRzlFT1dKWWQyZEpkMWcyZWt4cWVXSmFhMDVJVURWNGRFSndOM1pMTWtaWlpWcDBNRTlYVEZKc1ZXeHNZMVZFVEROVUx6ZEtVV1ozWXdwSFUzRTJkbFpDVG5kS01EQjNPVWhTQ2kwdExTMHRSVTVFSUVORlVsUkpSa2xEUVZSRkxTMHRMUzBLIn19fX0=","integratedTime":1669361833,"logIndex":7810348,"logID":"c0d23d6ad406973f9559f3ba2d1ca01f84147d8ffc5b8445c224f98b9591801d"}}} "#; let (key_id, key) = get_rekor_public_key(); let rekor_pub_keys = BTreeMap::from([(key_id, key)]); let result = SignedArtifactBundle::new_verified(bundle_raw, &rekor_pub_keys); assert!(result.is_ok()); let bundle = result.unwrap(); assert_eq!(bundle.rekor_bundle.payload.log_index, 7810348); } } ================================================ FILE: src/cosign/client.rs ================================================ // // Copyright 2022 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
use std::collections::BTreeMap; use std::ops::Add; use async_trait::async_trait; use oci_client::manifest::OCI_IMAGE_MEDIA_TYPE; use tracing::warn; use super::constants::{SIGSTORE_OCI_MEDIA_TYPE, SIGSTORE_SIGNATURE_ANNOTATION}; use super::{CosignCapabilities, SignatureLayer}; use crate::cosign::signature_layers::build_signature_layers; use crate::crypto::CosignVerificationKey; use crate::registry::{Auth, OciReference, PushResponse}; use crate::{ crypto::certificate_pool::CertificatePool, errors::{Result, SigstoreError}, }; use tracing::debug; /// Used to generate an empty [OCI Configuration](https://github.com/opencontainers/image-spec/blob/v1.0.0/config.md). pub const CONFIG_DATA: &str = "{}"; /// Cosign Client /// /// Instances of `Client` can be built via [`sigstore::cosign::ClientBuilder`](crate::cosign::ClientBuilder). pub struct Client { pub(crate) registry_client: Box, pub(crate) rekor_pub_keys: Option>, pub(crate) fulcio_cert_pool: Option, } #[cfg_attr(not(target_arch = "wasm32"), async_trait)] #[cfg_attr(target_arch = "wasm32", async_trait(?Send))] impl CosignCapabilities for Client { async fn triangulate( &mut self, image: &OciReference, auth: &Auth, ) -> Result<(OciReference, String)> { let manifest_digest = self .registry_client .fetch_manifest_digest(&image.oci_reference, &auth.into()) .await?; let reference = OciReference::with_tag( image.registry().to_string(), image.repository().to_string(), manifest_digest.replace(':', "-").add(".sig"), ); Ok((reference, manifest_digest)) } async fn trusted_signature_layers( &mut self, auth: &Auth, source_image_digest: &str, cosign_image: &OciReference, ) -> Result> { let (manifest, layers) = self.fetch_manifest_and_layers(auth, cosign_image).await?; let image_manifest = match manifest { oci_client::manifest::OciManifest::Image(im) => im, oci_client::manifest::OciManifest::ImageIndex(_) => { return Err(SigstoreError::RegistryPullManifestError { image: cosign_image.to_string(), error: "Found a OciImageIndex instead 
of a OciImageManifest".to_string(), }); } }; let sl = build_signature_layers( &image_manifest, source_image_digest, &layers, self.rekor_pub_keys.as_ref(), self.fulcio_cert_pool.as_ref(), )?; debug!(signature_layers=?sl, ?cosign_image, "trusted signature layers"); Ok(sl) } async fn push_signature( &mut self, annotations: Option>, auth: &Auth, target_reference: &OciReference, signature_layers: Vec, ) -> Result { let layers: Vec = signature_layers .iter() .filter_map(|sl| { match serde_json::to_vec(&sl.simple_signing) { Ok(data) => { let annotations = match &sl.signature { Some(sig) => [(SIGSTORE_SIGNATURE_ANNOTATION.into(), sig.clone())].into(), None => BTreeMap::new(), }; let image_layer = oci_client::client::ImageLayer::new(data, SIGSTORE_OCI_MEDIA_TYPE.into(), Some(annotations)); Some(image_layer) } Err(e) => { warn!(error = ?e, signaturelayer = ?sl, "Skipping SignatureLayer because serialization failed"); None } } }) .collect(); // TODO: Do we need to support OCI Image Configuration? let config = oci_client::client::Config::oci_v1(CONFIG_DATA.as_bytes().to_vec(), None); let mut manifest = oci_client::manifest::OciImageManifest::build(&layers[..], &config, annotations); manifest.media_type = Some(OCI_IMAGE_MEDIA_TYPE.to_string()); self.registry_client .push( &target_reference.oci_reference, &layers[..], config, &auth.into(), Some(manifest), ) .await .map(|r| r.into()) } } impl Client { /// Internal helper method used to fetch data from an OCI registry async fn fetch_manifest_and_layers( &mut self, auth: &Auth, cosign_image: &OciReference, ) -> Result<( oci_client::manifest::OciManifest, Vec, )> { let oci_auth: oci_client::secrets::RegistryAuth = auth.into(); let (manifest, _) = self .registry_client .pull_manifest(&cosign_image.oci_reference, &oci_auth) .await?; let image_data = self .registry_client .pull( &cosign_image.oci_reference, &oci_auth, vec![SIGSTORE_OCI_MEDIA_TYPE], ) .await?; Ok((manifest, image_data.layers)) } } #[cfg(feature = "mock-client")] 
#[cfg(test)] mod tests { use super::*; use crate::{ cosign::tests::{get_fulcio_cert_pool, get_rekor_public_key}, mock_client::test::MockOciClient, }; fn build_test_client(mock_client: MockOciClient) -> Client { let (key_id, key) = get_rekor_public_key(); let rekor_pub_keys = BTreeMap::from([(key_id, key)]); Client { registry_client: Box::new(mock_client), rekor_pub_keys: Some(rekor_pub_keys), fulcio_cert_pool: Some(get_fulcio_cert_pool()), } } #[tokio::test] async fn triangulate_sigstore_object() { let image = "docker.io/busybox:latest".parse().unwrap(); let image_digest = String::from("sha256:f3cfc9d0dbf931d3db4685ec659b7ac68e2a578219da4aae65427886e649b06b"); let expected_image = "docker.io/library/busybox:sha256-f3cfc9d0dbf931d3db4685ec659b7ac68e2a578219da4aae65427886e649b06b.sig".parse().unwrap(); let mock_client = MockOciClient { fetch_manifest_digest_response: Some(Ok(image_digest.clone())), pull_response: None, pull_manifest_response: None, push_response: None, }; let mut cosign_client = build_test_client(mock_client); let reference = cosign_client .triangulate(&image, &crate::registry::Auth::Anonymous) .await; assert!(reference.is_ok()); assert_eq!(reference.unwrap(), (expected_image, image_digest)); } } ================================================ FILE: src/cosign/client_builder.rs ================================================ // // Copyright 2022 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
use std::collections::BTreeMap; use pki_types::CertificateDer; use tracing::info; use crate::{ cosign::client::Client, crypto::{CosignVerificationKey, certificate_pool::CertificatePool}, errors::Result, registry::ClientConfig, trust::TrustRoot, }; /// A builder that generates Client objects. /// /// ## Rekor integration /// /// Rekor integration can be enabled by specifying Rekor's public key. /// This can be provided via a [`crate::trust::ManualTrustRoot`]. /// /// > Note well: the [`trust::sigstore`](crate::trust::sigstore) module provides helper structs and methods /// > to obtain this data from the official TUF repository of the Sigstore project. /// /// ## Fulcio integration /// /// Fulcio integration can be enabled by specifying Fulcio's certificate. /// This can be provided via a [`crate::trust::sigstore::ManualTrustRoot`]. /// /// > Note well: the [`trust::sigstore`](crate::trust::sigstore) module provides helper structs and methods /// > to obtain this data from the official TUF repository of the Sigstore project. /// /// ## Registry caching /// /// The [`cosign::Client`](crate::cosign::Client) interacts with remote container registries to obtain /// the data needed to perform Sigstore verification. /// /// By default, the client will always reach out to the remote registry. However, /// it's possible to enable an in-memory cache. This behaviour can be enabled via /// the [`ClientBuilder::enable_registry_caching`] method. /// /// Each cached entry will automatically expire after 60 seconds. 
#[derive(Default)] pub struct ClientBuilder<'a> { oci_client_config: ClientConfig, rekor_pub_keys: Option>, fulcio_certs: Vec>, #[cfg(feature = "cached-client")] enable_registry_caching: bool, } impl<'a> ClientBuilder<'a> { /// Enable caching of data returned from remote OCI registries #[cfg(feature = "cached-client")] #[cfg_attr(docsrs, doc(cfg(feature = "cached-client")))] pub fn enable_registry_caching(mut self) -> Self { self.enable_registry_caching = true; self } /// Optional - Configures the roots of trust. /// /// Enables Fulcio and Rekor integration with the given trust repository. /// See [crate::trust::sigstore::TrustRoot] for more details on trust repositories. pub fn with_trust_repository(mut self, repo: &'a R) -> Result { let rekor_keys = repo.rekor_keys()?; if !rekor_keys.is_empty() { self.rekor_pub_keys = Some(rekor_keys); } self.fulcio_certs = repo.fulcio_certs()?; Ok(self) } /// Optional - the configuration to be used by the OCI client. /// /// This can be used when dealing with registries that are not using /// TLS termination, or are using self-signed certificates. pub fn with_oci_client_config(mut self, config: ClientConfig) -> Self { self.oci_client_config = config; self } pub fn build(self) -> Result { let rekor_pub_keys: Option> = self .rekor_pub_keys .map(|keys| { keys.iter() .filter_map( |(key_id, data)| match CosignVerificationKey::try_from_der(data) { Ok(key) => Some((key_id.clone(), key)), Err(e) => { info!("Cannot parse Rekor public key with id {key_id}: {e}"); None } }, ) .collect::>() }) .filter(|m| !m.is_empty()); let fulcio_cert_pool = if self.fulcio_certs.is_empty() { info!("No Fulcio cert has been provided. Fulcio integration disabled"); None } else { let cert_pool = CertificatePool::from_certificates(self.fulcio_certs, [])?; Some(cert_pool) }; let oci_client = oci_client::client::Client::new(self.oci_client_config.clone().into()); let registry_client: Box = { cfg_if::cfg_if! 
{ if #[cfg(feature = "cached-client")] { if self.enable_registry_caching { Box::new(crate::registry::OciCachingClient { registry_client: oci_client, }) as Box } else { Box::new(crate::registry::OciClient { registry_client: oci_client, }) as Box } } else { Box::new(crate::registry::OciClient { registry_client: oci_client, }) as Box } } }; Ok(Client { registry_client, rekor_pub_keys, fulcio_cert_pool, }) } } #[cfg(test)] mod tests { use super::*; use crate::trust::ManualTrustRoot; const OLD_REKOR_ED25519_KEY_DER: &[u8] = &[ 0x30, 0x2a, 0x30, 0x05, 0x06, 0x03, 0x2b, 0x65, 0x70, 0x03, 0x21, 0x00, 0xb7, 0xca, 0xe5, 0xa7, 0x59, 0x27, 0x1b, 0x08, 0xdf, 0x6d, 0xc5, 0xc0, 0x60, 0xf6, 0x00, 0x92, 0x7d, 0x17, 0x88, 0xbc, 0xf5, 0xc7, 0xc3, 0xb8, 0xb7, 0x46, 0x24, 0x12, 0x18, 0x9e, 0xdb, 0x8e, ]; const OLD_REKOR_ED25519_KEY_ID: &str = "cf1199155bddd051268d1f16ac5c0c75c009f6fb5a63f4177f8e18d7051e3fa0"; // Regression test for the ClientBuilder bug: // https://github.com/sigstore/sigstore-rs/issues/508. When the TUF trust root contains an // Ed25519 Rekor key, the resulting Client must have that key in its rekor_pub_keys map. #[test] fn client_builder_parses_ed25519_rekor_key() { let mut trust_root = ManualTrustRoot::default(); trust_root.rekor_keys.insert( OLD_REKOR_ED25519_KEY_ID.to_string(), OLD_REKOR_ED25519_KEY_DER.to_vec(), ); let client = ClientBuilder::default() .with_trust_repository(&trust_root) .expect("with_trust_repository failed") .build() .expect("build failed"); assert!( client.rekor_pub_keys.is_some(), "Expected rekor_pub_keys to be Some after providing an Ed25519 Rekor key, \ but it was None — the key was silently dropped. 
\ Fix: use CosignVerificationKey::try_from_der(data) instead of \ from_der(data, &SigningScheme::default()) in client_builder.rs" ); let keys = client.rekor_pub_keys.unwrap(); assert_eq!( keys.len(), 1, "Expected exactly 1 parsed Rekor key, got {}", keys.len() ); assert!( keys.contains_key(OLD_REKOR_ED25519_KEY_ID), "Expected parsed key to have the correct key ID" ); } } ================================================ FILE: src/cosign/constants.rs ================================================ // // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use const_oid::ObjectIdentifier; pub(crate) const SIGSTORE_ISSUER_OID: ObjectIdentifier = ObjectIdentifier::new_unwrap("1.3.6.1.4.1.57264.1.1"); pub(crate) const SIGSTORE_GITHUB_WORKFLOW_TRIGGER_OID: ObjectIdentifier = ObjectIdentifier::new_unwrap("1.3.6.1.4.1.57264.1.2"); pub(crate) const SIGSTORE_GITHUB_WORKFLOW_SHA_OID: ObjectIdentifier = ObjectIdentifier::new_unwrap("1.3.6.1.4.1.57264.1.3"); pub(crate) const SIGSTORE_GITHUB_WORKFLOW_NAME_OID: ObjectIdentifier = ObjectIdentifier::new_unwrap("1.3.6.1.4.1.57264.1.4"); pub(crate) const SIGSTORE_GITHUB_WORKFLOW_REPOSITORY_OID: ObjectIdentifier = ObjectIdentifier::new_unwrap("1.3.6.1.4.1.57264.1.5"); pub(crate) const SIGSTORE_GITHUB_WORKFLOW_REF_OID: ObjectIdentifier = ObjectIdentifier::new_unwrap("1.3.6.1.4.1.57264.1.6"); /// OID of Ed25519, which is not included in the RustCrypto repo yet. 
pub(crate) const ED25519: ObjectIdentifier = ObjectIdentifier::new_unwrap("1.3.101.112"); pub(crate) const SIGSTORE_OCI_MEDIA_TYPE: &str = "application/vnd.dev.cosign.simplesigning.v1+json"; pub(crate) const SIGSTORE_SIGNATURE_ANNOTATION: &str = "dev.cosignproject.cosign/signature"; pub(crate) const SIGSTORE_BUNDLE_ANNOTATION: &str = "dev.sigstore.cosign/bundle"; pub(crate) const SIGSTORE_CERT_ANNOTATION: &str = "dev.sigstore.cosign/certificate"; ================================================ FILE: src/cosign/constraint/annotation.rs ================================================ // // Copyright 2022 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::collections::{BTreeMap, HashMap}; use serde_json::Value; use tracing::warn; use crate::{cosign::SignatureLayer, errors::Result}; use super::Constraint; /// Constraint for the annotations, which can be verified by [`crate::cosign::verification_constraint::AnnotationVerifier`] /// /// The [`crate::cosign::payload::SimpleSigning`] object can be enriched by a signer /// with more annotations. /// /// A [`AnnotationMarker`] helps to add annotations to the [`crate::cosign::payload::SimpleSigning`] /// of the given [`SignatureLayer`]. /// /// Warning: The signing step must not happen until all [`AnnotationMarker`] /// have already performed `add_constraint`. 
#[derive(Debug)] pub struct AnnotationMarker { pub annotations: HashMap, } impl AnnotationMarker { pub fn new(annotations: HashMap) -> Self { Self { annotations } } } impl Constraint for AnnotationMarker { fn add_constraint(&self, signature_layer: &mut SignatureLayer) -> Result { let mut annotations = match &signature_layer.simple_signing.optional { Some(opt) => { warn!(optional = ?opt, "already has an annotation field"); opt.extra.clone() } None => BTreeMap::new(), }; for (k, v) in &self.annotations { if annotations.contains_key(k) && annotations[k] != *v { warn!(key = ?k, "extra field already has a value"); return Ok(false); } annotations.insert(k.to_owned(), Value::String(v.into())); } let mut opt = signature_layer .simple_signing .optional .clone() .unwrap_or_default(); opt.extra = annotations; signature_layer.simple_signing.optional = Some(opt); Ok(true) } } ================================================ FILE: src/cosign/constraint/mod.rs ================================================ // // Copyright 2022 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Structs that can be used to add constraints to [`crate::cosign::SignatureLayer`] //! with special business logic. //! //! This module provides some common kinds of constraints: //! * [`PrivateKeySigner`]: Attaching a signature //! * [`AnnotationMarker`]: Adding extra annotations //! //! Developers can define ad-hoc constraint logic by creating a Struct that //! 
implements the [`Constraint`] trait //! //! ## Warining //! Because [`PrivateKeySigner`] will sign the whole data of a given //! [`crate::cosign::SignatureLayer`], developers **must** ensure that //! a [`PrivateKeySigner`] is the last constraint to be applied on a //! [`crate::cosign::SignatureLayer`]. Before that, all constraints that //! may modify the content of the [`crate::cosign::SignatureLayer`] should //! have been applied already. use super::SignatureLayer; use crate::errors::Result; pub type SignConstraintVec = Vec>; pub type SignConstraintRefVec<'a> = Vec<&'a Box>; pub trait Constraint: std::fmt::Debug { /// Given a mutable reference of [`crate::cosign::SignatureLayer`], return /// `true` if the constraint is applied successfully. /// /// Developer can use the /// [`crate::errors::SigstoreError::ApplyConstraintError`] error /// when something goes wrong inside of the application logic. /// /// ``` /// use sigstore::{ /// cosign::constraint::Constraint, /// cosign::signature_layers::SignatureLayer, /// errors::{SigstoreError, Result}, /// }; /// /// #[derive(Debug)] /// struct MyConstraint{} /// /// impl Constraint for MyConstraint { /// fn add_constraint(&self, _sl: &mut SignatureLayer) -> Result { /// Err(SigstoreError::ApplyConstraintError( /// "something went wrong!".to_string())) /// } /// } /// /// ``` fn add_constraint(&self, signature_layer: &mut SignatureLayer) -> Result; } pub mod annotation; pub use annotation::AnnotationMarker; pub mod signature; pub use self::signature::PrivateKeySigner; ================================================ FILE: src/cosign/constraint/signature.rs ================================================ // // Copyright 2022 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Structs that can be used to sign a [`crate::cosign::SignatureLayer`] use base64::{Engine as _, engine::general_purpose::STANDARD as BASE64_STD_ENGINE}; use tracing::warn; use zeroize::Zeroizing; use crate::{ cosign::SignatureLayer, crypto::{SigStoreSigner, SigningScheme, signing_key::SigStoreKeyPair}, errors::{Result, SigstoreError}, }; use super::Constraint; /// Sign the [`SignatureLayer`] with the given [`SigStoreSigner`]. /// This constraint must be the last one to applied to a [`SignatureLayer`], /// since all the plaintext is defined. #[derive(Debug)] pub struct PrivateKeySigner { key: SigStoreSigner, } impl PrivateKeySigner { /// Create a new [PrivateKeySigner] with given raw PEM data of a /// private key. pub fn new_with_raw( key_raw: Zeroizing>, password: Zeroizing>, signing_scheme: &SigningScheme, ) -> Result { let signer = match password.is_empty() { true => SigStoreKeyPair::from_pem(&key_raw), false => SigStoreKeyPair::from_encrypted_pem(&key_raw, &password), } .map_err(|e| SigstoreError::ApplyConstraintError(e.to_string()))? 
.to_sigstore_signer(signing_scheme) .map_err(|e| SigstoreError::ApplyConstraintError(e.to_string()))?; Ok(Self { key: signer }) } pub fn new_with_signer(signer: SigStoreSigner) -> Self { Self { key: signer } } } impl Constraint for PrivateKeySigner { fn add_constraint(&self, signature_layer: &mut SignatureLayer) -> Result { if signature_layer.signature.is_some() { warn!(signature = ?signature_layer.signature, "already has signature"); return Ok(false); } signature_layer.raw_data = serde_json::to_vec(&signature_layer.simple_signing)?; let sig = self.key.sign(&signature_layer.raw_data)?; let sig_base64 = BASE64_STD_ENGINE.encode(sig); signature_layer.signature = Some(sig_base64); Ok(true) } } ================================================ FILE: src/cosign/mod.rs ================================================ // // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Structs providing cosign verification capabilities //! //! The focus of this crate is to provide the verification capabilities of cosign, //! not the signing one. //! //! Sigstore verification can be done using [`sigstore::cosign::Client`](crate::cosign::client::Client). //! Instances of this struct can be created via the [`sigstore::cosign::ClientBuilder`](crate::cosign::client_builder::ClientBuilder). //! //! ## What is currently supported //! //! The crate implements the following verification mechanisms: //! //! * Verify using a given key //! 
* Verify bundle produced by transparency log (Rekor) //! * Verify signature produced in keyless mode, using Fulcio Web-PKI //! //! Signature annotations and certificate email can be provided at verification time. //! //! ## Unit testing inside of our own libraries //! //! In case you want to mock sigstore interactions inside of your own code, you //! can implement the [`CosignCapabilities`] trait inside of your test suite. use std::collections::BTreeMap; use async_trait::async_trait; use tracing::warn; use crate::errors::{Result, SigstoreApplicationConstraintsError, SigstoreVerifyConstraintsError}; use crate::registry::{Auth, PushResponse}; use crate::crypto::{CosignVerificationKey, Signature}; use crate::errors::SigstoreError; use pkcs8::der::Decode; use x509_cert::Certificate; pub mod bundle; pub(crate) mod constants; pub mod signature_layers; pub use signature_layers::SignatureLayer; pub mod client; pub use self::client::Client; pub mod client_builder; pub use self::client_builder::ClientBuilder; pub mod verification_constraint; pub use self::constraint::{Constraint, SignConstraintRefVec}; use self::verification_constraint::{VerificationConstraint, VerificationConstraintRefVec}; pub mod payload; use crate::registry::oci_reference::OciReference; pub use payload::simple_signing; pub mod constraint; #[cfg_attr(not(target_arch = "wasm32"), async_trait)] #[cfg_attr(target_arch = "wasm32", async_trait(?Send))] /// Cosign Abilities that have to be implemented by a /// Cosign client pub trait CosignCapabilities { /// Calculate the cosign image reference. /// This is the location cosign stores signatures. async fn triangulate( &mut self, image: &OciReference, auth: &Auth, ) -> Result<(OciReference, String)>; /// Returns the list of [`SignatureLayer`] /// objects that are associated with the given signature object. /// /// Each layer is verified, to ensure it contains legitimate data. 
/// /// ## Layers with embedded certificate /// /// A signature can contain a certificate, this happens when signatures /// are produced in keyless mode or when a PKCS11 tokens are used. /// /// The certificate is added to [`SignatureLayer::certificate_signature`] /// only when it can be trusted. /// /// In order to trust an embedded certificate, the following prerequisites /// must be satisfied: /// /// * The [`sigstore::cosign::Client`](crate::cosign::client::Client) must /// have been created with Rekor integration enabled (see [`crate::trust::sigstore::ManualTrustRoot`]) /// * The [`sigstore::cosign::Client`](crate::cosign::client::Client) must /// have been created with Fulcio integration enabled (see [`crate::trust::sigstore::ManualTrustRoot]) /// * The layer must include a bundle produced by Rekor /// /// > Note well: the [`trust::sigstore`](crate::trust::sigstore) module provides helper structs and methods /// > to obtain this data from the official TUF repository of the Sigstore project. /// /// When the embedded certificate cannot be verified, [`SignatureLayer::certificate_signature`] /// is going to be `None`. /// /// ## Usage /// /// These returned objects can then be verified against /// [`VerificationConstraints`](crate::cosign::verification_constraint::VerificationConstraint) /// using the [`verify_constraints`] function. async fn trusted_signature_layers( &mut self, auth: &Auth, source_image_digest: &str, cosign_image: &OciReference, ) -> Result>; /// Push [`SignatureLayer`] objects to the registry. This function will do /// the following steps: /// * Generate a series of [`oci_client::client::ImageLayer`]s due to /// the given [`Vec`]. /// * Generate a `OciImageManifest` of [`oci_client::manifest::OciManifest`] /// due to the given `source_image_digest` and `signature_layers`. 
It supports /// to be extended when newly published /// [Referrers API of OCI Registry v1.1.0](https://github.com/opencontainers/distribution-spec/blob/v1.1.0-rc1/spec.md#listing-referrers), /// is prepared. At that time, /// [an artifact manifest](https://github.com/opencontainers/image-spec/blob/v1.1.0-rc2/artifact.md) /// will be created instead of [an image manifest](https://github.com/opencontainers/image-spec/blob/v1.1.0-rc2/manifest.md). /// * Push the generated manifest together with the layers /// to the `target_reference`. `target_reference` contains information /// about the registry, repository and tag. /// /// The parameters: /// - `annotations`: annotations of the generated manifest /// - `auth`: Credential used to access the registry /// - `target_reference`: target reference to push the manifest /// - `signature_layers`: [`SignatureLayer`] objects containing signature information async fn push_signature( &mut self, annotations: Option>, auth: &Auth, target_reference: &OciReference, signature_layers: Vec, ) -> Result; /// Verifies the signature produced by cosign when signing the given blob via the `cosign sign-blob` command /// /// The parameters: /// * `cert`: a PEM encoded x509 certificate that contains the public key used to verify the signature. /// Note that cert is not double-base64-encoded like the output of sigstore/cosign is. /// * `signature`: the base64 encoded signature of the blob that has to be verified /// * `blob`: the contents of the blob /// /// This function returns `Ok())` when the given signature has been verified, otherwise returns an `Err`. 
fn verify_blob(cert: &str, signature: &str, blob: &[u8]) -> Result<()> {
    // Decode the PEM wrapper first; the certificate itself is DER inside.
    let pem = pem::parse(cert)?;
    let cert = Certificate::from_der(pem.contents()).map_err(|e| {
        SigstoreError::PKCS8SpkiError(format!("parse der into cert failed: {e}"))
    })?;
    // Extract the SubjectPublicKeyInfo and build a verification key from it.
    let spki = cert.tbs_certificate.subject_public_key_info;
    let ver_key = CosignVerificationKey::try_from(&spki)?;
    // The signature is expected base64-encoded, as produced by `cosign sign-blob`.
    let signature = Signature::Base64Encoded(signature.as_bytes());
    ver_key.verify_signature(signature, blob)?;
    Ok(())
}

///
/// Verifies the signature produced by cosign when signing the given blob via the `cosign sign-blob` command
///
/// The parameters:
/// * `public_key`: the public key used to verify the signature, PEM encoded
/// * `signature`: the base64 encoded signature of the blob that has to be verified
/// * `blob`: the contents of the blob
///
/// This function returns `Ok(())` when the given signature has been verified, otherwise returns an `Err`.
fn verify_blob_with_public_key(public_key: &str, signature: &str, blob: &[u8]) -> Result<()> {
    // Same flow as `verify_blob`, but the key comes directly from a PEM
    // public key instead of being extracted from a certificate.
    let ver_key = CosignVerificationKey::try_from_pem(public_key.as_bytes())?;
    let signature = Signature::Base64Encoded(signature.as_bytes());
    ver_key.verify_signature(signature, blob)?;
    Ok(())
}
}

/// Given a list of trusted `SignatureLayer`, find all the constraints that
/// aren't satisfied by the layers.
///
/// If there's any unsatisfied constraints it means that the image failed
/// verification.
/// If there's no unsatisfied constraints it means that the image passed
/// verification.
///
/// Returns a `Result` with either `Ok(())` for passed verification or
/// [`SigstoreVerifyConstraintsError`]
/// which contains a vector of references to unsatisfied constraints.
///
/// See the documentation of the [`cosign::verification_constraint`](crate::cosign::verification_constraint) module for more
/// details about how to define verification constraints.
pub fn verify_constraints<'a, 'b, I>( signature_layers: &'a [SignatureLayer], constraints: I, ) -> std::result::Result<(), SigstoreVerifyConstraintsError<'b>> where I: Iterator>, { let unsatisfied_constraints: VerificationConstraintRefVec = constraints.filter(|c| { let mut is_c_unsatisfied = true; signature_layers.iter().any( | sl | { // iterate through all layers and find if at least one layer // satisfies constraint. If so, we stop iterating match c.verify(sl) { Ok(is_sl_verified) => { is_c_unsatisfied = !is_sl_verified; is_sl_verified // if true, stop searching } Err(e) => { warn!(error = ?e, constraint = ?c, "Skipping layer because constraint verification returned an error"); // handle errors as verification failures is_c_unsatisfied = true; false // keep searching to see if other layer satisfies } } }); is_c_unsatisfied // if true, constraint gets filtered into result }).collect(); if unsatisfied_constraints.is_empty() { Ok(()) } else { Err(SigstoreVerifyConstraintsError { unsatisfied_constraints, }) } } /// Given a [`SignatureLayer`], apply all the constraints to that. /// /// If there's any constraints that fails to apply, it means the /// application process fails. /// If all constraints succeed applying, it means that this layer /// passes applying constraints process. /// /// Returns a `Result` with either `Ok()` for success or /// [`SigstoreApplicationConstraintsError`] /// which contains a vector of references to unapplied constraints. /// /// See the documentation of the [`cosign::constraint`](crate::cosign::constraint) module for more /// details about how to define constraints. 
pub fn apply_constraints<'a, 'b, I>(
    signature_layer: &'a mut SignatureLayer,
    constraints: I,
) -> std::result::Result<(), SigstoreApplicationConstraintsError<'b>>
where
    I: Iterator<Item = &'b Box<dyn Constraint>>,
{
    // Every constraint must apply successfully; the ones that return
    // `Ok(false)` or error out are collected into the returned error.
    let unapplied_constraints: SignConstraintRefVec = constraints
        .filter(|c| match c.add_constraint(signature_layer) {
            Ok(is_applied) => !is_applied,
            Err(e) => {
                warn!(error = ?e, constraint = ?c, "Applying constraint failed due to error");
                // treat an error as "constraint not applied"
                true
            }
        })
        .collect();

    if unapplied_constraints.is_empty() {
        Ok(())
    } else {
        Err(SigstoreApplicationConstraintsError {
            unapplied_constraints,
        })
    }
}

#[cfg(test)]
mod tests {
    use pki_types::CertificateDer;
    use serde_json::json;

    use super::constraint::{AnnotationMarker, PrivateKeySigner};
    use super::verification_constraint::cert_subject_email_verifier::StringVerifier;
    use super::*;
    use crate::cosign::signature_layers::CertificateSubject;
    use crate::cosign::signature_layers::tests::build_correct_signature_layer_with_certificate;
    use crate::cosign::simple_signing::Optional;
    use crate::cosign::verification_constraint::{
        AnnotationVerifier, CertSubjectEmailVerifier, VerificationConstraintVec,
    };
    use crate::crypto::SigningScheme;
    use crate::crypto::certificate_pool::CertificatePool;

    #[cfg(feature = "test-registry")]
    use testcontainers::{core::WaitFor, runners::AsyncRunner};

    pub(crate) const REKOR_PUB_KEY: &str = r#"-----BEGIN PUBLIC KEY-----
MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE2G2Y+2tabdTV5BcGiBIx0a9fAFwr
kBbmLSGtks4L3qX6yYY0zufBnhC8Ur/iy55GhWP/9A/bY2LhC30M9+RYtw==
-----END PUBLIC KEY-----"#;
    pub(crate) const REKOR_PUB_KEY_ID: &str =
        "c0d23d6ad406973f9559f3ba2d1ca01f84147d8ffc5b8445c224f98b9591801d";

    const FULCIO_CRT_1_PEM: &str = r#"-----BEGIN CERTIFICATE-----
MIIB+DCCAX6gAwIBAgITNVkDZoCiofPDsy7dfm6geLbuhzAKBggqhkjOPQQDAzAq
MRUwEwYDVQQKEwxzaWdzdG9yZS5kZXYxETAPBgNVBAMTCHNpZ3N0b3JlMB4XDTIx
MDMwNzAzMjAyOVoXDTMxMDIyMzAzMjAyOVowKjEVMBMGA1UEChMMc2lnc3RvcmUu
ZGV2MREwDwYDVQQDEwhzaWdzdG9yZTB2MBAGByqGSM49AgEGBSuBBAAiA2IABLSy
A7Ii5k+pNO8ZEWY0ylemWDowOkNa3kL+GZE5Z5GWehL9/A9bRNA3RbrsZ5i0Jcas taRL7Sp5fp/jD5dxqc/UdTVnlvS16an+2Yfswe/QuLolRUCrcOE2+2iA5+tzd6Nm MGQwDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQEwHQYDVR0OBBYE FMjFHQBBmiQpMlEk6w2uSu1KBtPsMB8GA1UdIwQYMBaAFMjFHQBBmiQpMlEk6w2u Su1KBtPsMAoGCCqGSM49BAMDA2gAMGUCMH8liWJfMui6vXXBhjDgY4MwslmN/TJx Ve/83WrFomwmNf056y1X48F9c4m3a3ozXAIxAKjRay5/aj/jsKKGIkmQatjI8uup Hr/+CxFvaJWmpYqNkLDGRU+9orzh5hI2RrcuaQ== -----END CERTIFICATE-----"#; const FULCIO_CRT_2_PEM: &str = r#"-----BEGIN CERTIFICATE----- MIIB9zCCAXygAwIBAgIUALZNAPFdxHPwjeDloDwyYChAO/4wCgYIKoZIzj0EAwMw KjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTAeFw0y MTEwMDcxMzU2NTlaFw0zMTEwMDUxMzU2NThaMCoxFTATBgNVBAoTDHNpZ3N0b3Jl LmRldjERMA8GA1UEAxMIc2lnc3RvcmUwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAT7 XeFT4rb3PQGwS4IajtLk3/OlnpgangaBclYpsYBr5i+4ynB07ceb3LP0OIOZdxex X69c5iVuyJRQ+Hz05yi+UF3uBWAlHpiS5sh0+H2GHE7SXrk1EC5m1Tr19L9gg92j YzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRY wB5fkUWlZql6zJChkyLQKsXF+jAfBgNVHSMEGDAWgBRYwB5fkUWlZql6zJChkyLQ KsXF+jAKBggqhkjOPQQDAwNpADBmAjEAj1nHeXZp+13NWBNa+EDsDP8G1WWg1tCM WP/WHPqpaVo0jhsweNFZgSs0eE7wYI4qAjEA2WB9ot98sIkoF3vZYdd3/VtWB5b9 TNMea7Ix/stJ5TfcLLeABLE4BNJOsQ4vnBHJ -----END CERTIFICATE-----"#; #[cfg(feature = "test-registry")] const SIGNED_IMAGE: &str = "busybox:1.34"; pub(crate) fn get_fulcio_cert_pool() -> CertificatePool { fn pem_to_der(input: &str) -> CertificateDer<'_> { let pem_cert = pem::parse(input).unwrap(); assert_eq!(pem_cert.tag(), "CERTIFICATE"); CertificateDer::from(pem_cert.into_contents()) } let certificates = vec![pem_to_der(FULCIO_CRT_1_PEM), pem_to_der(FULCIO_CRT_2_PEM)]; CertificatePool::from_certificates(certificates, []).unwrap() } pub(crate) fn get_rekor_public_key() -> (String, CosignVerificationKey) { let key = CosignVerificationKey::from_pem(REKOR_PUB_KEY.as_bytes(), &SigningScheme::default()) .expect("Cannot create test REKOR_PUB_KEY"); (REKOR_PUB_KEY_ID.to_string(), key) } #[test] fn 
verify_constraints_all_satisfied() { let email = "alice@example.com".to_string(); let issuer = "an issuer".to_string(); let mut annotations: BTreeMap = BTreeMap::new(); annotations.insert("key1".into(), "value1".into()); annotations.insert("key2".into(), "value2".into()); let mut layers: Vec = Vec::new(); for _ in 0..5 { let mut sl = build_correct_signature_layer_with_certificate(); let mut cert_signature = sl.certificate_signature.unwrap(); let cert_subj = CertificateSubject::Email(email.clone()); cert_signature.issuer = Some(issuer.clone()); cert_signature.subject = cert_subj; sl.certificate_signature = Some(cert_signature); let mut extra: BTreeMap = annotations .iter() .map(|(k, v)| (k.clone(), json!(v))) .collect(); extra.insert("something extra".into(), json!("value extra")); let mut simple_signing = sl.simple_signing; let optional = Optional { creator: Some("test".into()), timestamp: None, extra, }; simple_signing.optional = Some(optional); sl.simple_signing = simple_signing; layers.push(sl); } let mut constraints: VerificationConstraintVec = Vec::new(); let vc = CertSubjectEmailVerifier { email: StringVerifier::ExactMatch(email.clone()), issuer: Some(StringVerifier::ExactMatch(issuer)), }; constraints.push(Box::new(vc)); let vc = CertSubjectEmailVerifier { email: StringVerifier::ExactMatch(email), issuer: None, }; constraints.push(Box::new(vc)); let vc = AnnotationVerifier { annotations }; constraints.push(Box::new(vc)); verify_constraints(&layers, constraints.iter()).expect("should not return an error"); } #[test] fn verify_constraints_none_satisfied() { let email = "alice@example.com".to_string(); let issuer = "an issuer".to_string(); let wrong_email = "bob@example.com".to_string(); let mut layers: Vec = Vec::new(); for _ in 0..5 { let mut sl = build_correct_signature_layer_with_certificate(); let mut cert_signature = sl.certificate_signature.unwrap(); let cert_subj = CertificateSubject::Email(email.clone()); cert_signature.issuer = Some(issuer.clone()); 
cert_signature.subject = cert_subj; sl.certificate_signature = Some(cert_signature); let mut extra: BTreeMap = BTreeMap::new(); extra.insert("something extra".into(), json!("value extra")); let mut simple_signing = sl.simple_signing; let optional = Optional { creator: Some("test".into()), timestamp: None, extra, }; simple_signing.optional = Some(optional); sl.simple_signing = simple_signing; layers.push(sl); } let mut constraints: VerificationConstraintVec = Vec::new(); let vc = CertSubjectEmailVerifier { email: StringVerifier::ExactMatch(wrong_email.clone()), issuer: Some(StringVerifier::ExactMatch(issuer)), // correct issuer }; constraints.push(Box::new(vc)); let vc = CertSubjectEmailVerifier { email: StringVerifier::ExactMatch(wrong_email), issuer: None, // missing issuer, more relaxed }; constraints.push(Box::new(vc)); let err = verify_constraints(&layers, constraints.iter()).expect_err("we should have an err"); assert_eq!(err.unsatisfied_constraints.len(), 2); } #[test] fn verify_constraints_some_unsatisfied() { let email = "alice@example.com".to_string(); let issuer = "an issuer".to_string(); let email_incorrect = "bob@example.com".to_string(); let mut layers: Vec = Vec::new(); for _ in 0..5 { let mut sl = build_correct_signature_layer_with_certificate(); let mut cert_signature = sl.certificate_signature.unwrap(); let cert_subj = CertificateSubject::Email(email.clone()); cert_signature.issuer = Some(issuer.clone()); cert_signature.subject = cert_subj; sl.certificate_signature = Some(cert_signature); let mut extra: BTreeMap = BTreeMap::new(); extra.insert("something extra".into(), json!("value extra")); let mut simple_signing = sl.simple_signing; let optional = Optional { creator: Some("test".into()), timestamp: None, extra, }; simple_signing.optional = Some(optional); sl.simple_signing = simple_signing; layers.push(sl); } let mut constraints: VerificationConstraintVec = Vec::new(); let satisfied_constraint = CertSubjectEmailVerifier { email: 
StringVerifier::ExactMatch(email), issuer: Some(StringVerifier::ExactMatch(issuer)), }; constraints.push(Box::new(satisfied_constraint)); let unsatisfied_constraint = CertSubjectEmailVerifier { email: StringVerifier::ExactMatch(email_incorrect), issuer: None, }; constraints.push(Box::new(unsatisfied_constraint)); let err = verify_constraints(&layers, constraints.iter()).expect_err("we should have an err"); assert_eq!(err.unsatisfied_constraints.len(), 1); } #[test] fn add_constrains_all_succeed() { let mut signature_layer = SignatureLayer::new_unsigned( &"test_image".parse().unwrap(), "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", ) .expect("create SignatureLayer failed"); let signer = SigningScheme::ECDSA_P256_SHA256_ASN1 .create_signer() .expect("create signer failed"); let signer = PrivateKeySigner::new_with_signer(signer); let annotations = [(String::from("key"), String::from("value"))].into(); let annotations = AnnotationMarker::new(annotations); let constrains: Vec> = vec![Box::new(signer), Box::new(annotations)]; apply_constraints(&mut signature_layer, constrains.iter()).expect("no error should occur"); } #[test] fn add_constrain_some_failed() { let mut signature_layer = SignatureLayer::new_unsigned( &"test_image".parse().unwrap(), "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", ) .expect("create SignatureLayer failed"); let signer = SigningScheme::ECDSA_P256_SHA256_ASN1 .create_signer() .expect("create signer failed"); let signer = PrivateKeySigner::new_with_signer(signer); let another_signer_of_same_layer = SigningScheme::ECDSA_P256_SHA256_ASN1 .create_signer() .expect("create signer failed"); let another_signer_of_same_layer = PrivateKeySigner::new_with_signer(another_signer_of_same_layer); let annotations = [(String::from("key"), String::from("value"))].into(); let annotations = AnnotationMarker::new(annotations); let constrains: Vec> = vec![ Box::new(signer), Box::new(annotations), 
Box::new(another_signer_of_same_layer), ]; apply_constraints(&mut signature_layer, constrains.iter()) .expect_err("no error should occur"); } #[cfg(feature = "test-registry")] #[rstest::rstest] #[case(SigningScheme::RSA_PSS_SHA256(2048))] #[case(SigningScheme::RSA_PSS_SHA384(2048))] #[case(SigningScheme::RSA_PSS_SHA512(2048))] #[case(SigningScheme::RSA_PKCS1_SHA256(2048))] #[case(SigningScheme::RSA_PKCS1_SHA384(2048))] #[case(SigningScheme::RSA_PKCS1_SHA512(2048))] #[case(SigningScheme::ECDSA_P256_SHA256_ASN1)] #[case(SigningScheme::ECDSA_P384_SHA384_ASN1)] #[case(SigningScheme::ED25519)] #[tokio::test] #[serial_test::serial] async fn sign_verify_image(#[case] signing_scheme: SigningScheme) { let test_container = registry_image() .start() .await .expect("failed to start registry"); let port = test_container .get_host_port_ipv4(5000) .await .expect("failed to get port"); let mut client = ClientBuilder::default() .enable_registry_caching() .with_oci_client_config(crate::registry::ClientConfig { protocol: crate::registry::ClientProtocol::HttpsExcept(vec![format!( "localhost:{}", port )]), ..Default::default() }) .build() .expect("failed to create oci client"); let image_ref = format!("localhost:{}/{}", port, SIGNED_IMAGE) .parse::() .expect("failed to parse reference"); prepare_image_to_be_signed(&mut client, &image_ref).await; let (cosign_signature_image, source_image_digest) = client .triangulate(&image_ref, &crate::registry::Auth::Anonymous) .await .expect("get manifest failed"); let mut signature_layer = SignatureLayer::new_unsigned(&image_ref, &source_image_digest) .expect("create SignatureLayer failed"); let signer = signing_scheme .create_signer() .expect("create signer failed"); let pubkey = signer .to_sigstore_keypair() .expect("to keypair failed") .public_key_to_pem() .expect("derive public key failed"); let signer = PrivateKeySigner::new_with_signer(signer); if !signer .add_constraint(&mut signature_layer) .expect("sign SignatureLayer failed") { 
panic!("failed to sign SignatureLayer"); }; client .push_signature( None, &Auth::Anonymous, &cosign_signature_image, vec![signature_layer], ) .await .expect("push signature failed"); dbg!("start to verify"); let (cosign_image, manifest_digest) = client .triangulate(&image_ref, &Auth::Anonymous) .await .expect("triangulate failed"); let signature_layers = client .trusted_signature_layers(&Auth::Anonymous, &manifest_digest, &cosign_image) .await .expect("get trusted signature layers failed"); let pk_verifier = verification_constraint::PublicKeyVerifier::new(pubkey.as_bytes(), &signing_scheme) .expect("create PublicKeyVerifier failed"); assert_eq!(signature_layers.len(), 1); let res = pk_verifier .verify(&signature_layers[0]) .expect("failed to verify"); assert!(res); } #[cfg(feature = "test-registry")] async fn prepare_image_to_be_signed(client: &mut Client, image_ref: &OciReference) { let data = client .registry_client .pull( &SIGNED_IMAGE.parse().expect("failed to parse image ref"), &oci_client::secrets::RegistryAuth::Anonymous, vec![oci_client::manifest::IMAGE_DOCKER_LAYER_GZIP_MEDIA_TYPE], ) .await .expect("pull test image failed"); client .registry_client .push( &image_ref.oci_reference, &data.layers[..], data.config.clone(), &oci_client::secrets::RegistryAuth::Anonymous, None, ) .await .expect("push test image failed"); } #[cfg(feature = "test-registry")] fn registry_image() -> testcontainers::GenericImage { testcontainers::GenericImage::new("docker.io/library/registry", "2") .with_wait_for(WaitFor::message_on_stderr("listening on ")) } } ================================================ FILE: src/cosign/payload/mod.rs ================================================ // // Copyright 2022 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! This module defines different kinds of payload to be signed //! in cosign. Now it supports: //! * `SimpleSigning`: Refer to pub mod simple_signing; pub use simple_signing::SimpleSigning; ================================================ FILE: src/cosign/payload/simple_signing.rs ================================================ // // Copyright 2022 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! This module provides a series of Rust structs that implementation //! the Container signature format described //! [here](https://github.com/containers/image/blob/a5061e5a5f00333ea3a92e7103effd11c6e2f51d/docs/containers-signature.5.md#json-data-format). 
use crate::registry::OciReference; use serde::{Deserialize, Serialize}; use serde_json::Value; use std::{collections::BTreeMap, fmt}; use tracing::{debug, error, info}; /// Default type name of [`Critical`] when doing cosign signing pub const CRITICAL_TYPE_NAME: &str = "cosign container image signature"; #[derive(Serialize, Deserialize, Debug, Clone)] pub struct SimpleSigning { pub critical: Critical, pub optional: Option, } impl fmt::Display for SimpleSigning { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "{}", serde_json::to_string_pretty(self).map_err(|e| { error!(error=?e, simple_signing=?self, "Cannot convert to JSON"); fmt::Error })? ) } } impl SimpleSigning { /// Create a new simple signing payload due to the given image reference /// and manifest_digest pub fn new(image_ref: &OciReference, manifest_digest: &str) -> Self { Self { critical: Critical { type_name: CRITICAL_TYPE_NAME.to_string(), image: Image { docker_manifest_digest: manifest_digest.to_string(), }, identity: Identity { docker_reference: image_ref.to_string(), }, }, optional: None, } } /// Checks whether all the provided `annotations` are satisfied pub fn satisfies_annotations(&self, annotations: &BTreeMap) -> bool { if annotations.is_empty() { debug!("no annotations have been provided -> returning true"); return true; } match &self.optional { Some(opt) => opt.satisfies_annotations(annotations), None => { info!( simple_signing=?self, ?annotations, "annotations not satisfied because `optional` attribute is None" ); false } } } /// Compares the digest given by the user with the Docker manifest digest /// stored inside of the Critical object pub fn satisfies_manifest_digest(&self, expected_digest: &str) -> bool { let matches = self.critical.image.docker_manifest_digest == expected_digest; if !matches { info!( simple_signing=?self, expected_digest, "expected digest not found" ); } matches } } #[derive(Serialize, Deserialize, Debug, Clone)] pub struct Critical { //TODO: 
should we validate the contents of this attribute to ensure it's "cosign container image signature"? pub identity: Identity, pub image: Image, #[serde(rename = "type")] pub type_name: String, } #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "kebab-case")] pub struct Image { pub docker_manifest_digest: String, } #[derive(Serialize, Deserialize, Debug, Clone)] #[serde(rename_all = "kebab-case")] pub struct Identity { pub docker_reference: String, } #[derive(Serialize, Deserialize, Debug, Clone, Default)] pub struct Optional { #[serde(skip_serializing_if = "Option::is_none")] pub creator: Option, #[serde(skip_serializing_if = "Option::is_none")] pub timestamp: Option, #[serde(flatten)] pub extra: BTreeMap, } impl Optional { /// Checks whether all the provided `annotations` are satisfied pub fn satisfies_annotations(&self, annotations: &BTreeMap) -> bool { if self.extra.is_empty() { info!( ?annotations, "Annotations are not satisfied, no annotations are part of the Simple Signing object" ); return false; } for (req_key, req_val) in annotations { match self.extra.get(req_key) { Some(curr_val) => match curr_val { serde_json::Value::String(s) => { if req_val != s { info!( annotation = ?req_key, expected_value = ?req_val, current_value = ?s, "Annotation not satisfied" ); return false; } } serde_json::Value::Number(n) => { let curr_val = n.to_string(); if req_val != &curr_val { info!( annotation = ?req_key, expected_value = ?req_val, current_value = ?n, "Annotation not satisfied" ); return false; } } serde_json::Value::Bool(b) => { let curr_val = if *b { "true" } else { "false" }; if req_val != curr_val { info!( annotation = ?req_key, expected_value = ?req_val, current_value = ?curr_val, "Annotation not satisfied" ); return false; } } _ => { error!( annotation = ?req_key, expected_value = ?req_val, current_value = ?curr_val.to_string(), "Annotation type not handled" ); return false; } }, None => { info!( missing_annotation = ?req_key, 
layer_annotations= ?self.extra, "Annotation not satisfied"); return false; } } } true } } #[cfg(test)] mod tests { use super::*; use serde_json::json; #[test] fn simple_signing_does_not_satisfy_annotations_when_optional_is_none() { let ss_json = json!({ "critical": { "type": "type_foo", "image": { "docker-manifest-digest": "sha256:something" }, "identity": { "docker-reference": "registry.foo.bar/busybox" } } }); let ss: SimpleSigning = serde_json::from_value(ss_json).unwrap(); let mut annotations: BTreeMap = BTreeMap::new(); annotations.insert(String::from("env"), String::from("prod")); assert!(!ss.satisfies_annotations(&annotations)); } #[test] fn simple_signing_satisfies_empty_annotations_even_when_optional_is_none() { let ss_json = json!({ "critical": { "type": "type_foo", "image": { "docker-manifest-digest": "sha256:something" }, "identity": { "docker-reference": "registry.foo.bar/busybox" } } }); let ss: SimpleSigning = serde_json::from_value(ss_json).unwrap(); let annotations: BTreeMap = BTreeMap::new(); assert!(ss.satisfies_annotations(&annotations)); } #[test] fn optional_has_all_the_required_annotations() { let mut annotations: BTreeMap = BTreeMap::new(); annotations.insert(String::from("env"), String::from("prod")); annotations.insert(String::from("number"), String::from("1")); annotations.insert(String::from("bool"), String::from("true")); let optional_json = json!({ "env": "prod", "number": 1, "bool": true }); let optional: Optional = serde_json::from_value(optional_json).unwrap(); assert!(optional.satisfies_annotations(&annotations)); } #[test] fn optional_does_not_satisfy_annotations_because_one_annotation_is_missing() { let mut annotations: BTreeMap = BTreeMap::new(); annotations.insert(String::from("env"), String::from("prod")); annotations.insert(String::from("owner"), String::from("flavio")); let optional_json = json!({ "owner": "flavio", "team": "devops" }); let optional: Optional = serde_json::from_value(optional_json).unwrap(); 
assert!(!optional.satisfies_annotations(&annotations)); } #[test] fn optional_does_not_satisfy_annotations_because_one_annotation_has_different_value() { let mut annotations: BTreeMap = BTreeMap::new(); annotations.insert(String::from("env"), String::from("prod")); annotations.insert(String::from("owner"), String::from("flavio")); let optional_json = json!({ "env": "staging", "owner": "flavio", "team": "devops" }); let optional: Optional = serde_json::from_value(optional_json).unwrap(); assert!(!optional.satisfies_annotations(&annotations)); } #[test] fn optional_satisfies_annotations_when_no_annotation_is_provided() { let annotations: BTreeMap = BTreeMap::new(); let optional_json = json!({ "env": "prod", "owner": "flavio", "team": "devops" }); let optional: Optional = serde_json::from_value(optional_json).unwrap(); assert!(optional.satisfies_annotations(&annotations)); } #[test] fn simple_signing_satisfy_manifest_digest_works_as_expected() { let expected_digest = "sha256:something"; let ss_json = json!({ "critical": { "type": "type_foo", "image": { "docker-manifest-digest": expected_digest }, "identity": { "docker-reference": "registry.foo.bar/busybox" } } }); let ss: SimpleSigning = serde_json::from_value(ss_json).unwrap(); assert!(ss.satisfies_manifest_digest(expected_digest)); assert!(!ss.satisfies_manifest_digest("something different")); } } ================================================ FILE: src/cosign/signature_layers.rs ================================================ // // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. use const_oid::ObjectIdentifier; use digest::Digest; use oci_client::client::ImageLayer; use serde::Serialize; use std::collections::BTreeMap; use std::fmt; use tracing::{debug, info, warn}; use x509_cert::Certificate; use x509_cert::der::DecodePem; use x509_cert::ext::pkix::SubjectAltName; use x509_cert::ext::pkix::name::GeneralName; use super::bundle::Bundle; use super::constants::{ SIGSTORE_BUNDLE_ANNOTATION, SIGSTORE_CERT_ANNOTATION, SIGSTORE_GITHUB_WORKFLOW_NAME_OID, SIGSTORE_GITHUB_WORKFLOW_REF_OID, SIGSTORE_GITHUB_WORKFLOW_REPOSITORY_OID, SIGSTORE_GITHUB_WORKFLOW_SHA_OID, SIGSTORE_GITHUB_WORKFLOW_TRIGGER_OID, SIGSTORE_ISSUER_OID, SIGSTORE_OCI_MEDIA_TYPE, SIGSTORE_SIGNATURE_ANNOTATION, }; use crate::crypto::certificate_pool::CertificatePool; use crate::registry::oci_reference::OciReference; use crate::{ cosign::simple_signing::SimpleSigning, crypto::{self, CosignVerificationKey, Signature}, errors::{Result, SigstoreError}, }; /// Describe the details of a certificate produced when signing artifacts /// using the keyless mode. #[derive(Clone, Debug, Serialize)] pub struct CertificateSignature { /// The verification key embedded into the Certificate #[serde(skip_serializing)] pub verification_key: CosignVerificationKey, /// The unique ID associated to the identity pub subject: CertificateSubject, /// The issuer used by the signer to authenticate. (e.g. GitHub, GitHub Action, Microsoft, Google,...) pub issuer: Option, /// The trigger of the GitHub workflow (e.g. `push`) pub github_workflow_trigger: Option, /// The commit ID that triggered the GitHub workflow pub github_workflow_sha: Option, /// The name of the GitHub workflow (e.g. `release artifact`) pub github_workflow_name: Option, /// The repository that owns the GitHub workflow (e.g. 
`octocat/example-repo`) pub github_workflow_repository: Option, /// The Git ref of the commit that triggered the GitHub workflow (e.g. `refs/tags/v0.9.9`) pub github_workflow_ref: Option, } impl fmt::Display for CertificateSignature { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let msg = format!( r#"CertificateSignature - issuer: {:?} - subject: {:?} - GitHub Workflow trigger: {:?} - GitHub Workflow SHA: {:?} - GitHub Workflow name: {:?} - GitHub Workflow repository: {:?} - GitHub Workflow ref: {:?} ---"#, self.issuer, self.subject, self.github_workflow_trigger, self.github_workflow_sha, self.github_workflow_name, self.github_workflow_repository, self.github_workflow_ref, ); write!(f, "{msg}") } } /// Types of identities associated with the signer. #[derive(Clone, Debug, Serialize)] #[serde(tag = "type", content = "value")] pub enum CertificateSubject { /// An email address. This is what is used when the signer authenticated himself using something like his GitHub/Google account Email(String), /// A URL. This is used for example by the OIDC token issued by GitHub Actions Uri(String), } /// Object that contains all the data about a `SimpleSigning` object. /// /// The struct provides some helper methods that can be used at verification /// time. /// /// Note well, the information needed to build a SignatureLayer are spread over /// two places: /// * The manifest of the signature object created by cosign /// * One or more SIGSTORE_OCI_MEDIA_TYPE layers /// /// End users of this library are not supposed to create this object directly. /// `SignatureLayer` objects are instead obtained by using the /// [`sigstore::cosign::Client::trusted_signature_layers`](crate::cosign::client::Client) /// method. 
#[derive(Clone, Debug, Serialize)] pub struct SignatureLayer { /// The Simple Signing object associated with this layer pub simple_signing: SimpleSigning, /// The digest of the layer pub oci_digest: String, /// The certificate holding the identity of the signer, plus his /// verification key. This exists for signature done with keyless mode or /// when a PKCS11 token was used. /// /// The value of `CertificateSignature` is `None` /// when no certificate was embedded into the /// layer, or when the embedded certificate could not be verified. /// /// Having a `None` value will rightfully cause the /// keyless verifiers like /// [`CertSubjectEmailVerifier`](crate::cosign::verification_constraint::CertSubjectEmailVerifier) /// or /// [`CertSubjectUrlVerifier`](crate::cosign::verification_constraint::CertSubjectUrlVerifier) /// to fail verification. /// However, it will still be possible to use the /// [`PublicKeyVerifier`](crate::cosign::verification_constraint::PublicKeyVerifier) /// to verify the layer. This can be useful to verify signatures produced /// with a PKCS11 token, but with Rekor's integration disabled at /// signature time. pub certificate_signature: Option, /// The bundle produced by Rekor. pub bundle: Option, #[serde(skip_serializing)] pub signature: Option, #[serde(skip_serializing)] pub raw_data: Vec, } impl fmt::Display for SignatureLayer { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let msg = format!( r#"--- # SignatureLayer ## digest {} ## signature {:?} ## bundle: {:?} ## certificate signature {} ## Simple Signing {} ---"#, self.oci_digest, self.signature, self.bundle, self.certificate_signature .clone() .map(|cs| cs.to_string()) .unwrap_or_else(|| "None".to_string()), self.simple_signing, ); write!(f, "{msg}") } } impl SignatureLayer { /// Create a [`SignatureLayer`], this function will generate a [`SimpleSigning`] /// payload due to the given reference of image and the digest of the manifest. 
/// However, the resulted [`SignatureLayer`] does not have a signature, and it /// should be manually generated. /// /// ## Usage /// ```rust,no_run /// use sigstore::cosign::{SignatureLayer, constraint::PrivateKeySigner, Constraint}; /// use sigstore::crypto::SigningScheme; /// /// async fn func() { /// let mut signature_layer = SignatureLayer::new_unsigned( /// &"example/test".parse().unwrap(), /// "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").expect("create SignatureLayer failed"); /// // Now the SignatureLayer does not have a signature, we need /// // to generate one /// let signer = SigningScheme::ECDSA_P256_SHA256_ASN1.create_signer().expect("create signer failed"); /// let pk_signer = PrivateKeySigner::new_with_signer(signer); /// if pk_signer.add_constraint(&mut signature_layer).expect("unexpected error") { /// println!("sign succeed!"); /// } else { /// println!("sign failed!"); /// } /// } /// /// ``` pub fn new_unsigned(image_ref: &OciReference, manifest_digest: &str) -> Result { let simple_signing = SimpleSigning::new(image_ref, manifest_digest); let payload = serde_json::to_vec(&simple_signing)?; let digest = format!("sha256:{:x}", sha2::Sha256::digest(&payload)); Ok(SignatureLayer { simple_signing, oci_digest: digest, certificate_signature: None, bundle: None, signature: None, raw_data: payload, }) } /// Create a SignatureLayer that can be considered trusted. /// /// Params: /// * `descriptor`: the metadata of the layer, taken from the OCI manifest associated /// with the Sigstore object /// * `layer`: the data referenced by the descriptor /// * `source_image_digest`: the digest of the object that we're trying /// to verify. This is **not** the digest of the signature itself. /// * `rekor_pub_key`: the public key of Rekor, used to verify `bundle` /// entries /// * `fulcio_pub_key`: the public key provided by Fulcio's certificate. 
/// Used to verify the `certificate` entries /// /// **Note well:** the certificate and bundle added to the final SignatureLayer /// object are to be considered **trusted** and **verified**, according to /// the parameters provided to this method. pub(crate) fn new( descriptor: &oci_client::manifest::OciDescriptor, layer: &oci_client::client::ImageLayer, source_image_digest: &str, rekor_pub_keys: Option<&BTreeMap>, fulcio_cert_pool: Option<&CertificatePool>, ) -> Result { if descriptor.media_type != SIGSTORE_OCI_MEDIA_TYPE { return Err(SigstoreError::SigstoreMediaTypeNotFoundError); } if layer.media_type != SIGSTORE_OCI_MEDIA_TYPE { return Err(SigstoreError::SigstoreMediaTypeNotFoundError); } let layer_digest = layer.clone().sha256_digest(); if descriptor.digest != layer_digest { return Err(SigstoreError::SigstoreLayerDigestMismatchError); } let simple_signing: SimpleSigning = serde_json::from_slice(&layer.data).map_err(|e| { SigstoreError::UnexpectedError(format!( "Cannot convert layer data into SimpleSigning object: {e:?}" )) })?; if !simple_signing.satisfies_manifest_digest(source_image_digest) { return Err(SigstoreError::UnexpectedError( "Simple signing image digest mismatch".to_string(), )); } let annotations = descriptor.annotations.clone().unwrap_or_default(); let signature = Self::get_signature_from_annotations(&annotations)?; let bundle = Self::get_bundle_from_annotations(&annotations, rekor_pub_keys)?; let certificate_signature = Self::get_certificate_signature_from_annotations( &annotations, fulcio_cert_pool, bundle.as_ref(), ); Ok(SignatureLayer { oci_digest: descriptor.digest.clone(), raw_data: layer.data.to_vec(), simple_signing, signature: Some(signature), bundle, certificate_signature, }) } fn get_signature_from_annotations(annotations: &BTreeMap) -> Result { let signature: String = annotations .get(SIGSTORE_SIGNATURE_ANNOTATION) .cloned() .ok_or(SigstoreError::SigstoreAnnotationNotFoundError)?; Ok(signature) } fn get_bundle_from_annotations( 
annotations: &BTreeMap, rekor_pub_keys: Option<&BTreeMap>, ) -> Result> { let bundle = match annotations.get(SIGSTORE_BUNDLE_ANNOTATION) { Some(value) => match rekor_pub_keys { Some(keys) => Some(Bundle::new_verified(value, keys)?), None => { info!(bundle = ?value, "Ignoring bundle, rekor public key not provided to verification client"); None } }, None => None, }; Ok(bundle) } fn get_certificate_signature_from_annotations( annotations: &BTreeMap, fulcio_cert_pool: Option<&CertificatePool>, bundle: Option<&Bundle>, ) -> Option { let cert_raw = annotations.get(SIGSTORE_CERT_ANNOTATION)?; let fulcio_cert_pool = match fulcio_cert_pool { Some(cp) => cp, None => { info!( reason = "fulcio certificates not provided", "Ignoring certificate annotation" ); return None; } }; let bundle = match bundle { Some(b) => b, None => { info!( reason = "rekor bundle not found", "Ignoring certificate annotation" ); return None; } }; match CertificateSignature::from_certificate(cert_raw.as_bytes(), fulcio_cert_pool, bundle) { Ok(certificate_signature) => Some(certificate_signature), Err(e) => { info!(reason=?e, "Ignoring certificate annotation"); None } } } /// Given a Cosign public key, check whether this Signature Layer has been /// signed by it pub(crate) fn is_signed_by_key(&self, verification_key: &CosignVerificationKey) -> bool { let signature = match &self.signature { Some(sig) => sig, None => { warn!(signature_layer = ?self, "signature not found in the SignatureLayer"); return false; } }; match verification_key.verify_signature( Signature::Base64Encoded(signature.as_bytes()), &self.raw_data, ) { Ok(_) => true, Err(e) => { debug!(signature=signature.as_str(), reason=?e, "Cannot verify signature with the given key"); false } } } } /// Creates a list of [`SignatureLayer`] objects by inspecting /// the given OCI manifest and its associated layers. 
/// /// **Note well:** when Rekor and Fulcio data has been provided, the /// returned `SignatureLayer` is guaranteed to be /// verified using the given Rekor and Fulcio keys. pub(crate) fn build_signature_layers( manifest: &oci_client::manifest::OciImageManifest, source_image_digest: &str, layers: &[oci_client::client::ImageLayer], rekor_pub_keys: Option<&BTreeMap>, fulcio_cert_pool: Option<&CertificatePool>, ) -> Result> { let mut signature_layers: Vec = Vec::new(); for manifest_layer in &manifest.layers { let matching_layer: Option<&oci_client::client::ImageLayer> = layers.iter().find(|l| { let tmp: ImageLayer = (*l).clone(); tmp.sha256_digest() == manifest_layer.digest }); if let Some(layer) = matching_layer { match SignatureLayer::new( manifest_layer, layer, source_image_digest, rekor_pub_keys, fulcio_cert_pool, ) { Ok(sl) => signature_layers.push(sl), Err(e) => { info!(error = ?e, "Skipping OCI layer because of error"); } } } } if signature_layers.is_empty() { Err(SigstoreError::SigstoreNoVerifiedLayer) } else { Ok(signature_layers) } } impl CertificateSignature { /// Ensures the given certificate can be trusted, then extracts /// its details and returns them as a `CertificateSignature` object pub(crate) fn from_certificate( cert_pem: &[u8], fulcio_cert_pool: &CertificatePool, trusted_bundle: &Bundle, ) -> Result { let cert = Certificate::from_pem(cert_pem) .map_err(|e| SigstoreError::X509Error(format!("parse from pem: {e}")))?; let integrated_time = trusted_bundle.payload.integrated_time; // ensure the certificate has been issued by Fulcio fulcio_cert_pool.verify_pem_cert( cert_pem, Some(pki_types::UnixTime::since_unix_epoch( cert.tbs_certificate.validity.not_before.to_unix_duration(), )), )?; crypto::certificate::is_trusted(&cert, integrated_time)?; let subject = CertificateSubject::from_certificate(&cert)?; let verification_key = CosignVerificationKey::try_from(&cert.tbs_certificate.subject_public_key_info) .map_err(|e| { SigstoreError::X509Error(format!( 
"cannot extract public key from certificate: {e}" )) })?; let issuer = get_cert_extension_by_oid(&cert, SIGSTORE_ISSUER_OID, "Issuer")?; let github_workflow_trigger = get_cert_extension_by_oid( &cert, SIGSTORE_GITHUB_WORKFLOW_TRIGGER_OID, "GitHub Workflow trigger", )?; let github_workflow_sha = get_cert_extension_by_oid( &cert, SIGSTORE_GITHUB_WORKFLOW_SHA_OID, "GitHub Workflow sha", )?; let github_workflow_name = get_cert_extension_by_oid( &cert, SIGSTORE_GITHUB_WORKFLOW_NAME_OID, "GitHub Workflow name", )?; let github_workflow_repository = get_cert_extension_by_oid( &cert, SIGSTORE_GITHUB_WORKFLOW_REPOSITORY_OID, "GitHub Workflow repository", )?; let github_workflow_ref = get_cert_extension_by_oid( &cert, SIGSTORE_GITHUB_WORKFLOW_REF_OID, "GitHub Workflow ref", )?; Ok(CertificateSignature { verification_key, issuer, github_workflow_trigger, github_workflow_sha, github_workflow_name, github_workflow_repository, github_workflow_ref, subject, }) } } fn get_cert_extension_by_oid( cert: &Certificate, ext_oid: ObjectIdentifier, ext_oid_name: &str, ) -> Result> { cert.tbs_certificate .extensions .as_ref() .ok_or(SigstoreError::X509Error( "Certificate's extension is empty".to_string(), ))? .iter() .find(|ext| ext.extn_id == ext_oid) .map(|ext| { String::from_utf8(ext.extn_value.clone().into_bytes()).map_err(|_| { SigstoreError::X509Error(format!( "Certificate's extension Sigstore {ext_oid_name} is not UTF8 compatible" )) }) }) .transpose() } impl CertificateSubject { pub fn from_certificate(certificate: &Certificate) -> Result { let (_, san) = certificate .tbs_certificate .get::() .map_err(|e| SigstoreError::PKCS8Error(format!("get SAN ext failed: {e}")))? 
.ok_or(SigstoreError::PKCS8Error("No SAN ext found".to_string()))?; for general_name in &san.0 { if let GeneralName::Rfc822Name(name) = general_name { return Ok(CertificateSubject::Email(name.to_string())); } if let GeneralName::UniformResourceIdentifier(uri) = general_name { return Ok(CertificateSubject::Uri(uri.to_string())); } } Err(SigstoreError::CertificateWithIncompleteSubjectAlternativeName) } } #[cfg(test)] pub(crate) mod tests { use super::*; use openssl::x509::X509; use serde_json::json; use crate::cosign::tests::{get_fulcio_cert_pool, get_rekor_public_key}; pub(crate) fn build_correct_signature_layer_without_bundle() -> (SignatureLayer, CosignVerificationKey) { let public_key = r#"-----BEGIN PUBLIC KEY----- MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAENptdY/l3nB0yqkXLBWkZWQwo6+cu OSWS1X9vPavpiQOoTTGC0xX57OojUadxF1cdQmrsiReWg2Wn4FneJfa8xw== -----END PUBLIC KEY-----"#; let signature = String::from( "MEUCIQD6q/COgzOyW0YH1Dk+CCYSt4uAhm3FDHUwvPI55zwnlwIgE0ZK58ZOWpZw8YVmBapJhBqCfdPekIknimuO0xH8Jh8=", ); let verification_key = CosignVerificationKey::from_pem(public_key.as_bytes(), &SigningScheme::default()) .expect("Cannot create CosignVerificationKey"); let ss_value = json!({ "critical": { "identity": { "docker-reference":"registry-testing.svc.lan/busybox" }, "image":{ "docker-manifest-digest":"sha256:f3cfc9d0dbf931d3db4685ec659b7ac68e2a578219da4aae65427886e649b06b" }, "type":"cosign container image signature" }, "optional":null }); ( SignatureLayer { simple_signing: serde_json::from_value(ss_value.clone()).unwrap(), oci_digest: String::from("digest"), signature: Some(signature), bundle: None, certificate_signature: None, raw_data: serde_json::to_vec(&ss_value).unwrap(), }, verification_key, ) } pub(crate) fn build_bundle() -> Bundle { let bundle_value = json!({ "SignedEntryTimestamp": "MEUCIDBGJijj2FqU25yRWzlEWHqE64XKwUvychBs1bSM1PaKAiEAwcR2u81c42TLBk3lWJqhtB7SnM7Lh0OYEl6Bfa7ZA4s=", "Payload": { "body": 
"eyJhcGlWZXJzaW9uIjoiMC4wLjEiLCJraW5kIjoicmVrb3JkIiwic3BlYyI6eyJkYXRhIjp7Imhhc2giOnsiYWxnb3JpdGhtIjoic2hhMjU2IiwidmFsdWUiOiJlNzgwMWRlOTM1NTEyZTIyYjIzN2M3YjU3ZTQyY2E0ZDIwZTIxMzRiZGYxYjk4Zjk3NmM4ZjU1ZDljZmU0MDY3In19LCJzaWduYXR1cmUiOnsiY29udGVudCI6Ik1FVUNJR3FXU2N6N3M5YVAyc0dYTkZLZXFpdnczQjZrUFJzNTZBSVRJSG52ZDVpZ0FpRUExa3piYVYyWTV5UEU4MUVOOTJOVUZPbDMxTExKU3Z3c2pGUTA3bTJYcWFBPSIsImZvcm1hdCI6Ing1MDkiLCJwdWJsaWNLZXkiOnsiY29udGVudCI6IkxTMHRMUzFDUlVkSlRpQkRSVkpVU1VaSlEwRlVSUzB0TFMwdENrMUpTVU5rZWtORFFXWjVaMEYzU1VKQlowbFVRU3RRYzJGTGFtRkZXbkZ1TjBsWk9UUmlNV1V2YWtwdWFYcEJTMEpuWjNGb2EycFBVRkZSUkVGNlFYRUtUVkpWZDBWM1dVUldVVkZMUlhkNGVtRlhaSHBrUnpsNVdsTTFhMXBZV1hoRlZFRlFRbWRPVmtKQlRWUkRTRTV3V2pOT01HSXpTbXhOUWpSWVJGUkplQXBOVkVGNVRVUkJNMDFxVlhoT2JHOVlSRlJKZUUxVVFYbE5SRUV6VGtSVmVFNVdiM2RCUkVKYVRVSk5SMEo1Y1VkVFRUUTVRV2RGUjBORGNVZFRUVFE1Q2tGM1JVaEJNRWxCUWtsT1pYZFJRbE14WmpSQmJVNUpSVTVrVEN0VkwwaEtiM1JOVTAwM1drNXVhMVJ1V1dWbWVIZFdPVlJGY25CMmJrRmFNQ3RFZWt3S2VXWkJRVlpoWlVwMFMycEdkbUpQVkdJNFJqRjVhRXBHVlRCWVdTdFNhV3BuWjBWd1RVbEpRa3BVUVU5Q1owNVdTRkU0UWtGbU9FVkNRVTFEUWpSQmR3cEZkMWxFVmxJd2JFSkJkM2REWjFsSlMzZFpRa0pSVlVoQmQwMTNSRUZaUkZaU01GUkJVVWd2UWtGSmQwRkVRV1JDWjA1V1NGRTBSVVpuVVZWTlpqRlNDazFOYzNGT1JrSnlWMko0T0cxU1RtUjRUMnRGUlZsemQwaDNXVVJXVWpCcVFrSm5kMFp2UVZWNVRWVmtRVVZIWVVwRGEzbFZVMVJ5UkdFMVN6ZFZiMGNLTUN0M2QyZFpNRWREUTNOSFFWRlZSa0ozUlVKQ1NVZEJUVWcwZDJaQldVbExkMWxDUWxGVlNFMUJTMGRqUjJnd1pFaEJOa3g1T1hkamJXd3lXVmhTYkFwWk1rVjBXVEk1ZFdSSFZuVmtRekF5VFVST2JWcFVaR3hPZVRCM1RVUkJkMHhVU1hsTmFtTjBXVzFaTTA1VE1XMU9SMWt4V2xSbmQxcEVTVFZPVkZGMUNtTXpVblpqYlVadVdsTTFibUl5T1c1aVIxWm9ZMGRzZWt4dFRuWmlVemxxV1ZSTk1sbFVSbXhQVkZsNVRrUkthVTlYV21wWmFrVXdUbWs1YWxsVE5Xb0tZMjVSZDBsQldVUldVakJTUVZGSUwwSkNXWGRHU1VWVFdtMTRhR1J0YkhaUlIwNW9Zek5TYkdKSGVIQk1iVEZzVFVGdlIwTkRjVWRUVFRRNVFrRk5SQXBCTW10QlRVZFpRMDFSUXpOWk1uVnNVRlJ6VUcxT1V6UmplbUZMWldwbE1FSnVUMUZJZWpWbE5rNUNXREJDY1hnNVdHTmhLM1F5YTA5cE1UZHpiM0JqQ2k5MkwzaElNWGhNZFZCdlEwMVJSRXRPUkRSWGFraG1TM0ZZV0U5bFZYWmFPVUU1TmtSeGNrVjNSMkZ4UjAxMGJrbDFUalJLZWxwWllWVk1Xbko0T1djS2IxaHhjVzh2UXpsUmJ
rOUlWSFJ2UFFvdExTMHRMVVZPUkNCRFJWSlVTVVpKUTBGVVJTMHRMUzB0Q2c9PSJ9fX19", "integratedTime": 1634714717, "logIndex": 783607, "logID": "c0d23d6ad406973f9559f3ba2d1ca01f84147d8ffc5b8445c224f98b9591801d" } }); let bundle: Bundle = serde_json::from_value(bundle_value).expect("Cannot parse bundle"); bundle } pub(crate) fn build_correct_signature_layer_with_certificate() -> SignatureLayer { let ss_value = json!({ "critical": { "identity": { "docker-reference": "registry-testing.svc.lan/kubewarden/disallow-service-nodeport" }, "image": { "docker-manifest-digest": "sha256:5f481572d088dc4023afb35fced9530ced3d9b03bf7299c6f492163cb9f0452e" }, "type": "cosign container image signature" }, "optional": null }); let bundle = build_bundle(); let cert_raw = r#"-----BEGIN CERTIFICATE----- MIICdzCCAfygAwIBAgITA+PsaKjaEZqn7IY94b1e/jJnizAKBggqhkjOPQQDAzAq MRUwEwYDVQQKEwxzaWdzdG9yZS5kZXYxETAPBgNVBAMTCHNpZ3N0b3JlMB4XDTIx MTAyMDA3MjUxNloXDTIxMTAyMDA3NDUxNVowADBZMBMGByqGSM49AgEGCCqGSM49 AwEHA0IABINewQBS1f4AmNIENdL+U/HJotMSM7ZNnkTnYefxwV9TErpvnAZ0+DzL yfAAVaeJtKjFvbOTb8F1yhJFU0XY+RijggEpMIIBJTAOBgNVHQ8BAf8EBAMCB4Aw EwYDVR0lBAwwCgYIKwYBBQUHAwMwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQUMf1R MMsqNFBrWbx8mRNdxOkEEYswHwYDVR0jBBgwFoAUyMUdAEGaJCkyUSTrDa5K7UoG 0+wwgY0GCCsGAQUFBwEBBIGAMH4wfAYIKwYBBQUHMAKGcGh0dHA6Ly9wcml2YXRl Y2EtY29udGVudC02MDNmZTdlNy0wMDAwLTIyMjctYmY3NS1mNGY1ZTgwZDI5NTQu c3RvcmFnZS5nb29nbGVhcGlzLmNvbS9jYTM2YTFlOTYyNDJiOWZjYjE0Ni9jYS5j cnQwIAYDVR0RAQH/BBYwFIESZmxhdmlvQGNhc3RlbGxpLm1lMAoGCCqGSM49BAMD A2kAMGYCMQC3Y2ulPTsPmNS4czaKeje0BnOQHz5e6NBX0Bqx9Xca+t2kOi17sopc /v/xH1xLuPoCMQDKND4WjHfKqXXOeUvZ9A96DqrEwGaqGMtnIuN4JzZYaULZrx9g oXqqo/C9QnOHTto= -----END CERTIFICATE-----"#; let fulcio_cert_pool = get_fulcio_cert_pool(); let certificate_signature = CertificateSignature::from_certificate(cert_raw.as_bytes(), &fulcio_cert_pool, &bundle) .expect("Cannot create certificate signature"); SignatureLayer { simple_signing: serde_json::from_value(ss_value.clone()).unwrap(), oci_digest: String::from( 
"sha256:5f481572d088dc4023afb35fced9530ced3d9b03bf7299c6f492163cb9f0452e", ), signature: Some(String::from( "MEUCIGqWScz7s9aP2sGXNFKeqivw3B6kPRs56AITIHnvd5igAiEA1kzbaV2Y5yPE81EN92NUFOl31LLJSvwsjFQ07m2XqaA=", )), bundle: Some(bundle), certificate_signature: Some(certificate_signature), raw_data: serde_json::to_vec(&ss_value).unwrap(), } } #[test] fn is_signed_by_key_fails_when_signature_is_not_valid() { let (signature_layer, _) = build_correct_signature_layer_without_bundle(); let verification_key = CosignVerificationKey::from_pem( r#"-----BEGIN PUBLIC KEY----- MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAETJP9cqpUQsn2ggmJniWGjHdlsHzD JsB89BPhZYch0U0hKANx5TY+ncrm0s8bfJxxHoenAEFhwhuXeb4PqIrtoQ== -----END PUBLIC KEY-----"# .as_bytes(), &SigningScheme::default(), ) .expect("Cannot create CosignVerificationKey"); let actual = signature_layer.is_signed_by_key(&verification_key); assert!(!actual, "expected false, got true"); } #[test] fn new_signature_layer_fails_because_bad_descriptor() { let descriptor = oci_client::manifest::OciDescriptor { media_type: "not what you would expected".into(), ..Default::default() }; let layer = oci_client::client::ImageLayer { media_type: super::SIGSTORE_OCI_MEDIA_TYPE.to_string(), data: Vec::new().into(), annotations: None, }; let (key_id, key) = get_rekor_public_key(); let rekor_pub_keys = BTreeMap::from([(key_id, key)]); let fulcio_cert_pool = get_fulcio_cert_pool(); let error = SignatureLayer::new( &descriptor, &layer, "source_image_digest is not relevant now", Some(&rekor_pub_keys), Some(&fulcio_cert_pool), ) .expect_err("Didn't get an error"); let found = matches!(error, SigstoreError::SigstoreMediaTypeNotFoundError); assert!(found, "Got a different error type: {}", error); } #[test] fn new_signature_layer_fails_because_bad_layer() { let descriptor = oci_client::manifest::OciDescriptor { media_type: super::SIGSTORE_OCI_MEDIA_TYPE.to_string(), ..Default::default() }; let layer = oci_client::client::ImageLayer { media_type: "not what you would 
expect".into(), data: Vec::new().into(), annotations: None, }; let (key_id, key) = get_rekor_public_key(); let rekor_pub_keys = BTreeMap::from([(key_id, key)]); let fulcio_cert_pool = get_fulcio_cert_pool(); let error = SignatureLayer::new( &descriptor, &layer, "source_image_digest is not relevant now", Some(&rekor_pub_keys), Some(&fulcio_cert_pool), ) .expect_err("Didn't get an error"); let found = matches!(error, SigstoreError::SigstoreMediaTypeNotFoundError); assert!(found, "Got a different error type: {}", error); } #[test] fn new_signature_layer_fails_because_checksum_mismatch() { let descriptor = oci_client::manifest::OciDescriptor { media_type: super::SIGSTORE_OCI_MEDIA_TYPE.to_string(), digest: "some digest".into(), ..Default::default() }; let layer = oci_client::client::ImageLayer { media_type: super::SIGSTORE_OCI_MEDIA_TYPE.to_string(), data: "some other contents".into(), annotations: None, }; let (key_id, key) = get_rekor_public_key(); let rekor_pub_keys = BTreeMap::from([(key_id, key)]); let fulcio_cert_pool = get_fulcio_cert_pool(); let error = SignatureLayer::new( &descriptor, &layer, "source_image_digest is not relevant now", Some(&rekor_pub_keys), Some(&fulcio_cert_pool), ) .expect_err("Didn't get an error"); let found = matches!(error, SigstoreError::SigstoreLayerDigestMismatchError); assert!(found, "Got a different error type: {}", error); } #[test] fn get_signature_from_annotations_success() { let mut annotations: BTreeMap = BTreeMap::new(); annotations.insert(SIGSTORE_SIGNATURE_ANNOTATION.into(), "foo".into()); let actual = SignatureLayer::get_signature_from_annotations(&annotations); assert!(actual.is_ok()); } #[test] fn get_signature_from_annotations_failure() { let annotations: BTreeMap = BTreeMap::new(); let actual = SignatureLayer::get_signature_from_annotations(&annotations); assert!(actual.is_err()); } #[test] fn get_bundle_from_annotations_works() { // we are **not** going to test neither the creation from a valid bundle // nor the 
fauilure because the bundle cannot be verified. These cases // are already covered by Bundle's test suite // // We care only about the only case not tested: to not // fail when no bundle is specified. let annotations: BTreeMap = BTreeMap::new(); let (key_id, key) = get_rekor_public_key(); let rekor_pub_keys = BTreeMap::from([(key_id, key)]); let actual = SignatureLayer::get_bundle_from_annotations(&annotations, Some(&rekor_pub_keys)); assert!(actual.is_ok()); assert!(actual.unwrap().is_none()); } #[test] fn get_certificate_signature_from_annotations_returns_none() { let annotations: BTreeMap = BTreeMap::new(); let fulcio_cert_pool = get_fulcio_cert_pool(); let actual = SignatureLayer::get_certificate_signature_from_annotations( &annotations, Some(&fulcio_cert_pool), None, ); assert!(actual.is_none()); } #[test] fn get_certificate_signature_from_annotations_fails_when_no_bundle_is_given() { let mut annotations: BTreeMap = BTreeMap::new(); // add a fake cert, contents are not relevant annotations.insert(SIGSTORE_CERT_ANNOTATION.to_string(), "a cert".to_string()); let fulcio_cert_pool = get_fulcio_cert_pool(); let cert = SignatureLayer::get_certificate_signature_from_annotations( &annotations, Some(&fulcio_cert_pool), None, ); assert!(cert.is_none()); } #[test] fn get_certificate_signature_from_annotations_fails_when_no_fulcio_pub_key_is_given() { let mut annotations: BTreeMap = BTreeMap::new(); // add a fake cert, contents are not relevant annotations.insert(SIGSTORE_CERT_ANNOTATION.to_string(), "a cert".to_string()); let bundle = build_bundle(); let cert = SignatureLayer::get_certificate_signature_from_annotations( &annotations, None, Some(&bundle), ); assert!(cert.is_none()); } #[test] fn is_signed_by_key() { // a SignatureLayer created with traditional signing let (sl, key) = build_correct_signature_layer_without_bundle(); assert!(sl.is_signed_by_key(&key)); // a SignatureLayer created with keyless signing -> there's no pub key let sl = 
build_correct_signature_layer_with_certificate(); // fail because the signature layer wasn't signed with the given key let verification_key = CosignVerificationKey::from_pem( r#"-----BEGIN PUBLIC KEY----- MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAETJP9cqpUQsn2ggmJniWGjHdlsHzD JsB89BPhZYch0U0hKANx5TY+ncrm0s8bfJxxHoenAEFhwhuXeb4PqIrtoQ== -----END PUBLIC KEY-----"# .as_bytes(), &SigningScheme::default(), ) .expect("Cannot create CosignVerificationKey"); assert!(!sl.is_signed_by_key(&verification_key)); } // Testing CertificateSignature use crate::cosign::bundle::Payload; use crate::crypto::SigningScheme; use crate::crypto::tests::{CertGenerationOptions, generate_certificate}; use chrono::{TimeDelta, Utc}; impl TryFrom for crate::registry::Certificate { type Error = anyhow::Error; fn try_from(value: X509) -> std::result::Result { let data = value.to_pem()?; let encoding = crate::registry::CertificateEncoding::Pem; Ok(Self { data, encoding }) } } #[test] fn certificate_signature_from_certificate_using_email() -> anyhow::Result<()> { let expected_email = "test@sigstore.dev".to_string(); let ca_data = generate_certificate(None, CertGenerationOptions::default())?; let issued_cert = generate_certificate( Some(&ca_data), CertGenerationOptions { subject_email: Some(expected_email.clone()), ..Default::default() }, )?; let issued_cert_pem = issued_cert.cert.to_pem()?; let certs = vec![ crate::registry::Certificate::try_from(ca_data.cert) .unwrap() .try_into()?, ]; let cert_pool = CertificatePool::from_certificates(certs, []).unwrap(); let integrated_time = Utc::now() .checked_sub_signed(TimeDelta::try_minutes(1).unwrap()) .unwrap(); let bundle = Bundle { signed_entry_timestamp: "not relevant".to_string(), payload: Payload { body: "not relevant".to_string(), integrated_time: integrated_time.timestamp(), log_index: 0, log_id: "not relevant".to_string(), }, }; let certificate_signature = CertificateSignature::from_certificate(&issued_cert_pem, &cert_pool, &bundle) .expect("Didn't expect 
an error"); let expected_issuer = match certificate_signature.subject.clone() { CertificateSubject::Email(mail) => mail == expected_email, _ => false, }; assert!( expected_issuer, "Didn't get the expected subject: {:?}", certificate_signature.subject ); Ok(()) } #[test] fn certificate_signature_from_certificate_using_uri() -> anyhow::Result<()> { let expected_url = "https://sigstore.dev/test".to_string(); let ca_data = generate_certificate(None, CertGenerationOptions::default())?; let issued_cert = generate_certificate( Some(&ca_data), CertGenerationOptions { subject_email: None, subject_url: Some(expected_url.clone()), ..Default::default() }, )?; let issued_cert_pem = issued_cert.cert.to_pem()?; let certs = vec![ crate::registry::Certificate::try_from(ca_data.cert) .unwrap() .try_into()?, ]; let cert_pool = CertificatePool::from_certificates(certs, []).unwrap(); let integrated_time = Utc::now() .checked_sub_signed(TimeDelta::try_minutes(1).unwrap()) .unwrap(); let bundle = Bundle { signed_entry_timestamp: "not relevant".to_string(), payload: Payload { body: "not relevant".to_string(), integrated_time: integrated_time.timestamp(), log_index: 0, log_id: "not relevant".to_string(), }, }; let certificate_signature = CertificateSignature::from_certificate(&issued_cert_pem, &cert_pool, &bundle) .expect("Didn't expect an error"); let expected_issuer = match certificate_signature.subject.clone() { CertificateSubject::Uri(url) => url == expected_url, _ => false, }; assert!( expected_issuer, "Didn't get the expected subject: {:?}", certificate_signature.subject ); Ok(()) } #[test] fn certificate_signature_from_certificate_without_email_and_uri() -> anyhow::Result<()> { let ca_data = generate_certificate(None, CertGenerationOptions::default())?; let issued_cert = generate_certificate( Some(&ca_data), CertGenerationOptions { subject_email: None, subject_url: None, ..Default::default() }, )?; let issued_cert_pem = issued_cert.cert.to_pem()?; let certs = vec![ 
crate::registry::Certificate::try_from(ca_data.cert) .unwrap() .try_into()?, ]; let cert_pool = CertificatePool::from_certificates(certs, []).unwrap(); let integrated_time = Utc::now() .checked_sub_signed(TimeDelta::try_minutes(1).unwrap()) .unwrap(); let bundle = Bundle { signed_entry_timestamp: "not relevant".to_string(), payload: Payload { body: "not relevant".to_string(), integrated_time: integrated_time.timestamp(), log_index: 0, log_id: "not relevant".to_string(), }, }; let error = CertificateSignature::from_certificate(&issued_cert_pem, &cert_pool, &bundle) .expect_err("Didn't get an error"); assert!(matches!( error, SigstoreError::CertificateWithoutSubjectAlternativeName )); Ok(()) } } ================================================ FILE: src/cosign/verification_constraint/annotation_verifier.rs ================================================ use std::collections::BTreeMap; use super::VerificationConstraint; use crate::cosign::signature_layers::SignatureLayer; use crate::errors::Result; /// Verification Constraint for the annotations added by `cosign sign` /// /// The `SimpleSigning` object produced at signature time can be enriched by /// signer with so called "anntoations". /// /// This constraint ensures that all the annotations specified by the user are /// found inside of the SignatureLayer. /// /// It's perfectly find for the SignatureLayer to have additional annotations. /// These will be simply be ignored by the verifier. 
#[derive(Default, Debug)]
pub struct AnnotationVerifier {
    /// The annotations that must all be present inside of the signed
    /// `SimpleSigning` payload for the constraint to be satisfied.
    pub annotations: BTreeMap<String, String>,
}

impl VerificationConstraint for AnnotationVerifier {
    fn verify(&self, signature_layer: &SignatureLayer) -> Result<bool> {
        // Delegate the check to SimpleSigning: every expected annotation must
        // be present with the expected value; extra annotations are ignored.
        let verified = signature_layer
            .simple_signing
            .satisfies_annotations(&self.annotations);
        Ok(verified)
    }
}

================================================ FILE: src/cosign/verification_constraint/cert_subject_email_verifier.rs ================================================
use regex::Regex;
use std::fmt::Debug;

use super::VerificationConstraint;
use crate::cosign::signature_layers::{CertificateSubject, SignatureLayer};
use crate::errors::Result;

/// Verification Constraint for signatures produced in keyless mode.
///
/// Keyless signatures have a x509 certificate associated to them. This
/// verifier ensures the SAN portion of the certificate has an email
/// attribute that matches the one provided by the user.
///
/// It's also possible to specify the `Issuer`, this is the name of the
/// identity provider that was used by the user to authenticate.
/// /// For example, `cosign` produces the following signature when the user /// relies on GitHub to authenticate himself: /// /// ```hcl /// { /// "critical": { /// // not relevant /// }, /// "optional": { /// "Bundle": { /// // not relevant /// }, /// "Issuer": "https://github.com/login/oauth", /// "Subject": "alice@example.com" /// } /// } /// ``` /// /// The following constraints would be able to enforce this signature to be /// found: /// /// ```rust /// use regex::Regex; /// use sigstore::cosign::verification_constraint::CertSubjectEmailVerifier; /// use sigstore::cosign::verification_constraint::cert_subject_email_verifier::StringVerifier; /// /// // This looks only for the email address of the trusted user /// let vc_email = CertSubjectEmailVerifier{ /// email: StringVerifier::ExactMatch("alice@example.com".to_string()), /// issuer: None, /// }; /// /// // This looks only for emails matching the a pattern /// let vc_email_regex = CertSubjectEmailVerifier{ /// email: StringVerifier::Regex(Regex::new(".*@example.com").unwrap()), /// issuer: None, /// }; /// /// // This ensures the user authenticated via GitHub (see the issuer value), /// // plus the email associated to his GitHub account must be the one specified. /// let vc_email_and_issuer = CertSubjectEmailVerifier{ /// email: StringVerifier::ExactMatch("alice@example.com".to_string()), /// issuer: Some(StringVerifier::ExactMatch("https://github.com/login/oauth".to_string())), /// }; /// /// // This ensures the user authenticated via a service that has a domain /// // matching the regex, plus the email associated to account also matches /// // the regex. 
/// let vc_email_and_issuer_regex = CertSubjectEmailVerifier{ /// email: StringVerifier::Regex(Regex::new(".*@example.com").unwrap()), /// issuer: Some(StringVerifier::Regex(Regex::new(r"https://github\.com/login/oauth|https://google\.com").unwrap())), /// }; /// ``` /// /// When `issuer` is `None`, the value found inside of the signature's certificate /// is not checked. /// /// For example, given the following constraint: /// ```rust /// use sigstore::cosign::verification_constraint::CertSubjectEmailVerifier; /// use sigstore::cosign::verification_constraint::cert_subject_email_verifier::StringVerifier; /// /// let constraint = CertSubjectEmailVerifier{ /// email: StringVerifier::ExactMatch("alice@example.com".to_string()), /// issuer: None, /// }; /// ``` /// /// Both these signatures would be trusted: /// ```hcl /// [ /// { /// "critical": { /// // not relevant /// }, /// "optional": { /// "Bundle": { /// // not relevant /// }, /// "Issuer": "https://github.com/login/oauth", /// "Subject": "alice@example.com" /// } /// }, /// { /// "critical": { /// // not relevant /// }, /// "optional": { /// "Bundle": { /// // not relevant /// }, /// "Issuer": "https://example.com/login/oauth", /// "Subject": "alice@example.com" /// } /// } /// ] /// ``` pub struct CertSubjectEmailVerifier { pub email: StringVerifier, pub issuer: Option, } impl Debug for CertSubjectEmailVerifier { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut issuer_str = String::new(); if let Some(issuer) = &self.issuer { issuer_str.push_str(&format!(" and {issuer}")); } f.write_fmt(format_args!( "email {}{}", &self.email.to_string(), issuer_str )) } } pub enum StringVerifier { ExactMatch(String), Regex(Regex), } impl StringVerifier { fn verify(&self, s: &str) -> bool { match self { StringVerifier::ExactMatch(s2) => s == *s2, StringVerifier::Regex(r) => r.is_match(s), } } } impl std::fmt::Display for StringVerifier { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> 
std::fmt::Result { match self { StringVerifier::ExactMatch(s) => f.write_fmt(format_args!("is exactly {s}")), StringVerifier::Regex(r) => f.write_fmt(format_args!("matches regular expression {r}")), } } } impl VerificationConstraint for CertSubjectEmailVerifier { fn verify(&self, signature_layer: &SignatureLayer) -> Result { let verified = match &signature_layer.certificate_signature { Some(signature) => { let email_matches = match &signature.subject { CertificateSubject::Email(e) => self.email.verify(e), _ => false, }; let issuer_matches = match &self.issuer { Some(issuer) => { if let Some(signature_issuer) = &signature.issuer { issuer.verify(signature_issuer) } else { // if the issuer is not present in the signature, we // consider it as a failed constriant false } } None => true, }; email_matches && issuer_matches } _ => false, }; Ok(verified) } } #[cfg(test)] mod tests { use super::*; use crate::cosign::signature_layers::tests::{ build_correct_signature_layer_with_certificate, build_correct_signature_layer_without_bundle, }; use crate::cosign::verification_constraint::CertSubjectUrlVerifier; #[test] fn cert_email_verifier_only_email() { let email = "alice@example.com".to_string(); let mut sl = build_correct_signature_layer_with_certificate(); let mut cert_signature = sl.certificate_signature.unwrap(); let cert_subj = CertificateSubject::Email(email.to_string()); cert_signature.issuer = None; cert_signature.subject = cert_subj; sl.certificate_signature = Some(cert_signature); let vc = CertSubjectEmailVerifier { email: StringVerifier::ExactMatch(email), issuer: None, }; assert!(vc.verify(&sl).unwrap()); let vc = CertSubjectEmailVerifier { email: StringVerifier::ExactMatch("different@email.com".to_string()), issuer: None, }; assert!(!vc.verify(&sl).unwrap()); } #[test] fn cert_email_verifier_email_and_issuer() { let email = "alice@example.com".to_string(); let mut sl = build_correct_signature_layer_with_certificate(); let mut cert_signature = 
sl.certificate_signature.unwrap(); // The cerificate subject doesn't have an issuer let cert_subj = CertificateSubject::Email(email.clone()); cert_signature.issuer = None; cert_signature.subject = cert_subj; sl.certificate_signature = Some(cert_signature.clone()); // fail because the issuer we want doesn't exist let vc = CertSubjectEmailVerifier { email: StringVerifier::ExactMatch(email.clone()), issuer: Some(StringVerifier::ExactMatch("an issuer".to_string())), }; assert!(!vc.verify(&sl).unwrap()); // The cerificate subject has an issuer let issuer = "the issuer".to_string(); let cert_subj = CertificateSubject::Email(email.clone()); cert_signature.issuer = Some(issuer.clone()); cert_signature.subject = cert_subj; sl.certificate_signature = Some(cert_signature); let vc = CertSubjectEmailVerifier { email: StringVerifier::ExactMatch(email.clone()), issuer: Some(StringVerifier::ExactMatch(issuer.clone())), }; assert!(vc.verify(&sl).unwrap()); let vc = CertSubjectEmailVerifier { email: StringVerifier::ExactMatch(email), issuer: Some(StringVerifier::ExactMatch("another issuer".to_string())), }; assert!(!vc.verify(&sl).unwrap()); // another verifier should fail let vc = CertSubjectUrlVerifier { url: "https://sigstore.dev/test".to_string(), issuer, }; assert!(!vc.verify(&sl).unwrap()); } #[test] fn cert_email_verifier_no_signature() { let (sl, _) = build_correct_signature_layer_without_bundle(); let vc = CertSubjectEmailVerifier { email: StringVerifier::ExactMatch("alice@example.com".to_string()), issuer: None, }; assert!(!vc.verify(&sl).unwrap()); } #[test] fn cert_email_verifier_only_email_regex() { let mut sl = build_correct_signature_layer_with_certificate(); let mut cert_signature = sl.certificate_signature.unwrap(); let cert_subj = CertificateSubject::Email("alice@example.com".to_string()); cert_signature.issuer = None; cert_signature.subject = cert_subj; sl.certificate_signature = Some(cert_signature); let vc = CertSubjectEmailVerifier { email: 
StringVerifier::Regex(Regex::new(".*@example.com").unwrap()), issuer: None, }; assert!(vc.verify(&sl).unwrap()); let mut sl = build_correct_signature_layer_with_certificate(); let mut cert_signature = sl.certificate_signature.unwrap(); let cert_subj = CertificateSubject::Email("bob@example.com".to_string()); cert_signature.issuer = None; cert_signature.subject = cert_subj; sl.certificate_signature = Some(cert_signature); assert!(vc.verify(&sl).unwrap()); let vc = CertSubjectEmailVerifier { email: StringVerifier::ExactMatch("different@email.com".to_string()), issuer: None, }; assert!(!vc.verify(&sl).unwrap()); } #[test] fn cert_email_verifier_email_and_issuer_regex() { // The cerificate subject doesn't have an issuer let mut sl = build_correct_signature_layer_with_certificate(); let mut cert_signature = sl.certificate_signature.unwrap(); let cert_subj = CertificateSubject::Email("alice@example.com".to_string()); cert_signature.issuer = None; cert_signature.subject = cert_subj; sl.certificate_signature = Some(cert_signature.clone()); // fail because the issuer we want doesn't exist let vc = CertSubjectEmailVerifier { email: StringVerifier::Regex(Regex::new(".*@example.com").unwrap()), issuer: Some(StringVerifier::Regex( Regex::new(r#".*\.github.com"#).unwrap(), )), }; assert!(!vc.verify(&sl).unwrap()); // The cerificate subject has an issuer let mut sl = build_correct_signature_layer_with_certificate(); let mut cert_signature = sl.certificate_signature.unwrap(); let issuer = "some-action.github.com".to_string(); let cert_subj = CertificateSubject::Email("alice@example.com".to_string()); cert_signature.issuer = Some(issuer.clone()); cert_signature.subject = cert_subj; sl.certificate_signature = Some(cert_signature); // pass because the issuer matches the regex let vc = CertSubjectEmailVerifier { email: StringVerifier::Regex(Regex::new(".*@example.com").unwrap()), issuer: Some(StringVerifier::Regex( Regex::new(r#".*\.github.com"#).unwrap(), )), }; 
assert!(vc.verify(&sl).unwrap()); // The cerificate subject has an incorrect issuer let mut sl = build_correct_signature_layer_with_certificate(); let mut cert_signature = sl.certificate_signature.unwrap(); let issuer = "invalid issuer".to_string(); let cert_subj = CertificateSubject::Email("alice@example.com".to_string()); cert_signature.issuer = Some(issuer.clone()); cert_signature.subject = cert_subj; sl.certificate_signature = Some(cert_signature); // fail because the issuer doesn't matches the regex let vc = CertSubjectEmailVerifier { email: StringVerifier::Regex(Regex::new(".*@example.com").unwrap()), issuer: Some(StringVerifier::Regex( Regex::new(r#".*\.github.com"#).unwrap(), )), }; assert!(!vc.verify(&sl).unwrap()); // The cerificate subject has an invalid email let mut sl = build_correct_signature_layer_with_certificate(); let mut cert_signature = sl.certificate_signature.unwrap(); let issuer = "some-action.github.com".to_string(); let cert_subj = CertificateSubject::Email("alice@somedomain.com".to_string()); cert_signature.issuer = Some(issuer.clone()); cert_signature.subject = cert_subj; sl.certificate_signature = Some(cert_signature); // fail because the email doesn't matches the regex let vc = CertSubjectEmailVerifier { email: StringVerifier::Regex(Regex::new(".*@example.com").unwrap()), issuer: Some(StringVerifier::Regex( Regex::new(r#".*\.github.com"#).unwrap(), )), }; assert!(!vc.verify(&sl).unwrap()); } } ================================================ FILE: src/cosign/verification_constraint/cert_subject_url_verifier.rs ================================================ use super::VerificationConstraint; use crate::cosign::signature_layers::{CertificateSubject, SignatureLayer}; use crate::errors::Result; /// Verification Constraint for signatures produced in keyless mode. /// /// Keyless signatures have a x509 certificate associated to them. 
This /// verifier ensures the SAN portion of the certificate has a URI /// attribute that matches the one provided by the user. /// /// The constraints needs also the `Issuer` to be provided, this is the name /// of the identity provider that was used by the user to authenticate. /// /// This verifier can be used to check keyless signatures produced in /// non-interactive mode inside of GitHub Actions. /// /// For example, `cosign` produces the following signature when the /// OIDC token is extracted from the GITHUB_TOKEN: /// /// ```hcl /// { /// "critical": { /// // not relevant /// }, /// "optional": { /// "Bundle": { /// // not relevant /// }, /// "Issuer": "https://token.actions.githubusercontent.com", /// "Subject": "https://github.com/flavio/policy-secure-pod-images/.github/workflows/release.yml@refs/heads/main" /// } /// } /// ``` /// /// The following constraint would be able to enforce this signature to be /// found: /// /// ```rust /// use sigstore::cosign::verification_constraint::CertSubjectUrlVerifier; /// /// let vc = CertSubjectUrlVerifier{ /// url: String::from("https://github.com/flavio/policy-secure-pod-images/.github/workflows/release.yml@refs/heads/main"), /// issuer: String::from("https://token.actions.githubusercontent.com"), /// }; /// ``` #[derive(Default, Debug)] pub struct CertSubjectUrlVerifier { pub url: String, pub issuer: String, } impl VerificationConstraint for CertSubjectUrlVerifier { fn verify(&self, signature_layer: &SignatureLayer) -> Result { let verified = match &signature_layer.certificate_signature { Some(signature) => { let url_matches = match &signature.subject { CertificateSubject::Uri(u) => u == &self.url, _ => false, }; let issuer_matches = Some(self.issuer.clone()) == signature.issuer; url_matches && issuer_matches } _ => false, }; Ok(verified) } } #[cfg(test)] mod tests { use super::*; use crate::cosign::{ signature_layers::tests::{ build_correct_signature_layer_with_certificate, 
build_correct_signature_layer_without_bundle, }, verification_constraint::{ CertSubjectEmailVerifier, cert_subject_email_verifier::StringVerifier, }, }; #[test] fn cert_subject_url_verifier() { let url = "https://sigstore.dev/test".to_string(); let issuer = "the issuer".to_string(); let mut sl = build_correct_signature_layer_with_certificate(); let mut cert_signature = sl.certificate_signature.unwrap(); let cert_subj = CertificateSubject::Uri(url.clone()); cert_signature.issuer = Some(issuer.clone()); cert_signature.subject = cert_subj; sl.certificate_signature = Some(cert_signature); let vc = CertSubjectUrlVerifier { url: url.clone(), issuer: issuer.clone(), }; assert!(vc.verify(&sl).unwrap()); let vc = CertSubjectUrlVerifier { url: "a different url".to_string(), issuer: issuer.clone(), }; assert!(!vc.verify(&sl).unwrap()); let vc = CertSubjectUrlVerifier { url, issuer: "a different issuer".to_string(), }; assert!(!vc.verify(&sl).unwrap()); // A Cert email verifier should also report a non match let vc = CertSubjectEmailVerifier { email: StringVerifier::ExactMatch("alice@example.com".to_string()), issuer: Some(StringVerifier::ExactMatch(issuer)), }; assert!(!vc.verify(&sl).unwrap()); } #[test] fn cert_subject_verifier_no_signature() { let (sl, _) = build_correct_signature_layer_without_bundle(); let vc = CertSubjectUrlVerifier { url: "https://sigstore.dev/test".to_string(), issuer: "an issuer".to_string(), }; assert!(!vc.verify(&sl).unwrap()); } } ================================================ FILE: src/cosign/verification_constraint/certificate_verifier.rs ================================================ use chrono::{DateTime, Utc}; use pkcs8::der::Decode; use pki_types::CertificateDer; use tracing::warn; use x509_cert::Certificate; use super::VerificationConstraint; use crate::cosign::signature_layers::SignatureLayer; use crate::crypto::{CosignVerificationKey, certificate_pool::CertificatePool}; use crate::errors::{Result, SigstoreError}; /// Verify signature 
layers using the public key defined inside of a x509 certificate #[derive(Debug)] pub struct CertificateVerifier { cert_verification_key: CosignVerificationKey, cert_validity: x509_cert::time::Validity, require_rekor_bundle: bool, } impl CertificateVerifier { /// Create a new instance of `CertificateVerifier` using the PEM encoded /// certificate. /// /// * `cert_bytes`: PEM encoded certificate /// * `require_rekor_bundle`: require the signature layer to have a Rekor /// bundle. Having a Rekor bundle allows further checks to be performed, /// like ensuring the signature has been produced during the validity /// time frame of the certificate. It is recommended to set this value /// to `true` to have a more secure verification process. /// * `cert_chain`: the certificate chain that is used to verify the provided /// certificate. When not specified, the certificate is assumed to be trusted pub fn from_pem( cert_bytes: &[u8], require_rekor_bundle: bool, cert_chain: Option<&[crate::registry::Certificate]>, ) -> Result { let pem = pem::parse(cert_bytes)?; Self::from_der(pem.contents(), require_rekor_bundle, cert_chain) } /// Create a new instance of `CertificateVerifier` using the DER encoded /// certificate. /// /// * `cert_bytes`: DER encoded certificate /// * `require_rekor_bundle`: require the signature layer to have a Rekor /// bundle. Having a Rekor bundle allows further checks to be performed, /// like ensuring the signature has been produced during the validity /// time frame of the certificate. It is recommended to set this value /// to `true` to have a more secure verification process. /// * `cert_chain`: the certificate chain that is used to verify the provided /// certificate. 
When not specified, the certificate is assumed to be trusted pub fn from_der( cert_bytes: &[u8], require_rekor_bundle: bool, cert_chain: Option<&[crate::registry::Certificate]>, ) -> Result { let cert = Certificate::from_der(cert_bytes) .map_err(|e| SigstoreError::X509Error(format!("parse from der {e}")))?; crate::crypto::certificate::verify_key_usages(&cert)?; crate::crypto::certificate::verify_has_san(&cert)?; crate::crypto::certificate::verify_validity(&cert)?; if let Some(certs) = cert_chain { let certs = certs .iter() .map(|c| CertificateDer::try_from(c.clone())) .collect::>>()?; let cert_pool = CertificatePool::from_certificates(certs, [])?; cert_pool.verify_der_cert(cert_bytes, None)?; } let subject_public_key_info = &cert.tbs_certificate.subject_public_key_info; let cosign_verification_key = CosignVerificationKey::try_from(subject_public_key_info)?; Ok(Self { cert_verification_key: cosign_verification_key, cert_validity: cert.tbs_certificate.validity, require_rekor_bundle, }) } } impl VerificationConstraint for CertificateVerifier { fn verify(&self, signature_layer: &SignatureLayer) -> Result { if !signature_layer.is_signed_by_key(&self.cert_verification_key) { return Ok(false); } match &signature_layer.bundle { Some(bundle) => { let it = DateTime::::from_naive_utc_and_offset( DateTime::from_timestamp(bundle.payload.integrated_time, 0) .ok_or(SigstoreError::UnexpectedError( "timestamp is not legal".into(), ))? 
.naive_utc(), Utc, ); let not_before: DateTime = self.cert_validity.not_before.to_system_time().into(); if it < not_before { warn!( integrated_time = it.to_string(), not_before = self.cert_validity.not_before.to_string(), "certificate verification: ignoring layer, certificate expired before signature submitted to rekor" ); return Ok(false); } let not_after: DateTime = self.cert_validity.not_after.to_system_time().into(); if it > not_after { warn!( integrated_time = it.to_string(), not_after = self.cert_validity.not_after.to_string(), "certificate verification: ignoring layer, certificate issued after signatured submitted to rekor" ); return Ok(false); } Ok(true) } None => { if self.require_rekor_bundle { warn!("certificate verifier: ignoring layer because rekor bundle is missing"); Ok(false) } else { Ok(true) } } } } } #[cfg(test)] mod tests { use std::time::{Duration, SystemTime}; use super::*; use crate::cosign::bundle::Bundle; use crate::crypto::tests::*; use crate::registry; use pkcs8::der::asn1::UtcTime; use serde_json::json; use x509_cert::time::{Time, Validity}; #[test] fn verify_certificate_() -> anyhow::Result<()> { // use the correct CA chain let ca_data = generate_certificate(None, CertGenerationOptions::default())?; let ca_cert = registry::Certificate { encoding: registry::CertificateEncoding::Pem, data: ca_data.cert.to_pem()?, }; let cert_chain = vec![ca_cert]; let issued_cert = generate_certificate(Some(&ca_data), CertGenerationOptions::default())?; let issued_cert_pem = issued_cert.cert.to_pem()?; let verifier = CertificateVerifier::from_pem(&issued_cert_pem, false, Some(&cert_chain)); assert!(verifier.is_ok()); // Use a different CA chain let another_ca_data = generate_certificate(None, CertGenerationOptions::default())?; let another_ca_cert = registry::Certificate { encoding: registry::CertificateEncoding::Pem, data: another_ca_data.cert.to_pem()?, }; let cert_chain = vec![another_ca_cert]; let verifier = 
CertificateVerifier::from_pem(&issued_cert_pem, false, Some(&cert_chain)); assert!(verifier.is_err()); // No cert chain let verifier = CertificateVerifier::from_pem(&issued_cert_pem, false, None); assert!(verifier.is_ok()); Ok(()) } /// Create a SignatureLayer using some hard coded value. Returns the /// certificate that can be used to successfully verify the layer fn test_data() -> (SignatureLayer, String) { let ss_value = json!({ "critical": { "identity": { "docker-reference": "registry-testing.svc.lan/kubewarden/pod-privileged" }, "image": { "docker-manifest-digest": "sha256:f1143ec2786e13d7d3335dbb498528438d910648469d3f39647e1cde6914da8d" }, "type": "cosign container image signature" }, "optional": null }); let bundle = build_bundle(); let cert_pem_raw = r#"-----BEGIN CERTIFICATE----- MIICsTCCAligAwIBAgIUR8wkyvHURfBVH6K2uhfTJZItw3owCgYIKoZIzj0EAwIw gZIxCzAJBgNVBAYTAkRFMRAwDgYDVQQIEwdCYXZhcmlhMRIwEAYDVQQHEwlOdXJl bWJlcmcxEzARBgNVBAoTCkt1YmV3YXJkZW4xIzAhBgNVBAsTGkt1YmV3YXJkZW4g SW50ZXJtZWRpYXRlIENBMSMwIQYDVQQDExpLdWJld2FyZGVuIEludGVybWVkaWF0 ZSBDQTAeFw0yMjExMTAxMDM4MDBaFw0yMzExMTAxMDM4MDBaMIGFMQswCQYDVQQG EwJERTEQMA4GA1UECBMHQmF2YXJpYTESMBAGA1UEBxMJTnVyZW1iZXJnMRMwEQYD VQQKEwpLdWJld2FyZGVuMRgwFgYDVQQLEw9LdWJld2FyZGVuIFVzZXIxITAfBgNV BAMTGHVzZXIxLmN1c3RvbS13aWRnZXRzLmNvbTBZMBMGByqGSM49AgEGCCqGSM49 AwEHA0IABEKjBtYLmtwhXNV1/uBanNn5YLD/QY/lfhPleBzenCL7CC2iocu8m3WM PMfd06tE/9HbBAITf64Oc4Mp7abrzp2jgZYwgZMwDgYDVR0PAQH/BAQDAgeAMBMG A1UdJQQMMAoGCCsGAQUFBwMDMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFHsx7jle 7PzGarNvliop+/aTj9GsMB8GA1UdIwQYMBaAFKJu6pRjVGUXVCVkft0YQ+3o1GbQ MB4GA1UdEQQXMBWBE3VzZXIxQGt1YmV3YXJkZW4uaW8wCgYIKoZIzj0EAwIDRwAw RAIgPixAn47x4qLpu7Y/d0oyvbnOGtD5cY7rywdMOO7LYRsCIDsCyGUZIYMFfSrt 3K/aLG49dcv6FKBtZpF5+hYj1zKe -----END CERTIFICATE-----"# .to_string(); let signature_layer = SignatureLayer { simple_signing: serde_json::from_value(ss_value.clone()).unwrap(), oci_digest: String::from( "sha256:f9b817c013972c75de8689d55c0d441c3eb84f6233ac75f6a9c722ea5db0058b", ), 
signature: Some(String::from( "MEYCIQCIqLEe6hnjEXP/YC2P9OIwEr2yMmwPNHLzvCPaoaXFOQIhALyTouhKNKc2ZVrR0GUQ7J0U5AtlyDZDLGnasAi7XnV/", )), bundle: Some(bundle), certificate_signature: None, raw_data: serde_json::to_vec(&ss_value).unwrap(), }; (signature_layer, cert_pem_raw) } fn build_bundle() -> Bundle { let bundle_value = json!({ "SignedEntryTimestamp": "MEUCIG5TYOXkiPm7RGYgDIPHwRQW5NyoSPuwxvJe4ByB9c37AiEAyD0dVcsiJ5Lp+QY5SL80jDxfc75BtjRnticVf7SiFD0=", "Payload": { "body": "eyJhcGlWZXJzaW9uIjoiMC4wLjEiLCJraW5kIjoiaGFzaGVkcmVrb3JkIiwic3BlYyI6eyJkYXRhIjp7Imhhc2giOnsiYWxnb3JpdGhtIjoic2hhMjU2IiwidmFsdWUiOiJmOWI4MTdjMDEzOTcyYzc1ZGU4Njg5ZDU1YzBkNDQxYzNlYjg0ZjYyMzNhYzc1ZjZhOWM3MjJlYTVkYjAwNThiIn19LCJzaWduYXR1cmUiOnsiY29udGVudCI6Ik1FWUNJUUNJcUxFZTZobmpFWFAvWUMyUDlPSXdFcjJ5TW13UE5ITHp2Q1Bhb2FYRk9RSWhBTHlUb3VoS05LYzJaVnJSMEdVUTdKMFU1QXRseURaRExHbmFzQWk3WG5WLyIsInB1YmxpY0tleSI6eyJjb250ZW50IjoiTFMwdExTMUNSVWRKVGlCRFJWSlVTVVpKUTBGVVJTMHRMUzB0Q2sxSlNVTnpWRU5EUVd4cFowRjNTVUpCWjBsVlVqaDNhM2wyU0ZWU1prSldTRFpMTW5Wb1psUktXa2wwZHpOdmQwTm5XVWxMYjFwSmVtb3dSVUYzU1hjS1oxcEplRU42UVVwQ1owNVdRa0ZaVkVGclVrWk5Va0YzUkdkWlJGWlJVVWxGZDJSRFdWaGFhR050YkdoTlVrbDNSVUZaUkZaUlVVaEZkMnhQWkZoS2JBcGlWMHBzWTIxamVFVjZRVkpDWjA1V1FrRnZWRU5yZERGWmJWWXpXVmhLYTFwWE5IaEpla0ZvUW1kT1ZrSkJjMVJIYTNReFdXMVdNMWxZU210YVZ6Um5DbE5YTlRCYVdFcDBXbGRTY0ZsWVVteEpSVTVDVFZOTmQwbFJXVVJXVVZGRVJYaHdUR1JYU214a01rWjVXa2RXZFVsRmJIVmtSMVo1WWxkV2EyRlhSakFLV2xOQ1JGRlVRV1ZHZHpCNVRXcEZlRTFVUVhoTlJFMDBUVVJDWVVaM01IbE5la1Y0VFZSQmVFMUVUVFJOUkVKaFRVbEhSazFSYzNkRFVWbEVWbEZSUndwRmQwcEZVbFJGVVUxQk5FZEJNVlZGUTBKTlNGRnRSakpaV0Vwd1dWUkZVMDFDUVVkQk1WVkZRbmhOU2xSdVZubGFWekZwV2xoS2JrMVNUWGRGVVZsRUNsWlJVVXRGZDNCTVpGZEtiR1F5Um5sYVIxWjFUVkpuZDBabldVUldVVkZNUlhjNVRHUlhTbXhrTWtaNVdrZFdkVWxHVm5wYVdFbDRTVlJCWmtKblRsWUtRa0ZOVkVkSVZucGFXRWw0VEcxT01XTXpVblppVXpFellWZFNibHBZVW5wTWJVNTJZbFJDV2sxQ1RVZENlWEZIVTAwME9VRm5SVWREUTNGSFUwMDBPUXBCZDBWSVFUQkpRVUpGUzJwQ2RGbE1iWFIzYUZoT1ZqRXZkVUpoYms1dU5WbE1SQzlSV1M5c1ptaFFiR1ZDZW1WdVEwdzNRME15YVc5amRUaHRNMWROQ2xCTlptUXdOblJGTHpsS
VlrSkJTVlJtTmpSUFl6Uk5jRGRoWW5KNmNESnFaMXBaZDJkYVRYZEVaMWxFVmxJd1VFRlJTQzlDUVZGRVFXZGxRVTFDVFVjS1FURlZaRXBSVVUxTlFXOUhRME56UjBGUlZVWkNkMDFFVFVGM1IwRXhWV1JGZDBWQ0wzZFJRMDFCUVhkSVVWbEVWbEl3VDBKQ1dVVkdTSE40TjJwc1pRbzNVSHBIWVhKT2RteHBiM0FyTDJGVWFqbEhjMDFDT0VkQk1WVmtTWGRSV1UxQ1lVRkdTMHAxTm5CU2FsWkhWVmhXUTFaclpuUXdXVkVyTTI4eFIySlJDazFDTkVkQk1WVmtSVkZSV0UxQ1YwSkZNMVo2V2xoSmVGRkhkREZaYlZZeldWaEthMXBYTkhWaFZ6aDNRMmRaU1V0dldrbDZhakJGUVhkSlJGSjNRWGNLVWtGSloxQnBlRUZ1TkRkNE5IRk1jSFUzV1M5a01HOTVkbUp1VDBkMFJEVmpXVGR5ZVhka1RVOVBOMHhaVW5ORFNVUnpRM2xIVlZwSldVMUdabE55ZEFvelN5OWhURWMwT1dSamRqWkdTMEowV25CR05TdG9XV294ZWt0bENpMHRMUzB0UlU1RUlFTkZVbFJKUmtsRFFWUkZMUzB0TFMwSyJ9fX19", "integratedTime": 1668077126, "logIndex": 6821636, "logID": "c0d23d6ad406973f9559f3ba2d1ca01f84147d8ffc5b8445c224f98b9591801d" } }); let bundle: Bundle = serde_json::from_value(bundle_value).expect("Cannot parse bundle"); bundle } #[test] fn verify_correct_layer() { let (signature_layer, cert_pem_raw) = test_data(); let vc = CertificateVerifier::from_pem(cert_pem_raw.as_bytes(), true, None) .expect("cannot create verification constraint"); assert!(vc.verify(&signature_layer).expect("error while verifying")); } #[test] fn rekor_integration() { let (signature_layer, cert_pem_raw) = test_data(); let signature_layer_without_rekor_bundle = SignatureLayer { bundle: None, ..signature_layer.clone() }; assert!(signature_layer_without_rekor_bundle.bundle.is_none()); let vc = CertificateVerifier::from_pem(cert_pem_raw.as_bytes(), true, None) .expect("cannot create verification constraint"); assert!(vc.verify(&signature_layer).expect("error while verifying")); // layer verification fails because there's no rekor bundle assert!( !vc.verify(&signature_layer_without_rekor_bundle) .expect("error while verifying") ); // verification constraint that does not enforce rekor integration let vc = CertificateVerifier::from_pem(cert_pem_raw.as_bytes(), false, None) .expect("cannot create verification constraint"); assert!( 
vc.verify(&signature_layer_without_rekor_bundle) .expect("error while verifying") ); } #[test] fn detect_signature_created_at_invalid_time() { let (signature_layer, cert_pem_raw) = test_data(); let mut vc = CertificateVerifier::from_pem(cert_pem_raw.as_bytes(), true, None) .expect("cannot create verification constraint"); let not_before = UtcTime::from_system_time( SystemTime::now() .checked_sub(Duration::from_secs(60)) .expect("cannot sub time by 60 seconds"), ) .expect("cannot create not_before timestamp"); let not_after = UtcTime::from_system_time( SystemTime::now() .checked_add(Duration::from_secs(60)) .expect("cannot add time by 60 seconds"), ) .expect("cannot create not_after timestamp"); let validity = Validity { not_before: Time::UtcTime(not_before), not_after: Time::UtcTime(not_after), }; vc.cert_validity = validity; assert!(!vc.verify(&signature_layer).expect("error while verifying")); } } ================================================ FILE: src/cosign/verification_constraint/mod.rs ================================================ // // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Structs that can be used to verify [`crate::cosign::SignatureLayer`] //! with special business logic. //! //! This module provides already the most common kind of verification constraints: //! * [`PublicKeyVerifier`]: ensure a signature has been produced by a specific //! cosign key //! 
* [`CertSubjectEmailVerifier`]: ensure a signature has been produced in keyless mode, //! plus the email address associated with the signer matches a specific one //! * [`CertSubjectUrlVerifier`]: ensure a signature has been produced in keyless mode, //! plus the certificate SAN has a specific URI inside of it. This can be used to verify //! signatures produced by GitHub Actions. //! //! Developers can define ad-hoc validation logic by creating a Struct that implements //! the [`VerificationConstraintVec`] trait. use super::signature_layers::SignatureLayer; use crate::errors::Result; /// A list of objects implementing the [`VerificationConstraint`] trait pub type VerificationConstraintVec = Vec>; /// A list of references to objects implementing the [`VerificationConstraint`] trait pub type VerificationConstraintRefVec<'a> = Vec<&'a Box>; /// A trait that can be used to define verification constraints objects /// that use a custom verification logic. pub trait VerificationConstraint: std::fmt::Debug { /// Given the `signature_layer` object, return `true` if the verification /// check is satisfied. /// /// Developer can use the /// [`errors::SigstoreError::VerificationConstraintError`](crate::errors::SigstoreError::VerificationConstraintError) /// error when something goes wrong inside of the verification logic. 
/// /// ``` /// use sigstore::{ /// cosign::verification_constraint::VerificationConstraint, /// cosign::signature_layers::SignatureLayer, /// errors::{SigstoreError, Result}, /// }; /// /// #[derive(Debug)] /// struct MyVerifier{} /// /// impl VerificationConstraint for MyVerifier { /// fn verify(&self, _sl: &SignatureLayer) -> Result { /// Err(SigstoreError::VerificationConstraintError( /// "something went wrong!".to_string())) /// } /// } fn verify(&self, signature_layer: &SignatureLayer) -> Result; } pub mod certificate_verifier; pub use certificate_verifier::CertificateVerifier; pub mod public_key_verifier; pub use public_key_verifier::PublicKeyVerifier; pub mod cert_subject_email_verifier; pub use cert_subject_email_verifier::CertSubjectEmailVerifier; pub mod cert_subject_url_verifier; pub use cert_subject_url_verifier::CertSubjectUrlVerifier; pub mod annotation_verifier; pub use annotation_verifier::AnnotationVerifier; ================================================ FILE: src/cosign/verification_constraint/public_key_verifier.rs ================================================ use super::VerificationConstraint; use crate::cosign::signature_layers::SignatureLayer; use crate::crypto::{CosignVerificationKey, SigningScheme}; use crate::errors::Result; /// Verification Constraint for signatures produced with public/private keys #[derive(Debug)] pub struct PublicKeyVerifier { key: CosignVerificationKey, } impl PublicKeyVerifier { /// Create a new instance of `PublicKeyVerifier`. /// The `key_raw` variable holds a PEM encoded representation of the /// public key to be used at verification time. pub fn new(key_raw: &[u8], signing_scheme: &SigningScheme) -> Result { let key = CosignVerificationKey::from_pem(key_raw, signing_scheme)?; Ok(PublicKeyVerifier { key }) } /// Create a new instance of `PublicKeyVerifier`. /// The `key_raw` variable holds a PEM encoded representation of the /// public key to be used at verification time. 
The verification /// algorithm will be derived from the public key type: /// * `RSA public key`: `RSA_PKCS1_SHA256` /// * `EC public key with P-256 curve`: `ECDSA_P256_SHA256_ASN1` /// * `EC public key with P-384 curve`: `ECDSA_P384_SHA384_ASN1` /// * `Ed25519 public key`: `Ed25519` pub fn try_from(key_raw: &[u8]) -> Result { let key = CosignVerificationKey::try_from_pem(key_raw)?; Ok(PublicKeyVerifier { key }) } } impl VerificationConstraint for PublicKeyVerifier { fn verify(&self, signature_layer: &SignatureLayer) -> Result { Ok(signature_layer.is_signed_by_key(&self.key)) } } #[cfg(test)] mod tests { use super::*; use crate::cosign::signature_layers::tests::{ build_correct_signature_layer_with_certificate, build_correct_signature_layer_without_bundle, }; #[test] fn pub_key_verifier() { let (sl, key) = build_correct_signature_layer_without_bundle(); let vc = PublicKeyVerifier { key }; assert!(vc.verify(&sl).unwrap()); let sl = build_correct_signature_layer_with_certificate(); assert!(!vc.verify(&sl).unwrap()); } } ================================================ FILE: src/crypto/certificate.rs ================================================ // // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
use chrono::{DateTime, Utc}; use const_oid::db::rfc5912::ID_KP_CODE_SIGNING; use thiserror::Error; use x509_cert::{ Certificate, ext::pkix::{ExtendedKeyUsage, KeyUsage, KeyUsages, SubjectAltName, constraints}, }; use crate::errors::{Result, SigstoreError}; /// Ensure the given certificate can be trusted for verifying cosign /// signatures. /// /// The following checks are performed against the given certificate: /// * The certificate has the right set of key usages /// * The certificate cannot be used before the current time pub(crate) fn is_trusted(certificate: &Certificate, integrated_time: i64) -> Result<()> { verify_key_usages(certificate)?; verify_has_san(certificate)?; verify_validity(certificate)?; verify_expiration(certificate, integrated_time)?; Ok(()) } pub(crate) fn verify_key_usages(certificate: &Certificate) -> Result<()> { let (_, key_usage) = certificate .tbs_certificate .get::() .map_err(|_| SigstoreError::CertificateWithoutDigitalSignatureKeyUsage)? .ok_or(SigstoreError::CertificateWithoutDigitalSignatureKeyUsage)?; if key_usage.0.bits() & KeyUsages::DigitalSignature as u16 == 1 { return Err(SigstoreError::CertificateWithoutDigitalSignatureKeyUsage); } let (_, key_ext_usage) = certificate .tbs_certificate .get::() .map_err(|_| SigstoreError::CertificateWithoutCodeSigningKeyUsage)? .ok_or(SigstoreError::CertificateWithoutCodeSigningKeyUsage)?; // code signing if !key_ext_usage.0.contains(&ID_KP_CODE_SIGNING) { return Err(SigstoreError::CertificateWithoutCodeSigningKeyUsage); } Ok(()) } pub(crate) fn verify_has_san(certificate: &Certificate) -> Result<()> { if certificate .tbs_certificate .get::() .map_err(|_| SigstoreError::CertificateWithoutSubjectAlternativeName)? 
.is_some() { Ok(()) } else { Err(SigstoreError::CertificateWithoutSubjectAlternativeName) } } pub(crate) fn verify_validity(certificate: &Certificate) -> Result<()> { // Comment taken from cosign verification code: // THIS IS IMPORTANT: WE DO NOT CHECK TIMES HERE // THE CERTIFICATE IS TREATED AS TRUSTED FOREVER // WE CHECK THAT THE SIGNATURES WERE CREATED DURING THIS WINDOW let validity = &certificate.tbs_certificate.validity; if std::time::SystemTime::now() < validity.not_before.to_system_time() { Err(SigstoreError::CertificateValidityError( validity.not_before.to_string(), )) } else { Ok(()) } } fn verify_expiration(certificate: &Certificate, integrated_time: i64) -> Result<()> { let it = DateTime::::from_naive_utc_and_offset( DateTime::from_timestamp(integrated_time, 0) .ok_or(SigstoreError::X509Error("timestamp is not legal".into()))? .naive_utc(), Utc, ); let validity = &certificate.tbs_certificate.validity; let not_before: DateTime = validity.not_before.to_system_time().into(); if it < not_before { return Err( SigstoreError::CertificateExpiredBeforeSignaturesSubmittedToRekor { integrated_time: it.to_string(), not_before: validity.not_before.to_string(), }, ); } let not_after: DateTime = validity.not_after.to_system_time().into(); if it > not_after { return Err( SigstoreError::CertificateIssuedAfterSignaturesSubmittedToRekor { integrated_time: it.to_string(), not_after: validity.not_after.to_string(), }, ); } Ok(()) } #[derive(Debug, Error)] pub enum ExtensionErrorKind { #[error("certificate missing extension: {0}")] Missing(&'static str), #[error("certificate extension bit not asserted: {0}")] BitUnset(&'static str), #[error("certificate's {0} extension not marked as critical")] NotCritical(&'static str), } #[derive(Debug, Error)] pub enum NotLeafErrorKind { #[error("certificate is a CA: CAs are not leaves")] IsCA, } #[derive(Debug, Error)] pub enum NotCAErrorKind { #[error("certificate is not a CA: CAs must assert cA and keyCertSign")] NotCA, 
#[error("certificate is not a root CA")] NotRootCA, #[error("certificate in invalid state: cA={ca}, keyCertSign={key_cert_sign}")] Invalid { ca: bool, key_cert_sign: bool }, } #[derive(Debug, Error)] #[error(transparent)] pub enum CertificateValidationError { #[error("only X509 V3 certificates are supported")] VersionUnsupported, #[error("malformed certificate")] Malformed(#[source] x509_cert::der::Error), NotLeaf(#[from] NotLeafErrorKind), NotCA(#[from] NotCAErrorKind), Extension(#[from] ExtensionErrorKind), } /// Check if the given certificate is a leaf in the context of the Sigstore profile. /// /// * It is not a root or intermediate CA; /// * It has `keyUsage.digitalSignature` /// * It has `CODE_SIGNING` as an `ExtendedKeyUsage`. /// /// This function does not evaluate the trustworthiness of the certificate. pub(crate) fn is_leaf( certificate: &Certificate, ) -> core::result::Result<(), CertificateValidationError> { // NOTE(jl): following structure of sigstore-python over the slightly different handling found // in `verify_key_usages`. let tbs = &certificate.tbs_certificate; // Only V3 certificates should appear in the context of Sigstore; earlier versions of X.509 lack // extensions and have ambiguous CA behavior. if tbs.version != x509_cert::Version::V3 { Err(CertificateValidationError::VersionUnsupported)?; } if is_ca(certificate).is_ok() { Err(NotLeafErrorKind::IsCA)?; }; let digital_signature = match tbs .get::() .map_err(CertificateValidationError::Malformed)? { None => Err(ExtensionErrorKind::Missing("KeyUsage"))?, Some((_, key_usage)) => key_usage.digital_signature(), }; if !digital_signature { Err(ExtensionErrorKind::BitUnset("KeyUsage.digitalSignature"))?; } // Finally, we check to make sure the leaf has an `ExtendedKeyUsages` // extension that includes a codesigning entitlement. Sigstore should // never issue a leaf that doesn't have this extended usage. let extended_key_usage = match tbs .get::() .map_err(CertificateValidationError::Malformed)? 
{ None => Err(ExtensionErrorKind::Missing("ExtendedKeyUsage"))?, Some((_, extended_key_usage)) => extended_key_usage, }; if !extended_key_usage.0.contains(&ID_KP_CODE_SIGNING) { Err(ExtensionErrorKind::BitUnset( "ExtendedKeyUsage.digitalSignature", ))?; } Ok(()) } /// Checks if the given `certificate` is a CA certificate. /// /// This does **not** indicate trustworthiness of the given `certificate`, only if it has the /// appropriate interior state. /// /// This function is **not** naively invertible: users **must** use the dedicated `is_leaf` /// utility function to determine whether a particular leaf upholds Sigstore's invariants. pub(crate) fn is_ca( certificate: &Certificate, ) -> core::result::Result<(), CertificateValidationError> { let tbs = &certificate.tbs_certificate; // Only V3 certificates should appear in the context of Sigstore; earlier versions of X.509 lack // extensions and have ambiguous CA behavior. if tbs.version != x509_cert::Version::V3 { return Err(CertificateValidationError::VersionUnsupported); } // Valid CA certificates must have the following set: // // - `BasicKeyUsage.keyCertSign` // - `BasicConstraints.ca` // // Any other combination of states is inconsistent and invalid, meaning // that we won't treat the certificate as neither a leaf nor a CA. let ca = match tbs .get::() .map_err(CertificateValidationError::Malformed)? { None => Err(ExtensionErrorKind::Missing("BasicConstraints"))?, Some((false, _)) => { // BasicConstraints must be marked as critical, per RFC 5280 4.2.1.9. Err(ExtensionErrorKind::NotCritical("BasicConstraints"))? } Some((true, v)) => v.ca, }; let key_cert_sign = match tbs .get::() .map_err(CertificateValidationError::Malformed)? { None => Err(ExtensionErrorKind::Missing("KeyUsage"))?, Some((_, v)) => v.key_cert_sign(), }; // both states set, this is a CA. if ca && key_cert_sign { return Ok(()); } if !(ca || key_cert_sign) { Err(NotCAErrorKind::NotCA)?; } // Anything else is an invalid state that should never occur. 
Err(NotCAErrorKind::Invalid { ca, key_cert_sign })? } /// Returns `True` if and only if the given `Certificate` indicates /// that it's a root CA. /// /// This is **not** a verification function, and it does not establish /// the trustworthiness of the given certificate. pub(crate) fn is_root_ca( certificate: &Certificate, ) -> core::result::Result<(), CertificateValidationError> { // NOTE(ww): This function is obnoxiously long to make the different // states explicit. let tbs = &certificate.tbs_certificate; // Only V3 certificates should appear in the context of Sigstore; earlier versions of X.509 lack // extensions and have ambiguous CA behavior. if tbs.version != x509_cert::Version::V3 { return Err(CertificateValidationError::VersionUnsupported); } // Non-CAs can't possibly be root CAs. is_ca(certificate)?; // A certificate that is its own issuer and signer is considered a root CA. if tbs.issuer != tbs.subject { Err(NotCAErrorKind::NotRootCA)? } Ok(()) } #[cfg(test)] mod tests { use super::*; use crate::crypto::tests::*; use chrono::TimeDelta; use x509_cert::der::Decode; #[test] fn verify_cert_key_usages_success() -> anyhow::Result<()> { let ca_data = generate_certificate(None, CertGenerationOptions::default())?; let issued_cert = generate_certificate(Some(&ca_data), CertGenerationOptions::default())?; let issued_cert_pem = issued_cert.cert.to_pem()?; let pem = pem::parse(issued_cert_pem)?; let cert = x509_cert::Certificate::from_der(pem.contents())?; assert!(verify_key_usages(&cert).is_ok()); Ok(()) } #[test] fn verify_cert_key_usages_failure_because_no_digital_signature() -> anyhow::Result<()> { let ca_data = generate_certificate(None, CertGenerationOptions::default())?; let issued_cert = generate_certificate( Some(&ca_data), CertGenerationOptions { digital_signature_key_usage: false, ..Default::default() }, )?; let issued_cert_pem = issued_cert.cert.to_pem()?; let pem = pem::parse(issued_cert_pem)?; let cert = 
x509_cert::Certificate::from_der(pem.contents())?; let err = verify_key_usages(&cert).expect_err("Was supposed to return an error"); let found = matches!( err, SigstoreError::CertificateWithoutDigitalSignatureKeyUsage ); assert!(found, "Didn't get expected error, got {:?} instead", err); Ok(()) } #[test] fn verify_cert_key_usages_failure_because_no_code_signing() -> anyhow::Result<()> { let ca_data = generate_certificate(None, CertGenerationOptions::default())?; let issued_cert = generate_certificate( Some(&ca_data), CertGenerationOptions { code_signing_extended_key_usage: false, ..Default::default() }, )?; let issued_cert_pem = issued_cert.cert.to_pem()?; let pem = pem::parse(issued_cert_pem)?; let cert = x509_cert::Certificate::from_der(pem.contents())?; let err = verify_key_usages(&cert).expect_err("Was supposed to return an error"); let found = matches!(err, SigstoreError::CertificateWithoutCodeSigningKeyUsage); assert!(found, "Didn't get expected error, got {:?} instead", err); Ok(()) } #[test] fn verify_cert_failure_because_no_san() -> anyhow::Result<()> { let ca_data = generate_certificate(None, CertGenerationOptions::default())?; let issued_cert = generate_certificate( Some(&ca_data), CertGenerationOptions { subject_email: None, subject_url: None, ..Default::default() }, )?; let issued_cert_pem = issued_cert.cert.to_pem()?; let pem = pem::parse(issued_cert_pem)?; let cert = x509_cert::Certificate::from_der(pem.contents())?; let error = verify_has_san(&cert).expect_err("Didn't get an error"); let found = matches!( error, SigstoreError::CertificateWithoutSubjectAlternativeName ); assert!(found, "Didn't get the expected error: {}", error); Ok(()) } #[test] fn verify_cert_validity_success() -> anyhow::Result<()> { let ca_data = generate_certificate(None, CertGenerationOptions::default())?; let issued_cert = generate_certificate(Some(&ca_data), CertGenerationOptions::default())?; let issued_cert_pem = issued_cert.cert.to_pem()?; let pem = 
pem::parse(issued_cert_pem)?; let cert = x509_cert::Certificate::from_der(pem.contents())?; assert!(verify_validity(&cert).is_ok()); Ok(()) } #[test] fn verify_cert_validity_failure() -> anyhow::Result<()> { let ca_data = generate_certificate(None, CertGenerationOptions::default())?; let issued_cert = generate_certificate( Some(&ca_data), CertGenerationOptions { not_before: Utc::now() .checked_add_signed(TimeDelta::try_days(5).unwrap()) .unwrap(), not_after: Utc::now() .checked_add_signed(TimeDelta::try_days(6).unwrap()) .unwrap(), ..Default::default() }, )?; let issued_cert_pem = issued_cert.cert.to_pem()?; let pem = pem::parse(issued_cert_pem)?; let cert = x509_cert::Certificate::from_der(pem.contents())?; let err = verify_validity(&cert).expect_err("Was expecting an error"); let found = matches!(err, SigstoreError::CertificateValidityError(_)); assert!(found, "Didn't get expected error, got {:?} instead", err); Ok(()) } #[test] fn verify_cert_expiration_success() -> anyhow::Result<()> { let ca_data = generate_certificate(None, CertGenerationOptions::default())?; let integrated_time = Utc::now(); let issued_cert = generate_certificate( Some(&ca_data), CertGenerationOptions { not_before: Utc::now() .checked_sub_signed(TimeDelta::try_days(1).unwrap()) .unwrap(), not_after: Utc::now() .checked_add_signed(TimeDelta::try_days(1).unwrap()) .unwrap(), ..Default::default() }, )?; let issued_cert_pem = issued_cert.cert.to_pem()?; let pem = pem::parse(issued_cert_pem)?; let cert = x509_cert::Certificate::from_der(pem.contents())?; assert!(verify_expiration(&cert, integrated_time.timestamp(),).is_ok()); Ok(()) } #[test] fn verify_cert_expiration_failure() -> anyhow::Result<()> { let ca_data = generate_certificate(None, CertGenerationOptions::default())?; let integrated_time = Utc::now() .checked_add_signed(TimeDelta::try_days(5).unwrap()) .unwrap(); let issued_cert = generate_certificate( Some(&ca_data), CertGenerationOptions { not_before: Utc::now() 
.checked_sub_signed(TimeDelta::try_days(1).unwrap()) .unwrap(), not_after: Utc::now() .checked_add_signed(TimeDelta::try_days(1).unwrap()) .unwrap(), ..Default::default() }, )?; let issued_cert_pem = issued_cert.cert.to_pem().unwrap(); let pem = pem::parse(issued_cert_pem)?; let cert = x509_cert::Certificate::from_der(pem.contents())?; let err = verify_expiration(&cert, integrated_time.timestamp()) .expect_err("Was expecting an error"); let found = matches!( err, SigstoreError::CertificateIssuedAfterSignaturesSubmittedToRekor { integrated_time: _, not_after: _, } ); assert!(found, "Didn't get expected error, got {:?} instead", err); Ok(()) } } ================================================ FILE: src/crypto/certificate_pool.rs ================================================ // // Copyright 2022 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use const_oid::db::rfc5280::ID_KP_CODE_SIGNING; use pki_types::{CertificateDer, TrustAnchor, UnixTime}; use webpki::{EndEntityCert, KeyUsage, VerifiedPath}; use crate::errors::{Result as SigstoreResult, SigstoreError}; /// A collection of trusted root certificates. #[derive(Default, Debug)] pub(crate) struct CertificatePool { trusted_roots: Vec>, intermediates: Vec>, } impl CertificatePool { /// Builds a `CertificatePool` instance using the provided list of [`Certificate`]. 
pub(crate) fn from_certificates<'r, 'i, R, I>( trusted_roots: R, untrusted_intermediates: I, ) -> SigstoreResult where R: IntoIterator>, I: IntoIterator>, { Ok(CertificatePool { trusted_roots: trusted_roots .into_iter() .map(|x| Ok(webpki::anchor_from_trusted_cert(&x)?.to_owned())) .collect::, webpki::Error>>()?, intermediates: untrusted_intermediates .into_iter() .map(|i| i.into_owned()) .collect(), }) } /// Ensures the given certificate has been issued by one of the trusted root certificates /// An `Err` is returned when the verification fails. /// /// **Note well:** certificates issued by Fulcio are, by design, valid only /// for a really limited amount of time. /// Because of that the validity checks performed by this method are more /// relaxed. The validity checks are done inside of /// [`crate::crypto::verify_validity`] and [`crate::crypto::verify_expiration`]. pub(crate) fn verify_pem_cert( &self, cert_pem: &[u8], verification_time: Option, ) -> SigstoreResult<()> { let cert_pem = pem::parse(cert_pem)?; if cert_pem.tag() != "CERTIFICATE" { return Err(SigstoreError::CertificatePoolError( "PEM file is not a certificate".into(), )); } self.verify_der_cert(cert_pem.contents(), verification_time) } /// Ensures the given certificate has been issued by one of the trusted root certificates /// An `Err` is returned when the verification fails. /// /// **Note well:** certificates issued by Fulcio are, by design, valid only /// for a really limited amount of time. /// Because of that the validity checks performed by this method are more /// relaxed. The validity checks are done inside of /// [`crate::crypto::verify_validity`] and [`crate::crypto::verify_expiration`]. 
pub(crate) fn verify_der_cert( &self, der: &[u8], verification_time: Option, ) -> SigstoreResult<()> { let der = CertificateDer::from(der); let cert = EndEntityCert::try_from(&der)?; let time = std::time::Duration::from_secs(chrono::Utc::now().timestamp() as u64); self.verify_cert_with_time( &cert, verification_time.unwrap_or(UnixTime::since_unix_epoch(time)), )?; Ok(()) } pub(crate) fn verify_cert_with_time<'a, 'cert>( &'a self, cert: &'cert EndEntityCert<'cert>, verification_time: UnixTime, ) -> Result, webpki::Error> where 'a: 'cert, { let signing_algs = webpki::ALL_VERIFICATION_ALGS; let eku_code_signing = ID_KP_CODE_SIGNING.as_bytes(); cert.verify_for_usage( signing_algs, &self.trusted_roots, self.intermediates.as_slice(), verification_time, KeyUsage::required(eku_code_signing), None, None, ) } } ================================================ FILE: src/crypto/keyring.rs ================================================ // Copyright 2023 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
use std::collections::HashMap; use aws_lc_rs::{signature as aws_lc_rs_signature, signature::UnparsedPublicKey}; use const_oid::db::rfc5912::{ID_EC_PUBLIC_KEY, RSA_ENCRYPTION, SECP_256_R_1}; use digest::Digest; use thiserror::Error; use x509_cert::{ der, der::{Decode, Encode}, spki::SubjectPublicKeyInfoOwned, }; #[derive(Error, Debug)] pub enum KeyringError { #[error("malformed key")] KeyMalformed(#[from] x509_cert::der::Error), #[error("unsupported algorithm")] AlgoUnsupported, #[error("requested key not in keyring")] KeyNotFound, #[error("verification failed")] VerificationFailed, } type Result = std::result::Result; /// A CT signing key. struct Key { inner: UnparsedPublicKey>, /// The key's RFC 6962-style "key ID". /// fingerprint: [u8; 32], } impl Key { /// Creates a `Key` from a DER blob containing a SubjectPublicKeyInfo object. pub fn new(spki_bytes: &[u8]) -> Result { let spki = SubjectPublicKeyInfoOwned::from_der(spki_bytes)?; let (algo, params) = if let Some(params) = &spki.algorithm.parameters { // Special-case RSA keys, which don't have SPKI parameters. if spki.algorithm.oid == RSA_ENCRYPTION && params == &der::Any::null() { // TODO(tnytown): Do we need to support RSA keys? return Err(KeyringError::AlgoUnsupported); }; (spki.algorithm.oid, params.decode_as()?) } else { return Err(KeyringError::AlgoUnsupported); }; match (algo, params) { // TODO(tnytown): should we also accept ed25519, p384, ... ? (ID_EC_PUBLIC_KEY, SECP_256_R_1) => Ok(Key { inner: UnparsedPublicKey::new( &aws_lc_rs_signature::ECDSA_P256_SHA256_ASN1, spki.subject_public_key.raw_bytes().to_owned(), ), fingerprint: { let mut hasher = sha2::Sha256::new(); spki.encode(&mut hasher).expect("failed to hash key!"); hasher.finalize().into() }, }), _ => Err(KeyringError::AlgoUnsupported), } } } /// Represents a set of CT signing keys, each of which is potentially a valid signer for /// Signed Certificate Timestamps (SCTs) or Signed Tree Heads (STHs). 
pub struct Keyring(HashMap<[u8; 32], Key>); impl Keyring { /// Creates a `Keyring` from DER encoded SPKI-format public keys. pub fn new<'a>(keys: impl IntoIterator) -> Result { Ok(Self( keys.into_iter() .flat_map(Key::new) .map(|k| Ok((k.fingerprint, k))) .collect::>()?, )) } /// Verifies `data` against a `signature` with a public key identified by `key_id`. pub fn verify(&self, key_id: &[u8; 32], signature: &[u8], data: &[u8]) -> Result<()> { let key = self.0.get(key_id).ok_or(KeyringError::KeyNotFound)?; key.inner .verify(data, signature) .or(Err(KeyringError::VerificationFailed))?; Ok(()) } } #[cfg(test)] mod tests { use super::Keyring; use crate::crypto::signing_key::ecdsa::{ECDSAKeys, EllipticCurve}; use digest::Digest; use std::io::Write; #[test] fn verify_keyring() { let message = b"some message"; // Create a key pair and a keyring containing the public key. let key_pair = ECDSAKeys::new(EllipticCurve::P256).unwrap(); let signer = key_pair.to_sigstore_signer().unwrap(); let pub_key = key_pair.as_inner().public_key_to_der().unwrap(); let keyring = Keyring::new([pub_key.as_slice()]).unwrap(); // Generate the signature. let signature = signer.sign(message).unwrap(); // Generate the key id. let mut hasher = sha2::Sha256::new(); hasher.write_all(pub_key.as_slice()).unwrap(); let key_id: [u8; 32] = hasher.finalize().into(); // Check for success. assert!( keyring .verify(&key_id, signature.as_slice(), message) .is_ok() ); // Check for failure with incorrect key id. assert!( keyring .verify(&[0; 32], signature.as_slice(), message) .is_err() ); // Check for failure with incorrect payload. let incorrect_message = b"another message"; assert!( keyring .verify(&key_id, signature.as_slice(), incorrect_message) .is_err() ); // Check for failure with incorrect keyring. 
let incorrect_key_pair = ECDSAKeys::new(EllipticCurve::P256).unwrap(); let incorrect_keyring = Keyring::new([incorrect_key_pair .as_inner() .public_key_to_der() .unwrap() .as_slice()]) .unwrap(); assert!( incorrect_keyring .verify(&key_id, signature.as_slice(), message) .is_err() ); } } ================================================ FILE: src/crypto/merkle/mod.rs ================================================ pub mod proof_verification; pub mod rfc6962; use crate::errors::SigstoreError; use crate::errors::SigstoreError::UnexpectedError; use digest::Output; pub use proof_verification::MerkleProofError; pub(crate) use proof_verification::MerkleProofVerifier; pub(crate) use rfc6962::{Rfc6269Default, Rfc6269HasherTrait}; /// Many rekor models have hex-encoded hashes, this functions helps to avoid repetition. #[cfg(any( feature = "sign", feature = "sigstore-trust-root", feature = "rekor", feature = "verify" ))] pub(crate) fn hex_to_hash_output( h: impl AsRef<[u8]>, ) -> Result, SigstoreError> { hex::decode(h) .map_err(Into::into) .and_then(|h| { <[u8; 32]>::try_from(h.as_slice()).map_err(|err| UnexpectedError(format!("{err:?}"))) }) .map(Into::into) } ================================================ FILE: src/crypto/merkle/proof_verification.rs ================================================ use super::rfc6962::Rfc6269HasherTrait; use MerkleProofError::*; use digest::{Digest, Output}; #[cfg(any( feature = "sign", feature = "sigstore-trust-root", feature = "rekor", feature = "verify" ))] use hex::ToHex; use std::cmp::Ordering; use std::fmt::Debug; #[derive(Debug)] pub enum MerkleProofError { MismatchedRoot { expected: String, got: String }, IndexGtTreeSize, UnexpectedNonEmptyProof, UnexpectedEmptyProof, NewTreeSmaller { new: u64, old: u64 }, WrongProofSize { got: u64, want: u64 }, WrongEmptyTreeHash, } pub(crate) trait MerkleProofVerifier: Rfc6269HasherTrait where O: Eq + AsRef<[u8]> + Clone + Debug, { /// Used to verify hashes. 
fn verify_match(a: &O, b: &O) -> Result<(), ()> { (a == b).then_some(()).ok_or(()) } /// `verify_inclusion` verifies the correctness of the inclusion proof for the leaf /// with the specified `leaf_hash` and `index`, relatively to the tree of the given `tree_size` /// and `root_hash`. Requires `0 <= index < tree_size`. #[cfg(any( feature = "sign", feature = "sigstore-trust-root", feature = "rekor", feature = "verify" ))] fn verify_inclusion( index: u64, // leaf index, m in RFC leaf_hash: &O, // d(m) leaf hash tree_size: u64, proof_hashes: &[O], // PATH(m, D[n]) audit path root_hash: &O, // MTH(D[n]) ) -> Result<(), MerkleProofError> { if index >= tree_size { return Err(IndexGtTreeSize); } Self::root_from_inclusion_proof(index, leaf_hash, tree_size, proof_hashes).and_then( |calc_root| { Self::verify_match(calc_root.as_ref(), root_hash).map_err(|_| MismatchedRoot { got: root_hash.encode_hex(), expected: calc_root.encode_hex(), }) }, ) } /// `root_from_inclusion_proof` calculates the expected root hash for a tree of the /// given size, provided a leaf index and hash with the corresponding inclusion /// proof. Requires `0 <= index < tree_size`. 
fn root_from_inclusion_proof( index: u64, // leaf index, m in RFC leaf_hash: &O, // d(m) leaf hash tree_size: u64, proof_hashes: &[O], // PATH(m, D[n]) audit path ) -> Result, MerkleProofError> { if index >= tree_size { return Err(IndexGtTreeSize); } let (inner, border) = Self::decomp_inclusion_proof(index, tree_size); match (proof_hashes.len() as u64, inner + border) { (got, want) if got != want => { return Err(WrongProofSize { got: proof_hashes.len() as u64, want: inner + border, }); } _ => {} } let res_left = Self::chain_inner(leaf_hash, &proof_hashes[..inner as usize], index); let res = Self::chain_border_right(&res_left, &proof_hashes[inner as usize..]); Ok(Box::new(res)) } // `verify_consistency` checks that the passed-in consistency proof is valid // between the passed in tree sizes, with respect to the corresponding root // hashes. Requires `0 <= old_size <= new_size`.. #[cfg(any( feature = "sign", feature = "sigstore-trust-root", feature = "rekor", feature = "verify" ))] fn verify_consistency( old_size: u64, new_size: u64, proof_hashes: &[O], old_root: &O, new_root: &O, ) -> Result<(), MerkleProofError> { match ( Ord::cmp(&old_size, &new_size), old_size == 0, proof_hashes.is_empty(), ) { (Ordering::Greater, _, _) => { return Err(NewTreeSmaller { new: new_size, old: old_size, }); } // when sizes are equal and the proof is empty we can just verify the roots (Ordering::Equal, _, true) => { return Self::verify_match(old_root, new_root).map_err(|_| MismatchedRoot { got: new_root.encode_hex(), expected: old_root.encode_hex(), }); } // the proof cannot be empty if the sizes are equal or the previous size was zero (Ordering::Equal, _, false) | (Ordering::Less, true, false) => { return Err(UnexpectedNonEmptyProof); } // any proof is accepted if old_size == 0 and the hash is the expected empty hash (Ordering::Less, true, true) => { return Self::verify_match(old_root, &Self::empty_root()) .map_err(|_| WrongEmptyTreeHash); } (Ordering::Less, false, true) => return 
Err(UnexpectedEmptyProof), (Ordering::Less, false, false) => {} } // find the largest power of two smaller than new_size. // - shift: power of two (k in RFC-6962) // - inner: number of hashes in the proof that correspond to the inner part, the hashes // needed to reconstruct the subtree up to the divergence point // - border: number of hashes in the proof that correspond to the border part, the hashes // needed to be reconstructed let shift = old_size.trailing_zeros() as u64; let (inner, border) = Self::decomp_inclusion_proof(old_size - 1, new_size); let inner = inner - shift; // The proof includes the root hash for the sub-tree of size 2^shift. // Unless old_size is that very 2^shift. // - start: offsef into the proof array where the actual path hashes begin // - seed: starting hash for reconstructing the old tree root let (seed, start) = if old_size == 1 << shift { // smaller tree is a perfect subtree (old_root, 0) } else { // seed: use first hash for the proof // start: 1, skip the first hash as we are using it as the seed (&proof_hashes[0], 1) }; // check that the proof has the correct number of hashes. This prevents too short or too // long proofs which could indicate tampering or errors. Needed after unwinding from the // recursive proof algorithm. match (proof_hashes.len() as u64, start + inner + border) { (got, want) if got != want => return Err(WrongProofSize { got, want }), _ => {} } let proof = &proof_hashes[start as usize..]; // mask determines which direction (left or right) to combine the hashes as you walk up // the tree from the seed hash, encoding the path from the subtree to the tree root. // It skips the bits that are always zero due to the subtree's alignment. // if: // - mask is 0 (old_size is power of two): take the hash as is without combining. 
// Else, take bit matching the level, starting with less significant bits: // - mask bit matching the level is 0: propagate hash // - mask bit matching the level is 1: combine with proof let mask = (old_size - 1) >> shift; // verify the old hash is correct let hash1 = Self::chain_inner_right(seed, &proof[..inner as usize], mask); let hash1 = Self::chain_border_right(&hash1, &proof[inner as usize..]); Self::verify_match(&hash1, old_root).map_err(|_| MismatchedRoot { got: old_root.encode_hex(), expected: hash1.encode_hex(), })?; // verify the new hash is correct let hash2 = Self::chain_inner(seed, &proof[..inner as usize], mask); let hash2 = Self::chain_border_right(&hash2, &proof[inner as usize..]); Self::verify_match(&hash2, new_root).map_err(|_| MismatchedRoot { got: new_root.encode_hex(), expected: hash2.encode_hex(), })?; Ok(()) } /// `chain_inner` computes a subtree hash for a node on or below the tree's right /// border. Assumes `proof_hashes` are ordered from lower levels to upper, and /// `seed` is the initial subtree/leaf hash on the path located at the specified /// `index` on its level. fn chain_inner(seed: &O, proof_hashes: &[O], index: u64) -> O { proof_hashes .iter() .enumerate() .fold(seed.clone(), |seed, (i, h)| { let (left, right) = if ((index >> i) & 1) == 0 { // mask bit is 0: hash( seed || proof_hash ) (&seed, h) } else { // mask bit is 1: hash( proof_hash || seed ) (h, &seed) }; Self::hash_children(left, right) }) } /// `chain_inner_right` computes a subtree hash like `chain_inner`, but only takes /// hashes to the left from the path into consideration, which effectively means /// the result is a hash of the corresponding earlier version of this subtree. 
fn chain_inner_right(seed: &O, proof_hashes: &[O], index: u64) -> O {
        proof_hashes
            .iter()
            .enumerate()
            .fold(seed.clone(), |seed, (i, h)| {
                if ((index >> i) & 1) == 1 {
                    // mask bit is 1: hash( proof_hash || seed )
                    Self::hash_children(h, seed)
                } else {
                    // mask bit is 0: the right sibling lies past the old tree's
                    // border, so propagate the seed upwards unchanged
                    seed
                }
            })
    }

    /// `chain_border_right` chains proof hashes along tree borders. This differs from
    /// inner chaining because `proof` contains only left-side subtree hashes.
    /// Used to finish the path up to the root after the walk done using the mask.
    fn chain_border_right(seed: &O, proof_hashes: &[O]) -> O {
        // always combine as hash( proof_hash || seed )
        proof_hashes
            .iter()
            .fold(seed.clone(), |seed, h| Self::hash_children(h, seed))
    }

    /// `decomp_inclusion_proof` breaks down inclusion proof for a leaf at the specified
    /// `index` in a tree of the specified `size` into 2 components. The splitting
    /// point between them is where paths to leaves `index` and `tree_size-1` diverge.
    /// Returns lengths of the bottom and upper proof parts correspondingly. The sum
    /// of the two determines the correct length of the inclusion proof.
    fn decomp_inclusion_proof(index: u64, tree_size: u64) -> (u64, u64) {
        let inner: u64 = Self::inner_proof_size(index, tree_size);
        // border: one hash per set bit of `index` above the divergence point,
        // i.e. one for every level where the path node is a right child
        let border = (index >> inner).count_ones() as u64;
        (inner, border)
    }

    // `inner_proof_size` computes the number of inner levels (hashes) required in the audit path
    // given a leaf at index in a tree of tree_size
    fn inner_proof_size(index: u64, tree_size: u64) -> u64 {
        // return the position of the highest differing bit, which is the number of inner proof
        // steps.
u64::BITS as u64 - ((index ^ (tree_size - 1)).leading_zeros() as u64) } } impl MerkleProofVerifier> for T where T: Digest {} #[cfg(test)] mod test_verify { use crate::crypto::merkle::rfc6962::Rfc6269HasherTrait; use crate::crypto::merkle::{MerkleProofVerifier, Rfc6269Default}; use hex_literal::hex; #[derive(Debug)] struct InclusionProofTestVector<'a> { leaf: u64, size: u64, proof: &'a [[u8; 32]], } #[derive(Debug)] struct ConsistencyTestVector<'a> { size1: u64, size2: u64, proof: &'a [[u8; 32]], } // InclusionProbe is a parameter set for inclusion proof verification. #[derive(Debug)] struct InclusionProbe { leaf_index: u64, tree_size: u64, root: [u8; 32], leaf_hash: [u8; 32], proof: Vec<[u8; 32]>, desc: &'static str, } // ConsistencyProbe is a parameter set for consistency proof verification. #[derive(Debug)] struct ConsistencyProbe<'a> { size1: u64, size2: u64, root1: &'a [u8; 32], root2: &'a [u8; 32], proof: Vec<[u8; 32]>, desc: &'static str, } const SHA256_SOME_HASH: [u8; 32] = hex!("abacaba000000000000000000000000000000000000000000060061e00123456"); const SHA256_EMPTY_TREE_HASH: [u8; 32] = hex!("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); const ZERO_HASH: [u8; 32] = [0; 32]; const INCLUSION_PROOFS: [InclusionProofTestVector; 6] = [ InclusionProofTestVector { leaf: 0, size: 0, proof: &[], }, InclusionProofTestVector { leaf: 1, size: 1, proof: &[], }, InclusionProofTestVector { leaf: 1, size: 8, proof: &[ hex!("96a296d224f285c67bee93c30f8a309157f0daa35dc5b87e410b78630a09cfc7"), hex!("5f083f0a1a33ca076a95279832580db3e0ef4584bdff1f54c8a360f50de3031e"), hex!("6b47aaf29ee3c2af9af889bc1fb9254dabd31177f16232dd6aab035ca39bf6e4"), ], }, InclusionProofTestVector { leaf: 6, size: 8, proof: &[ hex!("bc1a0643b12e4d2d7c77918f44e0f4f79a838b6cf9ec5b5c283e1f4d88599e6b"), hex!("ca854ea128ed050b41b35ffc1b87b8eb2bde461e9e3b5596ece6b9d5975a0ae0"), hex!("d37ee418976dd95753c1c73862b9398fa2a2cf9b4ff0fdfe8b30cd95209614b7"), ], }, InclusionProofTestVector { leaf: 
3, size: 3, proof: &[hex!( "fac54203e7cc696cf0dfcb42c92a1d9dbaf70ad9e621f4bd8d98662f00e3c125" )], }, InclusionProofTestVector { leaf: 2, size: 5, proof: &[ hex!("6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d"), hex!("5f083f0a1a33ca076a95279832580db3e0ef4584bdff1f54c8a360f50de3031e"), hex!("bc1a0643b12e4d2d7c77918f44e0f4f79a838b6cf9ec5b5c283e1f4d88599e6b"), ], }, ]; const CONSISTENCY_PROOFS: [ConsistencyTestVector; 5] = [ ConsistencyTestVector { size1: 1, size2: 1, proof: &[], }, ConsistencyTestVector { size1: 1, size2: 8, proof: &[ hex!("96a296d224f285c67bee93c30f8a309157f0daa35dc5b87e410b78630a09cfc7"), hex!("5f083f0a1a33ca076a95279832580db3e0ef4584bdff1f54c8a360f50de3031e"), hex!("6b47aaf29ee3c2af9af889bc1fb9254dabd31177f16232dd6aab035ca39bf6e4"), ], }, ConsistencyTestVector { size1: 6, size2: 8, proof: &[ hex!("0ebc5d3437fbe2db158b9f126a1d118e308181031d0a949f8dededebc558ef6a"), hex!("ca854ea128ed050b41b35ffc1b87b8eb2bde461e9e3b5596ece6b9d5975a0ae0"), hex!("d37ee418976dd95753c1c73862b9398fa2a2cf9b4ff0fdfe8b30cd95209614b7"), ], }, ConsistencyTestVector { size1: 2, size2: 5, proof: &[ hex!("5f083f0a1a33ca076a95279832580db3e0ef4584bdff1f54c8a360f50de3031e"), hex!("bc1a0643b12e4d2d7c77918f44e0f4f79a838b6cf9ec5b5c283e1f4d88599e6b"), ], }, ConsistencyTestVector { size1: 6, size2: 7, proof: &[ hex!("0ebc5d3437fbe2db158b9f126a1d118e308181031d0a949f8dededebc558ef6a"), hex!("b08693ec2e721597130641e8211e7eedccb4c26413963eee6c1e2ed16ffb1a5f"), hex!("d37ee418976dd95753c1c73862b9398fa2a2cf9b4ff0fdfe8b30cd95209614b7"), ], }, ]; const ROOTS: [[u8; 32]; 8] = [ hex!("6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d"), hex!("fac54203e7cc696cf0dfcb42c92a1d9dbaf70ad9e621f4bd8d98662f00e3c125"), hex!("aeb6bcfe274b70a14fb067a5e5578264db0fa9b51af5e0ba159158f329e06e77"), hex!("d37ee418976dd95753c1c73862b9398fa2a2cf9b4ff0fdfe8b30cd95209614b7"), hex!("4e3bbb1f7b478dcfe71fb631631519a3bca12c9aefca1612bfce4c13a86264d4"), 
hex!("76e67dadbcdf1e10e1b74ddc608abd2f98dfb16fbce75277b5232a127f2087ef"), hex!("ddb89be403809e325750d3d263cd78929c2942b7942a34b77e122c9594a74c8c"), hex!("5dc9da79a70659a9ad559cb701ded9a2ab9d823aad2f4960cfe370eff4604328"), ]; const LEAVES: &[&[u8]] = &[ &hex!(""), &hex!("00"), &hex!("10"), &hex!("2021"), &hex!("3031"), &hex!("40414243"), &hex!("5051525354555657"), &hex!("606162636465666768696a6b6c6d6e6f"), ]; fn corrupt_inclusion_proof( leaf_index: u64, tree_size: u64, proof: &[[u8; 32]], root: &[u8; 32], leaf_hash: &[u8; 32], ) -> Vec { let ret = vec![ // Wrong leaf index. InclusionProbe { leaf_index: leaf_index.wrapping_sub(1), // avoid panic due to underflow tree_size, root: *root, leaf_hash: *leaf_hash, proof: proof.to_vec(), desc: "leaf_index - 1", }, InclusionProbe { leaf_index: leaf_index + 1, tree_size, root: *root, leaf_hash: *leaf_hash, proof: proof.to_vec(), desc: "leaf_index + 1", }, InclusionProbe { leaf_index: leaf_index ^ 2, tree_size, root: *root, leaf_hash: *leaf_hash, proof: proof.to_vec(), desc: "leaf_index ^ 2", }, // Wrong tree height. InclusionProbe { leaf_index, tree_size: tree_size / 2, root: *root, leaf_hash: *leaf_hash, proof: proof.to_vec(), desc: "tree_size / 2", }, // Wrong leaf or root. InclusionProbe { leaf_index, tree_size: tree_size * 2, root: *root, leaf_hash: *leaf_hash, proof: proof.to_vec(), desc: "tree_size * 2", }, InclusionProbe { leaf_index, tree_size, root: *root, leaf_hash: *b"WrongLeafWrongLeafWrongLeafWrong", proof: proof.to_vec(), desc: "wrong leaf", }, InclusionProbe { leaf_index, tree_size, root: SHA256_EMPTY_TREE_HASH, leaf_hash: *leaf_hash, proof: proof.to_vec(), desc: "empty root", }, InclusionProbe { leaf_index, tree_size, root: SHA256_SOME_HASH, leaf_hash: *leaf_hash, proof: proof.to_vec(), desc: "random root", }, // Add garbage at the end. 
InclusionProbe { leaf_index, tree_size, root: *root, leaf_hash: *leaf_hash, proof: [proof.to_vec(), [[0_u8; 32]].to_vec()].concat(), desc: "trailing garbage", }, InclusionProbe { leaf_index, tree_size, root: *root, leaf_hash: *leaf_hash, proof: [proof.to_vec(), [*root].to_vec()].concat(), desc: "trailing root", }, // Add garbage at the front. InclusionProbe { leaf_index, tree_size, root: *root, leaf_hash: *leaf_hash, proof: [[[0_u8; 32]].to_vec(), proof.to_vec()].concat(), desc: "preceding garbage", }, InclusionProbe { leaf_index, tree_size, root: *root, leaf_hash: *leaf_hash, proof: [[*root].to_vec(), proof.to_vec()].concat(), desc: "preceding root", }, ]; ret } fn verifier_check( leaf_index: u64, tree_size: u64, proof_hashes: &[[u8; 32]], root: &[u8; 32], leaf_hash: &[u8; 32], ) -> Result<(), String> { let probes = corrupt_inclusion_proof(leaf_index, tree_size, proof_hashes, root, leaf_hash); let leaf_hash = leaf_hash.into(); let root_hash = root.into(); let proof_hashes = proof_hashes.iter().map(|&h| h.into()).collect::>(); let got = Rfc6269Default::root_from_inclusion_proof( leaf_index, leaf_hash, tree_size, &proof_hashes, ) .map_err(|err| format!("{err:?}"))?; Rfc6269Default::verify_match(got.as_ref(), root_hash) .map_err(|_| format!("roots did not match got: {got:x?} expected: {root:x?}"))?; Rfc6269Default::verify_inclusion( leaf_index, leaf_hash, tree_size, &proof_hashes, root_hash, ) .map_err(|err| format!("{err:?}"))?; // returns Err if any probe is accepted probes .into_iter() .map(|p| { Rfc6269Default::verify_inclusion( p.leaf_index, (&p.leaf_hash).into(), p.tree_size, &p.proof.iter().map(|&h| h.into()).collect::>(), (&p.root).into(), ) .err() .ok_or(format!("accepted incorrect inclusion proof: {:?}", p.desc)) }) .collect::, _>>()?; Ok(()) } fn verifier_consistency_check( size1: u64, size2: u64, proof: &[[u8; 32]], root1: &[u8; 32], root2: &[u8; 32], ) -> Result<(), String> { // Verify original consistency proof. 
let proof_hashes = proof.iter().map(|&h| h.into()).collect::>(); Rfc6269Default::verify_consistency(size1, size2, &proof_hashes, root1.into(), root2.into()) .map_err(|err| format!("incorrectly rejected with {err:?}"))?; // For simplicity test only non-trivial proofs that have root1 != root2, size1 != 0 and size1 != size2. if proof.is_empty() { return Ok(()); } for (i, p) in corrupt_consistency_proof(size1, size2, root1, root2, proof) .iter() .enumerate() { Rfc6269Default::verify_consistency( p.size1, p.size2, &p.proof.iter().map(|&h| h.into()).collect::>(), p.root1.as_slice().into(), p.root2.as_slice().into(), ) .err() .ok_or(format!("[{i} incorrectly accepted: {:?}", p.desc))?; } Ok(()) } fn corrupt_consistency_proof<'a>( size1: u64, size2: u64, root1: &'a [u8; 32], root2: &'a [u8; 32], proof: &[[u8; 32]], ) -> Vec> { let ln = proof.len(); let mut ret = vec![ // Wrong size1. ConsistencyProbe { size1: size1 - 1, size2, root1, root2, proof: proof.to_vec(), desc: "size1 - 1", }, ConsistencyProbe { size1: size1 + 1, size2, root1, root2, proof: proof.to_vec(), desc: "size1 + 1", }, ConsistencyProbe { size1: size1 ^ 2, size2, root1, root2, proof: proof.to_vec(), desc: "size1 ^ 2", }, // Wrong tree height. ConsistencyProbe { size1, size2: size2 * 2, root1, root2, proof: proof.to_vec(), desc: "size2 * 2", }, ConsistencyProbe { size1, size2: size2 / 2, root1, root2, proof: proof.to_vec(), desc: "size2 / 2", }, // Wrong root. ConsistencyProbe { size1, size2, root1: &ZERO_HASH, root2, proof: proof.to_vec(), desc: "wrong root1", }, ConsistencyProbe { size1, size2, root1, root2: &ZERO_HASH, proof: proof.to_vec(), desc: "wrong root2", }, ConsistencyProbe { size1, size2, root1: root2, root2: root1, proof: proof.to_vec(), desc: "swapped roots", }, // Empty proof. ConsistencyProbe { size1, size2, root1, root2, proof: vec![], desc: "empty proof", }, // Add garbage at the end. 
ConsistencyProbe {
                size1,
                size2,
                root1,
                root2,
                proof: [proof, &[ZERO_HASH]].concat(),
                desc: "trailing garbage",
            },
            ConsistencyProbe {
                size1,
                size2,
                root1,
                root2,
                proof: [proof, &[*root1]].concat(),
                desc: "trailing root1",
            },
            ConsistencyProbe {
                size1,
                size2,
                root1,
                root2,
                proof: [proof, &[*root2]].concat(),
                desc: "trailing root2",
            },
            // Add garbage at the front.
            ConsistencyProbe {
                size1,
                size2,
                root1,
                root2,
                proof: [&[ZERO_HASH], proof].concat(),
                desc: "preceding garbage",
            },
            ConsistencyProbe {
                size1,
                size2,
                root1,
                root2,
                proof: [&[*root1], proof].concat(),
                desc: "preceding root1",
            },
            ConsistencyProbe {
                size1,
                size2,
                root1,
                root2,
                proof: [&[*root2], proof].concat(),
                desc: "preceding root2",
            },
            ConsistencyProbe {
                size1,
                size2,
                root1,
                root2,
                proof: [&[proof[0]], proof].concat(),
                desc: "preceding proof[0]",
            },
        ];
        // a truncated proof is only meaningful when there is something to truncate
        if ln > 0 {
            ret.push(ConsistencyProbe {
                size1,
                size2,
                root1,
                root2,
                proof: proof[..ln - 1].to_vec(),
                desc: "truncated proof",
            });
        }
        // add probes whose proofs have the 3rd bit (0x04) of the i-th byte of the i-th hash flipped
        ret.extend((0..ln).map(|i| {
            let mut wrong_proof = proof.to_vec();
            wrong_proof[i][i] ^= 4;
            ConsistencyProbe {
                size1,
                size2,
                root1,
                root2,
                proof: wrong_proof,
                desc: "proof with flipped bit",
            }
        }));
        ret
    }

    // NOTE(review): the helper types are named `Rfc6269*` but implement RFC 6962;
    // likely a transposed digit in the identifiers (renaming would touch all call sites).
    #[test]
    fn test_verify_inclusion_single_entry() {
        let data = b"data";
        let hash = &Rfc6269Default::hash_leaf(data);
        let proof = [];
        let zero_hash = ZERO_HASH.as_slice().into();
        // (root, leaf, expect error): a single-entry tree's root IS the leaf hash
        let test_cases = [
            (hash, hash, false),
            (hash, zero_hash, true),
            (zero_hash, hash, true),
        ];
        for (i, (root, leaf, want_err)) in test_cases.into_iter().enumerate() {
            let res = Rfc6269Default::verify_inclusion(0, leaf, 1, &proof, root);
            assert_eq!(
                res.is_err(),
                want_err,
                "unexpected inclusion proof result {res:?} for case {i:?}"
            )
        }
    }

    #[test]
    fn test_verify_inclusion() {
        let proof = [];
        // (leaf index, tree size) pairs that must all be rejected with an empty proof
        let probes = [(0, 0), (0, 1), (1, 0), (2, 1)];
        probes.into_iter().for_each(|(index, size)| {
            let result = Rfc6269Default::verify_inclusion(
                index,
                SHA256_SOME_HASH.as_slice().into(),
                size,
&proof, ZERO_HASH.as_slice().into(), ); assert!(result.is_err(), "Incorrectly verified invalid root/leaf",); let result = Rfc6269Default::verify_inclusion( index, ZERO_HASH.as_slice().into(), size, &proof, SHA256_EMPTY_TREE_HASH.as_slice().into(), ); assert!(result.is_err(), "Incorrectly verified invalid root/leaf",); let result = Rfc6269Default::verify_inclusion( index, SHA256_SOME_HASH.as_slice().into(), size, &proof, SHA256_EMPTY_TREE_HASH.as_slice().into(), ); assert!(result.is_err(), "Incorrectly verified invalid root/leaf"); }); for i in 1..6 { let p = &INCLUSION_PROOFS[i]; let leaf_hash = &Rfc6269Default::hash_leaf(LEAVES[i]).into(); let result = verifier_check( p.leaf - 1, p.size, p.proof, &ROOTS[p.size as usize - 1], leaf_hash, ); assert!(result.is_err(), "{result:?}") } } #[test] fn test_verify_consistency() { let root1 = &[0; 32].into(); let root2 = &[1; 32].into(); let proof1 = [].as_slice(); let proof2 = [SHA256_EMPTY_TREE_HASH.into()]; let empty_tree_hash = &SHA256_EMPTY_TREE_HASH.into(); let test_cases = [ // Same sizes but the root hashes differ. (0, 0, root1, root2, proof1, true), (1, 1, root1, root2, proof1, true), // Sizes that are always consistent. (0, 0, empty_tree_hash, empty_tree_hash, proof1, false), (0, 1, empty_tree_hash, root2, proof1, false), (1, 1, root2, root2, proof1, false), // Time travel to the past. (1, 0, root1, root2, proof1, true), (2, 1, root1, root2, proof1, true), // Empty proof. (1, 2, root1, root2, proof1, true), // Roots don't match with equal size, append-only violated. (0, 0, empty_tree_hash, root2, proof1, true), (1, 1, empty_tree_hash, root2, proof1, true), // Roots match but the proof is not empty. 
(0, 0, empty_tree_hash, empty_tree_hash, &proof2, true), (0, 1, empty_tree_hash, empty_tree_hash, &proof2, true), (1, 1, empty_tree_hash, empty_tree_hash, &proof2, true), ]; for (i, (size1, size2, root1, root2, proof, want_err)) in test_cases.into_iter().enumerate() { let res = Rfc6269Default::verify_consistency(size1, size2, proof, root1, root2); assert_eq!( res.is_err(), want_err, "unexpected proof result {res:?}, case {i}" ); } for p in CONSISTENCY_PROOFS.into_iter() { let result = verifier_consistency_check( p.size1, p.size2, p.proof, &ROOTS[p.size1 as usize - 1], &ROOTS[p.size2 as usize - 1], ); assert!(result.is_ok(), "failed with error: {result:?}"); } } } ================================================ FILE: src/crypto/merkle/rfc6962.rs ================================================ use super::rfc6962::Rfc6269HashPrefix::{RFC6962LeafHashPrefix, RFC6962NodeHashPrefix}; use digest::Output; use sha2::{Digest, Sha256}; /// This is the prefix that gets added to the data before the hash is calculated. #[repr(u8)] enum Rfc6269HashPrefix { RFC6962LeafHashPrefix = 0, RFC6962NodeHashPrefix = 1, } /// Trait that represents the [Merkle tree operations as defined in RFC6962](https://www.rfc-editor.org/rfc/rfc6962.html#section-2.1). pub(crate) trait Rfc6269HasherTrait { /// Hashing an empty root is equivalent to hashing an empty string. fn empty_root() -> O; /// Leaf hashes are calculated the following way: `hash(0x00 || leaf)`. fn hash_leaf(leaf: impl AsRef<[u8]>) -> O; /// The hash of nodes with children is calculated recursively as: `hash(0x01 || left || right)`. 
fn hash_children(left: impl AsRef<[u8]>, right: impl AsRef<[u8]>) -> O; } impl Rfc6269HasherTrait> for T where T: Digest, { fn empty_root() -> Output { T::new().finalize() } fn hash_leaf(leaf: impl AsRef<[u8]>) -> Output { T::new() .chain_update([RFC6962LeafHashPrefix as u8]) .chain_update(leaf) .finalize() } fn hash_children(left: impl AsRef<[u8]>, right: impl AsRef<[u8]>) -> Output { T::new() .chain_update([RFC6962NodeHashPrefix as u8]) .chain_update(left) .chain_update(right) .finalize() } } /// RFC6962 uses SHA-256 as the default hash-function. pub(crate) type Rfc6269Default = Sha256; /// These tests were taken from the [transparency-dev Merkle implementation](https://github.com/transparency-dev/merkle/blob/036047b5d2f7faf3b1ee643d391e60fe5b1defcf/rfc6962/rfc6962_test.go). #[cfg(test)] mod test_rfc6962 { use crate::crypto::merkle::Rfc6269Default; use crate::crypto::merkle::rfc6962::Rfc6269HasherTrait; use hex_literal::hex; #[derive(Debug, PartialEq)] struct TestCase { pub desc: String, pub got: [u8; 32], pub want: [u8; 32], } #[test] fn test_hasher() { let leaf_hash = Rfc6269Default::hash_leaf(b"L123456"); let empty_leaf_hash = Rfc6269Default::hash_leaf(b""); let test_cases: Vec<_> = [ TestCase { desc: "RFC6962 Empty".to_string(), want: hex!("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"), got: Rfc6269Default::empty_root().into(), }, TestCase { desc: "RFC6962 Empty Leaf".to_string(), want: hex!("6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d"), got: empty_leaf_hash.into(), }, TestCase { desc: "RFC6962 Leaf".to_string(), want: hex!("395aa064aa4c29f7010acfe3f25db9485bbd4b91897b6ad7ad547639252b4d56"), got: leaf_hash.into(), }, TestCase { desc: "RFC6962 Node".to_string(), want: hex!("aa217fe888e47007fa15edab33c2b492a722cb106c64667fc2b044444de66bbb"), got: Rfc6269Default::hash_children(b"N123", b"N456").into(), }, ] .into_iter() .filter(|tc| tc.got != tc.want) .collect(); assert_eq!(test_cases.len(), 0, "failed tests: 
{test_cases:?}") } #[test] fn test_collisions() { let l1 = b"Hello".to_vec(); let l2 = b"World".to_vec(); let hash1 = Rfc6269Default::hash_leaf(&l1); let hash2 = Rfc6269Default::hash_leaf(&l2); assert_ne!(hash1, hash2, "got identical hashes for different leafs"); let sub_hash1 = Rfc6269Default::hash_children(&l1, &l2); let sub_hash2 = Rfc6269Default::hash_children(&l2, &l1); assert_ne!(sub_hash1, sub_hash2, "got same hash for different order"); let forged_hash = Rfc6269Default::hash_leaf([l1, l2].concat()); assert_ne!( sub_hash1, forged_hash, "hasher is not second-preimage resistant" ); } } ================================================ FILE: src/crypto/mod.rs ================================================ // // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Structures and constants required to perform cryptographic operations. use sha2::{Sha256, Sha384}; use crate::errors::*; pub use signing_key::SigStoreSigner; pub use verification_key::CosignVerificationKey; pub(crate) mod merkle; /// Different digital signature algorithms. /// * `RSA_PSS_SHA256`: RSA PSS padding using SHA-256 /// for RSA signatures. All the `usize` member inside /// an RSA enum represents the key size of the RSA key. /// * `RSA_PSS_SHA384`: RSA PSS padding using SHA-384 /// for RSA signatures. /// * `RSA_PSS_SHA512`: RSA PSS padding using SHA-512 /// for RSA signatures. /// * `RSA_PKCS1_SHA256`: PKCS#1 1.5 padding using /// SHA-256 for RSA signatures. 
/// * `RSA_PKCS1_SHA384`: PKCS#1 1.5 padding using /// SHA-384 for RSA signatures. /// * `RSA_PKCS1_SHA512`: PKCS#1 1.5 padding using /// SHA-512 for RSA signatures. /// * `ECDSA_P256_SHA256_ASN1`: ASN.1 DER-encoded ECDSA /// signatures using the P-256 curve and SHA-256. It /// is the default signing scheme. /// * `ECDSA_P384_SHA384_ASN1`: ASN.1 DER-encoded ECDSA /// signatures using the P-384 curve and SHA-384. /// * `ED25519`: ECDSA signature using SHA2-512 /// as the digest function and curve edwards25519. The /// signature format please refer /// to [RFC 8032](https://www.rfc-editor.org/rfc/rfc8032.html#section-5.1.6). #[allow(non_camel_case_types)] #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum SigningScheme { RSA_PSS_SHA256(usize), RSA_PSS_SHA384(usize), RSA_PSS_SHA512(usize), RSA_PKCS1_SHA256(usize), RSA_PKCS1_SHA384(usize), RSA_PKCS1_SHA512(usize), ECDSA_P256_SHA256_ASN1, ECDSA_P384_SHA384_ASN1, ED25519, } impl std::fmt::Display for SigningScheme { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { SigningScheme::RSA_PSS_SHA256(_) => write!(f, "RSA_PSS_SHA256"), SigningScheme::RSA_PSS_SHA384(_) => write!(f, "RSA_PSS_SHA384"), SigningScheme::RSA_PSS_SHA512(_) => write!(f, "RSA_PSS_SHA512"), SigningScheme::RSA_PKCS1_SHA256(_) => write!(f, "RSA_PKCS1_SHA256"), SigningScheme::RSA_PKCS1_SHA384(_) => write!(f, "RSA_PKCS1_SHA384"), SigningScheme::RSA_PKCS1_SHA512(_) => write!(f, "RSA_PKCS1_SHA512"), SigningScheme::ECDSA_P256_SHA256_ASN1 => write!(f, "ECDSA_P256_SHA256_ASN1"), SigningScheme::ECDSA_P384_SHA384_ASN1 => write!(f, "ECDSA_P384_SHA384_ASN1"), SigningScheme::ED25519 => write!(f, "ED25519"), } } } impl TryFrom<&str> for SigningScheme { type Error = String; fn try_from(value: &str) -> std::result::Result { match value { "ECDSA_P256_SHA256_ASN1" => Ok(Self::ECDSA_P256_SHA256_ASN1), "ECDSA_P384_SHA384_ASN1" => Ok(Self::ECDSA_P384_SHA384_ASN1), "ED25519" => Ok(Self::ED25519), "RSA_PSS_SHA256" => 
Ok(Self::RSA_PSS_SHA256(DEFAULT_KEY_SIZE)), "RSA_PSS_SHA384" => Ok(Self::RSA_PSS_SHA384(DEFAULT_KEY_SIZE)), "RSA_PSS_SHA512" => Ok(Self::RSA_PSS_SHA512(DEFAULT_KEY_SIZE)), "RSA_PKCS1_SHA256" => Ok(Self::RSA_PKCS1_SHA256(DEFAULT_KEY_SIZE)), "RSA_PKCS1_SHA384" => Ok(Self::RSA_PKCS1_SHA384(DEFAULT_KEY_SIZE)), "RSA_PKCS1_SHA512" => Ok(Self::RSA_PKCS1_SHA512(DEFAULT_KEY_SIZE)), unknown => Err(format!("Unsupported signing algorithm: {unknown}")), } } } impl SigningScheme { /// Create a key-pair due to the given signing scheme. pub fn create_signer(&self) -> Result { Ok(match self { SigningScheme::ECDSA_P256_SHA256_ASN1 => SigStoreSigner::ECDSA_P256_SHA256_ASN1( EcdsaSigner::<_, Sha256>::from_ecdsa_keys(&EcdsaKeys::::new()?)?, ), SigningScheme::ECDSA_P384_SHA384_ASN1 => SigStoreSigner::ECDSA_P384_SHA384_ASN1( EcdsaSigner::<_, Sha384>::from_ecdsa_keys(&EcdsaKeys::::new()?)?, ), SigningScheme::ED25519 => { SigStoreSigner::ED25519(Ed25519Signer::from_ed25519_keys(&Ed25519Keys::new()?)?) } SigningScheme::RSA_PSS_SHA256(bit_size) => { SigStoreSigner::RSA_PSS_SHA256(RSASigner::from_rsa_keys( &RSAKeys::new(*bit_size)?, DigestAlgorithm::Sha256, PaddingScheme::PSS, )) } SigningScheme::RSA_PSS_SHA384(bit_size) => { SigStoreSigner::RSA_PSS_SHA384(RSASigner::from_rsa_keys( &RSAKeys::new(*bit_size)?, DigestAlgorithm::Sha384, PaddingScheme::PSS, )) } SigningScheme::RSA_PSS_SHA512(bit_size) => { SigStoreSigner::RSA_PSS_SHA512(RSASigner::from_rsa_keys( &RSAKeys::new(*bit_size)?, DigestAlgorithm::Sha512, PaddingScheme::PSS, )) } SigningScheme::RSA_PKCS1_SHA256(bit_size) => { SigStoreSigner::RSA_PKCS1_SHA256(RSASigner::from_rsa_keys( &RSAKeys::new(*bit_size)?, DigestAlgorithm::Sha256, PaddingScheme::PKCS1v15, )) } SigningScheme::RSA_PKCS1_SHA384(bit_size) => { SigStoreSigner::RSA_PKCS1_SHA384(RSASigner::from_rsa_keys( &RSAKeys::new(*bit_size)?, DigestAlgorithm::Sha384, PaddingScheme::PKCS1v15, )) } SigningScheme::RSA_PKCS1_SHA512(bit_size) => { 
SigStoreSigner::RSA_PKCS1_SHA512(RSASigner::from_rsa_keys( &RSAKeys::new(*bit_size)?, DigestAlgorithm::Sha512, PaddingScheme::PKCS1v15, )) } }) } } /// The default signature verification algorithm used by Sigstore. /// Sigstore relies on NIST P-256 /// NIST P-256 is a Weierstrass curve specified in [FIPS 186-4: Digital Signature Standard (DSS)](https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-4.pdf). /// Also known as prime256v1 (ANSI X9.62) and secp256r1 (SECG) impl Default for SigningScheme { fn default() -> Self { SigningScheme::ECDSA_P256_SHA256_ASN1 } } /// A signature produced by a private key pub enum Signature<'a> { /// Raw signature. There's no need to process the contents Raw(&'a [u8]), /// A base64 encoded signature Base64Encoded(&'a [u8]), } #[cfg(feature = "cert")] pub(crate) mod certificate; #[cfg(feature = "cert")] pub(crate) mod certificate_pool; #[cfg(feature = "cert")] pub(crate) use certificate_pool::CertificatePool; #[cfg(feature = "cert")] pub(crate) mod keyring; pub mod verification_key; use self::signing_key::{ ecdsa::ec::{EcdsaKeys, EcdsaSigner}, ed25519::{Ed25519Keys, Ed25519Signer}, rsa::{DEFAULT_KEY_SIZE, DigestAlgorithm, PaddingScheme, RSASigner, keypair::RSAKeys}, }; pub mod signing_key; #[cfg(any(feature = "sign", feature = "verify"))] pub(crate) mod transparency; #[cfg(test)] pub(crate) mod tests { use chrono::{DateTime, TimeDelta, Utc}; use openssl::asn1::{Asn1Integer, Asn1Time}; use openssl::bn::{BigNum, MsbOption}; use openssl::conf::{Conf, ConfMethod}; use openssl::ec::{EcGroup, EcKey}; use openssl::hash::MessageDigest; use openssl::nid::Nid; use openssl::pkey::{self, Id, PKey}; use openssl::x509::extension::{ AuthorityKeyIdentifier, BasicConstraints, ExtendedKeyUsage, KeyUsage, SubjectAlternativeName, SubjectKeyIdentifier, }; use openssl::x509::{X509, X509Extension, X509NameBuilder}; pub(crate) const PUBLIC_KEY: &str = r#"-----BEGIN PUBLIC KEY----- MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAENptdY/l3nB0yqkXLBWkZWQwo6+cu 
OSWS1X9vPavpiQOoTTGC0xX57OojUadxF1cdQmrsiReWg2Wn4FneJfa8xw== -----END PUBLIC KEY-----"#; pub(crate) struct CertData { pub cert: X509, pub private_key: pkey::PKey, } pub(crate) struct CertGenerationOptions { pub digital_signature_key_usage: bool, pub code_signing_extended_key_usage: bool, pub subject_email: Option, pub subject_url: Option, //TODO: remove macro once https://github.com/sfackler/rust-openssl/issues/1411 //is fixed #[allow(dead_code)] pub subject_issuer: Option, pub not_before: DateTime, pub not_after: DateTime, pub private_key: pkey::PKey, pub public_key: pkey::PKey, } impl Default for CertGenerationOptions { fn default() -> Self { let not_before = Utc::now() .checked_sub_signed(TimeDelta::try_days(1).unwrap()) .unwrap(); let not_after = Utc::now() .checked_add_signed(TimeDelta::try_days(1).unwrap()) .unwrap(); // Sigstore relies on NIST P-256 // NIST P-256 is a Weierstrass curve specified in FIPS 186-4: Digital Signature Standard (DSS): // https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-4.pdf // Also known as prime256v1 (ANSI X9.62) and secp256r1 (SECG) let (private_key, public_key) = generate_ecdsa_p256_keypair(); CertGenerationOptions { digital_signature_key_usage: true, code_signing_extended_key_usage: true, subject_email: Some(String::from("tests@sigstore-rs.dev")), subject_issuer: Some(String::from("https://sigstore.dev/oauth")), subject_url: None, not_before, not_after, private_key, public_key, } } } pub(crate) fn generate_ecdsa_p256_keypair() -> (pkey::PKey, pkey::PKey) { let group = EcGroup::from_curve_name(Nid::X9_62_PRIME256V1).expect("Cannot create EcGroup"); let ec_private_key = EcKey::generate(&group).expect("Cannot create private key"); let ec_public_key = ec_private_key.public_key(); let ec_pub_key = EcKey::from_public_key(&group, ec_public_key).expect("Cannot create ec pub key"); let public_key = pkey::PKey::from_ec_key(ec_pub_key).expect("Cannot create pkey"); let private_key = 
pkey::PKey::from_ec_key(ec_private_key).expect("Cannot create pkey"); (private_key, public_key) } pub(crate) fn generate_ecdsa_p384_keypair() -> (pkey::PKey, pkey::PKey) { let group = EcGroup::from_curve_name(Nid::SECP384R1).expect("Cannot create EcGroup"); let ec_private_key = EcKey::generate(&group).expect("Cannot create private key"); let ec_public_key = ec_private_key.public_key(); let ec_pub_key = EcKey::from_public_key(&group, ec_public_key).expect("Cannot create ec pub key"); let public_key = pkey::PKey::from_ec_key(ec_pub_key).expect("Cannot create pkey"); let private_key = pkey::PKey::from_ec_key(ec_private_key).expect("Cannot create pkey"); (private_key, public_key) } pub(crate) fn generate_ed25519_keypair() -> (pkey::PKey, pkey::PKey) { let private_key = PKey::generate_ed25519().expect("Cannot create private key"); let public_key = private_key .raw_public_key() .expect("Cannot export public key"); let public_key = PKey::public_key_from_raw_bytes(&public_key, Id::ED25519) .expect("Cannot create ec pub key"); (private_key, public_key) } pub(crate) fn generate_rsa_keypair( bits: u32, ) -> (pkey::PKey, pkey::PKey) { use openssl::rsa; let rsa_private_key = rsa::Rsa::generate(bits).expect("Cannot generate RSA key"); let rsa_public_key_pem = rsa_private_key .public_key_to_pem() .expect("Cannot obtain public key"); let rsa_public_key = rsa::Rsa::public_key_from_pem(&rsa_public_key_pem) .expect("Cannot create rsa_public_key"); let private_key = pkey::PKey::from_rsa(rsa_private_key).expect("cannot create private_key"); let public_key = pkey::PKey::from_rsa(rsa_public_key).expect("cannot create public_key"); (private_key, public_key) } pub(crate) fn generate_dsa_keypair( bits: u32, ) -> (pkey::PKey, pkey::PKey) { use openssl::dsa; let dsa_private_key = dsa::Dsa::generate(bits).expect("Cannot generate DSA key"); let dsa_public_key_pem = dsa_private_key .public_key_to_pem() .expect("Cannot obtain public key"); let dsa_public_key = 
dsa::Dsa::public_key_from_pem(&dsa_public_key_pem) .expect("Cannot create rsa_public_key"); let private_key = pkey::PKey::from_dsa(dsa_private_key).expect("cannot create private_key"); let public_key = pkey::PKey::from_dsa(dsa_public_key).expect("cannot create public_key"); (private_key, public_key) } pub(crate) fn generate_certificate( issuer: Option<&CertData>, settings: CertGenerationOptions, ) -> anyhow::Result { let mut x509_name_builder = X509NameBuilder::new()?; x509_name_builder.append_entry_by_text("O", "tests")?; x509_name_builder.append_entry_by_text("CN", "sigstore.test")?; let x509_name = x509_name_builder.build(); let mut x509_builder = openssl::x509::X509::builder()?; x509_builder.set_subject_name(&x509_name)?; x509_builder .set_pubkey(&settings.public_key) .expect("Cannot set public key"); // set serial number let mut big = BigNum::new().expect("Cannot create BigNum"); big.rand(152, MsbOption::MAYBE_ZERO, true)?; let serial_number = Asn1Integer::from_bn(&big)?; x509_builder.set_serial_number(&serial_number)?; // set version 3 x509_builder.set_version(2)?; // x509 v3 extensions let conf = Conf::new(ConfMethod::default())?; let x509v3_context = match issuer { Some(issuer_data) => x509_builder.x509v3_context(Some(&issuer_data.cert), Some(&conf)), None => x509_builder.x509v3_context(None, Some(&conf)), }; let mut extensions: Vec = Vec::new(); let x509_extension_subject_key_identifier = SubjectKeyIdentifier::new().build(&x509v3_context)?; extensions.push(x509_extension_subject_key_identifier); // CA usage if issuer.is_none() { // CA usage let x509_basic_constraint_ca = BasicConstraints::new().critical().ca().pathlen(1).build()?; extensions.push(x509_basic_constraint_ca); } else { let x509_basic_constraint_ca = BasicConstraints::new().critical().build()?; extensions.push(x509_basic_constraint_ca); } // set key usage if issuer.is_some() { if settings.digital_signature_key_usage { let key_usage = KeyUsage::new().critical().digital_signature().build()?; 
extensions.push(key_usage); } if settings.code_signing_extended_key_usage { let extended_key_usage = ExtendedKeyUsage::new().code_signing().build()?; extensions.push(extended_key_usage); } } else { let key_usage = KeyUsage::new() .critical() .crl_sign() .key_cert_sign() .build()?; extensions.push(key_usage); } // extensions that diverge, based on whether we're creating the CA or // a certificate issued by it if issuer.is_none() { } else { let x509_extension_authority_key_identifier = AuthorityKeyIdentifier::new() .keyid(true) .build(&x509v3_context)?; extensions.push(x509_extension_authority_key_identifier); if settings.subject_email.is_some() && settings.subject_url.is_some() { panic!( "cosign doesn't generate certificates with a SAN that has both email and url" ); } if let Some(email) = settings.subject_email { let x509_extension_san = SubjectAlternativeName::new() .critical() .email(&email) .build(&x509v3_context)?; extensions.push(x509_extension_san); }; if let Some(url) = settings.subject_url { let x509_extension_san = SubjectAlternativeName::new() .critical() .uri(&url) .build(&x509v3_context)?; extensions.push(x509_extension_san); } // // TODO: uncomment once https://github.com/sfackler/rust-openssl/issues/1411 // is fixed. 
This would allow to test also the parsing of the custom fields // added to certificate extensions //if let Some(subject_issuer) = settings.subject_issuer { // let sigstore_issuer_asn1_obj = Asn1Object::from_str("1.3.6.1.4.1.57264.1.1")?; //&SIGSTORE_ISSUER_OID.to_string())?; // let value = format!("ASN1:UTF8String:{}", subject_issuer); // let sigstore_subject_issuer_extension = X509Extension::new_nid( // None, // Some(&x509v3_context), // sigstore_issuer_asn1_obj.nid(), // //&subject_issuer, // &value, // )?; // extensions.push(sigstore_subject_issuer_extension); //} } for ext in extensions { x509_builder.append_extension(ext)?; } // setup validity let not_before = Asn1Time::from_unix(settings.not_before.timestamp())?; let not_after = Asn1Time::from_unix(settings.not_after.timestamp())?; x509_builder.set_not_after(¬_after)?; x509_builder.set_not_before(¬_before)?; // set issuer if let Some(issuer_data) = issuer { let issuer_name = issuer_data.cert.subject_name(); x509_builder.set_issuer_name(issuer_name)?; } else { // self signed cert x509_builder.set_issuer_name(&x509_name)?; } // sign the cert let issuer_pkey = match issuer { Some(issuer_data) => issuer_data.private_key.clone(), None => settings.private_key.clone(), }; x509_builder .sign(&issuer_pkey, MessageDigest::sha256()) .expect("Cannot sign certificate"); let x509 = x509_builder.build(); Ok(CertData { cert: x509, private_key: settings.private_key, }) } } ================================================ FILE: src/crypto/signing_key/ecdsa/ec.rs ================================================ // // Copyright 2022 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! # ECDSA Keys in Generic Types //! //! This is a wrapper for Rust Crypto. Basically it //! is implemented using generic types and traits. Generic types //! may let the user to manually include concrete crates like //! `p256`, `p384`, `digest`, etc. This is unfriendly to users. //! To make it easier for an user to use, there are two wrappers: //! * The [`EcdsaKeys`] generic struct is wrapped in an enum named [`ECDSAKeys`]. //! * The [`EcdsaSigner`] generic struct is wrapped in an enum named [`super::SigStoreSigner`]. //! //! The [`ECDSAKeys`] has two enums due to their underlying elliptic curves, s.t. //! * `P256` //! * `P384` //! //! To have an uniform interface for all kinds of asymmetric keys, [`ECDSAKeys`] //! is also wrapped in [`super::super::SigStoreKeyPair`] enum. //! //! The [`super::SigStoreSigner`] enum includes two enums for [`EcdsaSigner`]: //! * `ECDSA_P256_SHA256_ASN1` //! * `ECDSA_P384_SHA384_ASN1` //! //! # EC Key Pair Operations //! //! *Not recommend to directly use this mod. Use [`ECDSAKeys`], [`super::super::SigStoreKeyPair`] for //! key pair and [`super::SigStoreSigner`] for signing instead* //! //! When to generate an EC key pair, a specific elliptic curve //! should be chosen. Supported elliptic curves are listed //! . //! //! For example, use `P256` as elliptic curve, and `ECDSA_P256_SHA256_ASN1` as //! signing scheme //! //! ```rust //! use sigstore::crypto::signing_key::{ecdsa::ec::{EcdsaKeys,EcdsaSigner}, KeyPair, Signer}; //! //! let ec_key_pair = EcdsaKeys::::new().unwrap(); //! //! // export the pem encoded public key. //! 
let pubkey = ec_key_pair.public_key_to_pem().unwrap(); //! //! // export the private key using sigstore encryption. //! let privkey = ec_key_pair.private_key_to_encrypted_pem(b"password").unwrap(); //! //! // sign with the new key, using Sha256 as the digest scheme. //! // In fact, the signing scheme is ECDSA_P256_SHA256_ASN1 here. //! let ec_signer = EcdsaSigner::<_, sha2::Sha256>::from_ecdsa_keys(&ec_key_pair).unwrap(); //! //! let signature = ec_signer.sign(b"some message"); //! ``` use std::{marker::PhantomData, ops::Add}; use digest::{ Digest, FixedOutput, FixedOutputReset, core_api::BlockSizeUser, typenum::{ UInt, UTerm, bit::{B0, B1}, }, }; use ecdsa::{ PrimeCurve, SignatureSize, SigningKey, hazmat::{DigestPrimitive, SignPrimitive}, }; #[allow(deprecated)] use elliptic_curve::generic_array::ArrayLength; use elliptic_curve::{ AffinePoint, Curve, CurveArithmetic, FieldBytesSize, PublicKey, Scalar, SecretKey, bigint::ArrayEncoding, ops::{Invert, Reduce}, sec1::{FromEncodedPoint, ModulusSize, ToEncodedPoint}, subtle::CtOption, zeroize::Zeroizing, }; use pkcs8::{AssociatedOid, DecodePrivateKey, EncodePrivateKey, EncodePublicKey}; use signature::DigestSigner; use crate::{ crypto::{ SigningScheme, signing_key::{ COSIGN_PRIVATE_KEY_PEM_LABEL, KeyPair, PRIVATE_KEY_PEM_LABEL, SIGSTORE_PRIVATE_KEY_PEM_LABEL, Signer, kdf, }, verification_key::CosignVerificationKey, }, errors::*, }; use super::ECDSAKeys; /// The generic parameter for `C` can be chosen from the following: /// * `p256::NistP256`: `P-256`, also known as `secp256r1` or `prime256v1`. /// * `p384::NistP384`: `P-384`, also known as `secp384r1`. /// /// More elliptic curves, please refer to /// . 
#[derive(Clone, Debug)] pub struct EcdsaKeys where C: Curve + CurveArithmetic + pkcs8::AssociatedOid, { ec_seckey: SecretKey, public_key: PublicKey, } impl EcdsaKeys where C: Curve + AssociatedOid + CurveArithmetic + PrimeCurve, AffinePoint: FromEncodedPoint + ToEncodedPoint, FieldBytesSize: ModulusSize, { /// Create a new `EcdsaKeys` Object, the generic parameter indicates /// the elliptic curve. Please refer to /// for curves. /// The secret key (private key) will be randomly /// generated. pub fn new() -> Result { let ec_seckey: SecretKey = SecretKey::random(&mut rand::rngs::OsRng); let public_key = ec_seckey.public_key(); Ok(EcdsaKeys { ec_seckey, public_key, }) } /// Builds a `EcdsaKeys` from encrypted pkcs8 PEM-encoded private key. /// The label should be [`COSIGN_PRIVATE_KEY_PEM_LABEL`] or /// [`SIGSTORE_PRIVATE_KEY_PEM_LABEL`]. pub fn from_encrypted_pem(private_key: &[u8], password: &[u8]) -> Result { let key = pem::parse(private_key)?; match key.tag() { COSIGN_PRIVATE_KEY_PEM_LABEL | SIGSTORE_PRIVATE_KEY_PEM_LABEL => { let der = kdf::decrypt(key.contents(), password)?; let pkcs8 = pkcs8::PrivateKeyInfo::try_from(&der[..]).map_err(|e| { SigstoreError::PKCS8Error(format!("Read PrivateKeyInfo failed: {e}")) })?; let ec_seckey = SecretKey::::from_sec1_der(pkcs8.private_key)?; Self::from_private_key(ec_seckey) } PRIVATE_KEY_PEM_LABEL if password.is_empty() => Self::from_pem(private_key), PRIVATE_KEY_PEM_LABEL if !password.is_empty() => { Err(SigstoreError::PrivateKeyDecryptError( "Unencrypted private key but password provided".into(), )) } tag => Err(SigstoreError::PrivateKeyDecryptError(format!( "Unsupported pem tag {tag}" ))), } } /// Builds a `EcdsaKeys` from a pkcs8 PEM-encoded private key. 
/// The label of PEM should be [`PRIVATE_KEY_PEM_LABEL`] pub fn from_pem(pem_data: &[u8]) -> Result { let pem_data = std::str::from_utf8(pem_data)?; let (label, document) = pkcs8::SecretDocument::from_pem(pem_data) .map_err(|e| SigstoreError::PKCS8DerError(e.to_string()))?; match label { PRIVATE_KEY_PEM_LABEL => { let ec_seckey = SecretKey::::from_pkcs8_der(document.as_bytes()).map_err(|e| { SigstoreError::PKCS8Error(format!( "Convert from pkcs8 pem to ecdsa private key failed: {e}" )) })?; Self::from_private_key(ec_seckey) } tag => Err(SigstoreError::PrivateKeyDecryptError(format!( "Unsupported pem tag {tag}" ))), } } /// Builds a `EcdsaKeys` from a pkcs8 asn.1 private key. pub fn from_der(private_key: &[u8]) -> Result { let ec_seckey = SecretKey::::from_pkcs8_der(private_key).map_err(|e| { SigstoreError::PKCS8Error(format!( "Convert from pkcs8 der to ecdsa private key failed: {e}" )) })?; Self::from_private_key(ec_seckey) } /// Builds a `EcdsaKeys` from a private key. fn from_private_key(ec_seckey: SecretKey) -> Result { let public_key = ec_seckey.public_key(); Ok(Self { ec_seckey, public_key, }) } /// Convert the [`EcdsaKeys`] into [`ECDSAKeys`]. pub fn to_wrapped_ecdsa_keys(&self) -> Result { let priv_key = self.private_key_to_der()?; ECDSAKeys::from_der(&priv_key[..]) } } impl KeyPair for EcdsaKeys where C: Curve + AssociatedOid + CurveArithmetic + PrimeCurve, AffinePoint: FromEncodedPoint + ToEncodedPoint, FieldBytesSize: ModulusSize, { /// Return the public key in PEM-encoded SPKI format. fn public_key_to_pem(&self) -> Result { self.public_key .to_public_key_pem(pkcs8::LineEnding::LF) .map_err(|e| SigstoreError::PKCS8SpkiError(e.to_string())) } /// Return the private key in pkcs8 PEM-encoded format. fn private_key_to_pem(&self) -> Result> { self.ec_seckey .to_pkcs8_pem(pkcs8::LineEnding::LF) .map_err(|e| SigstoreError::PKCS8SpkiError(e.to_string())) } /// Return the public key in asn.1 SPKI format. 
fn public_key_to_der(&self) -> Result> { Ok(self .public_key .to_public_key_der() .map_err(|e| SigstoreError::PKCS8SpkiError(e.to_string()))? .to_vec()) } /// Return the private key in asn.1 pkcs8 format. fn private_key_to_der(&self) -> Result>> { let pkcs8 = self .ec_seckey .to_pkcs8_der() .map_err(|e| SigstoreError::PKCS8Error(e.to_string()))?; Ok(pkcs8.to_bytes()) } /// Return the encrypted private key in PEM-encoded format. fn private_key_to_encrypted_pem(&self, password: &[u8]) -> Result> { let der = self.private_key_to_der()?; let pem = pem::Pem::new( SIGSTORE_PRIVATE_KEY_PEM_LABEL, kdf::encrypt(&der, password)?, ); let pem = pem::encode(&pem); Ok(zeroize::Zeroizing::new(pem)) } /// Derive the relative [`CosignVerificationKey`]. fn to_verification_key(&self, signing_scheme: &SigningScheme) -> Result { let pem = self.public_key_to_pem()?; CosignVerificationKey::from_pem(pem.as_bytes(), signing_scheme) } } /// `EcdsaSigner` is used to generate a ECDSA signature. /// The generic parameter `C` here can be chosen from /// /// * `p256::NistP256`: `P-256`, also known as `secp256r1` or `prime256v1`. /// * `p384::NistP384`: `P-384`, also known as `secp384r1`. /// /// More elliptic curves, please refer to /// . /// /// And the parameter `D` indicates the digest algorithm. /// /// For concrete digest algorithms, please refer to /// . 
#[allow(deprecated)]
#[derive(Clone, Debug)]
pub struct EcdsaSigner<C, D>
where
    C: PrimeCurve + CurveArithmetic + AssociatedOid,
    Scalar<C>: Invert<Output = CtOption<Scalar<C>>> + Reduce<C::Uint> + SignPrimitive<C>,
    C::Uint: for<'a> From<&'a Scalar<C>>,
    SignatureSize<C>: ArrayLength<u8>,
    D: Digest + BlockSizeUser + FixedOutput<OutputSize = FieldBytesSize<C>> + FixedOutputReset,
{
    // The RustCrypto signing key derived from `ecdsa_keys`.
    signing_key: SigningKey<C>,
    // Keep the original key pair around so `key_pair()` can hand out
    // a `&dyn KeyPair` without re-deriving it.
    ecdsa_keys: EcdsaKeys<C>,
    // `D` only selects the digest algorithm; no runtime data is stored for it.
    _marker: PhantomData<D>,
}

#[allow(deprecated)]
impl<C, D> EcdsaSigner<C, D>
where
    C: PrimeCurve + CurveArithmetic + AssociatedOid,
    Scalar<C>: Invert<Output = CtOption<Scalar<C>>> + Reduce<C::Uint> + SignPrimitive<C>,
    AffinePoint<C>: FromEncodedPoint<C> + ToEncodedPoint<C>,
    FieldBytesSize<C>: ModulusSize,
    C::Uint: for<'a> From<&'a Scalar<C>>,
    SignatureSize<C>: ArrayLength<u8>,
    D: Digest + BlockSizeUser + FixedOutput<OutputSize = FieldBytesSize<C>> + FixedOutputReset,
{
    /// Create a new `EcdsaSigner` from the given `EcdsaKeys` and `SignatureDigestAlgorithm`
    pub fn from_ecdsa_keys(ecdsa_keys: &EcdsaKeys<C>) -> Result<Self> {
        let signing_key =
            ecdsa::SigningKey::<C>::from_pkcs8_der(&ecdsa_keys.private_key_to_der()?[..]).map_err(
                |e| {
                    SigstoreError::PKCS8Error(format!(
                        "Convert from pkcs8 der to ecdsa private key failed: {e}"
                    ))
                },
            )?;
        Ok(Self {
            signing_key,
            ecdsa_keys: ecdsa_keys.clone(),
            _marker: PhantomData,
        })
    }

    /// Return the ref to the keypair inside the signer
    pub fn ecdsa_keys(&self) -> &EcdsaKeys<C> {
        &self.ecdsa_keys
    }
}

#[allow(deprecated)]
impl<C, D> Signer for EcdsaSigner<C, D>
where
    C: PrimeCurve + CurveArithmetic + AssociatedOid + DigestPrimitive,
    Scalar<C>: Invert<Output = CtOption<Scalar<C>>> + Reduce<C::Uint> + SignPrimitive<C>,
    SigningKey<C>: ecdsa::signature::Signer<ecdsa::Signature<C>>,
    C::Uint: for<'a> From<&'a Scalar<C>>,
    // NOTE(review): the `UInt<…, B1>, B0>, B0>, B1>` type below is typenum `U9`,
    // i.e. `ecdsa::der::MaxOverhead`: these two bounds let `to_der()` size its
    // output buffer (2 * field size + 9 bytes of ASN.1 overhead).
    <<C as Curve>::FieldBytesSize as Add>::Output:
        Add<UInt<UInt<UInt<UInt<UTerm, B1>, B0>, B0>, B1>>,
    <<<C as Curve>::FieldBytesSize as Add>::Output as Add<
        UInt<UInt<UInt<UInt<UTerm, B1>, B0>, B0>, B1>,
    >>::Output: ArrayLength<u8>,
    SignatureSize<C>: ArrayLength<u8>,
    <<C as Curve>::Uint as ArrayEncoding>::ByteSize: ModulusSize,
    <C as Curve>::FieldBytesSize: ModulusSize,
    <C as CurveArithmetic>::AffinePoint: ToEncodedPoint<C>,
    <C as CurveArithmetic>::AffinePoint: FromEncodedPoint<C>,
    D: Digest + BlockSizeUser + FixedOutput<OutputSize = FieldBytesSize<C>> + FixedOutputReset,
{
    /// Sign the given message, and generate a signature.
    /// The message will firstly be hashed with the given
    /// digest algorithm `D`. And then, ECDSA signature
    /// algorithm will sign the digest.
    ///
    /// The outcome digest will be encoded in `asn.1`.
    fn sign(&self, msg: &[u8]) -> Result<Vec<u8>> {
        let mut hasher = D::new();
        digest::Digest::update(&mut hasher, msg);
        // `try_sign_digest` also yields a recovery id; it is discarded because
        // plain ASN.1/DER ECDSA signatures do not carry one.
        let (sig, _recovery_id) = self.signing_key.try_sign_digest(hasher)?;
        Ok(sig.to_der().to_bytes().to_vec())
    }

    /// Return the ref to the keypair inside the signer
    fn key_pair(&self) -> &dyn KeyPair {
        &self.ecdsa_keys
    }
}

#[cfg(test)]
mod tests {
    use std::fs;

    use rstest::rstest;

    use crate::crypto::{
        Signature, SigningScheme,
        signing_key::{KeyPair, Signer, tests::MESSAGE},
        verification_key::CosignVerificationKey,
    };

    use super::{EcdsaKeys, EcdsaSigner};

    const PASSWORD: &[u8] = b"123";
    const EMPTY_PASSWORD: &[u8] = b"";

    /// This test will try to read an unencrypted ecdsa
    /// private key file, which is generated by `sigstore`.
    #[test]
    fn ecdsa_from_unencrypted_pem() {
        let content = fs::read("tests/data/keys/ecdsa_private.key")
            .expect("read tests/data/keys/ecdsa_private.key failed.");
        let key = EcdsaKeys::<p256::NistP256>::from_pem(&content);
        assert!(
            key.is_ok(),
            "can not create EcdsaKeys from unencrypted PEM file."
        );
    }

    /// This test will try to read an encrypted ecdsa
    /// private key file, which is generated by `sigstore`.
    #[rstest]
    #[case("tests/data/keys/ecdsa_encrypted_private.key", PASSWORD)]
    #[case::empty_password(
        "tests/data/keys/cosign_generated_encrypted_empty_private.key",
        EMPTY_PASSWORD
    )]
    #[case::empty_password_unencrypted("tests/data/keys/ecdsa_private.key", EMPTY_PASSWORD)]
    fn ecdsa_from_encrypted_pem(#[case] keypath: &str, #[case] password: &[u8]) {
        let content = fs::read(keypath).expect("read key failed.");
        let key = EcdsaKeys::<p256::NistP256>::from_encrypted_pem(&content, password);
        assert!(
            key.is_ok(),
            "can not create EcdsaKeys from encrypted PEM file"
        );
    }

    /// This test will try to encrypt a ecdsa keypair and
    /// return the pem-encoded contents.
#[rstest] #[case(PASSWORD)] #[case::empty_password(EMPTY_PASSWORD)] fn ecdsa_to_encrypted_pem(#[case] password: &[u8]) { let key = EcdsaKeys::::new().expect("create ecdsa keys with P256 curve failed."); let key = key.private_key_to_encrypted_pem(password); assert!( key.is_ok(), "can not export private key in encrypted PEM format." ); } /// This test will ensure that an unencrypted /// keypair will fail to read if a non-empty /// password is given. #[test] fn ecdsa_error_unencrypted_pem_password() { let content = fs::read("tests/data/keys/ecdsa_private.key").expect("read key failed."); let key = EcdsaKeys::::from_encrypted_pem(&content, PASSWORD); assert!( key.is_err_and(|e| e .to_string() .contains("Unencrypted private key but password provided")), "read unencrypted key with password" ); } /// This test will generate a EcdsaKeys, encode the private key /// it into pem, and decode a new key from the generated pem-encoded /// private key. #[test] fn ecdsa_to_and_from_pem() { let key = EcdsaKeys::::new().expect("create ecdsa keys with P256 curve failed."); let key = key .private_key_to_pem() .expect("export private key to PEM format failed."); let key = EcdsaKeys::::from_pem(key.as_bytes()); assert!(key.is_ok(), "can not create EcdsaKeys from PEM string."); } /// This test will generate a EcdsaKeys, encode the private key /// it into pem, and decode a new key from the generated pem-encoded /// private key. 
#[rstest] #[case(PASSWORD)] #[case::empty_password(EMPTY_PASSWORD)] fn ecdsa_to_and_from_encrypted_pem(#[case] password: &[u8]) { let key = EcdsaKeys::::new().expect("create ecdsa keys with P256 curve failed."); let key = key .private_key_to_encrypted_pem(password) .expect("export private key to PEM format failed."); let key = EcdsaKeys::::from_encrypted_pem(key.as_bytes(), password); assert!(key.is_ok(), "can not create EcdsaKeys from PEM string."); } /// This test will generate a EcdsaKeys, encode the private key /// it into der, and decode a new key from the generated der-encoded /// private key. #[test] fn ecdsa_to_and_from_der() { let key = EcdsaKeys::::new().expect("create ecdsa keys with P256 curve failed."); let key = key .private_key_to_der() .expect("export private key to DER format failed."); let key = EcdsaKeys::::from_der(&key); assert!(key.is_ok(), "can not create EcdsaKeys from DER bytes.") } /// This test will generate a ecdsa-P256 keypair. /// And then use the verification key interface to instantial /// a VerificationKey object. #[test] fn ecdsa_generate_public_key() { let key = EcdsaKeys::::new().expect("create ecdsa keys with P256 curve failed."); let pubkey = key .public_key_to_pem() .expect("export private key to PEM format failed."); assert!( CosignVerificationKey::from_pem(pubkey.as_bytes(), &SigningScheme::default(),).is_ok() ); let pubkey = key .public_key_to_der() .expect("export private key to DER format failed."); assert!( CosignVerificationKey::from_der(&pubkey, &SigningScheme::default()).is_ok(), "can not create CosignVerificationKey from der bytes." ); } /// This test will generate a ecdsa-P256 keypair. /// And then derive a `CosignVerificationKey` from it. #[test] fn ecdsa_derive_verification_key() { let key = EcdsaKeys::::new().expect("create ecdsa keys with P256 curve failed."); assert!( key.to_verification_key(&SigningScheme::default()).is_ok(), "can not create CosignVerificationKey from EcdsaKeys via `to_verification_key`." 
); } /// This test will do the following things: /// * Generate a ecdsa-P256 keypair. /// * Sign the MESSAGE with the private key and digest algorithm SHA256, /// then generate a signature. /// * Verify the signature using the public key. #[test] fn ecdsa_sign_and_verify() { let key = EcdsaKeys::::new().expect("create ecdsa keys with P256 curve failed."); let pubkey = key .public_key_to_pem() .expect("export private key to PEM format failed."); let signer = EcdsaSigner::<_, sha2::Sha256>::from_ecdsa_keys(&key) .expect("create EcdsaSigner from ecdsa keys failed."); let sig = signer .sign(MESSAGE.as_bytes()) .expect("signing message failed."); let verification_key = CosignVerificationKey::from_pem( pubkey.as_bytes(), &SigningScheme::ECDSA_P256_SHA256_ASN1, ) .expect("convert CosignVerificationKey from public key failed."); let signature = Signature::Raw(&sig); assert!( verification_key .verify_signature(signature, MESSAGE.as_bytes()) .is_ok(), "can not verify the signature." ); } } ================================================ FILE: src/crypto/signing_key/ecdsa/mod.rs ================================================ // // Copyright 2022 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! # ECDSA Key Enums //! //! This is a wrapper for [`EcdsaKeys`] and [`EcdsaSigner`]. Because //! both [`EcdsaKeys`] and [`EcdsaSigner`] are generic types, they //! may let the user to manually include concrete underlying elliptic //! 
curves like `p256`, `p384`, and concrete digest algorithm crates //! like `sha2`. To avoid this, we use [`ECDSAKeys`] enum to wrap //! the generic type [`EcdsaKeys`]. //! //! # EC Key Pair Operations //! //! This wrapper provides two underlying elliptic curves, s.t. //! * `P256`: `P-256`, also known as `secp256r1` or `prime256v1`. //! * `P384`: `P-384`, also known as `secp384r1`. //! //! We take `P256` for example to show the operaions: //! ```rust //! use sigstore::crypto::signing_key::ecdsa::{ECDSAKeys, EllipticCurve}; //! use sigstore::crypto::Signature; //! //! // generate a new EC-P256 key pair //! let ec_key_pair = ECDSAKeys::new(EllipticCurve::P256).unwrap(); //! //! // export the pem encoded public key. //! // here `as_inner()` will return the reference of `KeyPair` trait object //! // underlying this `ECDSAKeys` for key pair operaions. //! let pubkey = ec_key_pair.as_inner().public_key_to_pem().unwrap(); //! //! // export the private key using sigstore encryption. //! let privkey = ec_key_pair.as_inner().private_key_to_encrypted_pem(b"password").unwrap(); //! //! // also, we can import an [`ECDSAKeys`] of unknown elliptic curve at compile //! // time using functions with the prefix `ECDSAKeys::from_`. These functions //! // will try to decode the given ecdsa private key using all [`EllipticCurve`] //! // enums (suppose the given private key is in PKCS8 format. The PKCS8 //! // format will carry the key algorithm and its underlying elliptic curve //! // identity). If one of them succeeds, return the enum. If all fail, return //! // an error. For example: //! // let ec_key_pair_import = ECDSAKeys::from_pem(PEM_CONTENT).unwrap(); //! //! // convert this EC key into an [`SigStoreSigner`] enum to sign some data. //! // Although different EC key can combine with different digest algorithms to //! // form a signing scheme, `P256` is recommended to work with `Sha256` and //! // `P384` is recommended to work with `Sha384`. So here we do not include //! 
// extra parameter `SignatureDigestAlgorithm` for `to_sigstore_signer()`. //! let ec_signer = ec_key_pair.to_sigstore_signer().unwrap(); //! //! // test message to be signed //! let message = b"some message"; //! //! // sign using //! let signature_data = ec_signer.sign(message).unwrap(); //! //! // export the [`CosignVerificationKey`] from the [`SigStoreSigner`], which //! // is used to verify the signature. //! let verification_key = ec_signer.to_verification_key().unwrap(); //! //! // verify //! assert!(verification_key.verify_signature(Signature::Raw(&signature_data),message).is_ok()); /// ``` use crate::errors::*; use self::ec::{EcdsaKeys, EcdsaSigner}; use super::{KeyPair, SigStoreSigner}; pub mod ec; pub enum ECDSAKeys { P256(EcdsaKeys), P384(EcdsaKeys), } impl std::fmt::Display for ECDSAKeys { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { ECDSAKeys::P256(_) => write!(f, "ECDSA P256"), ECDSAKeys::P384(_) => write!(f, "ECDSA P384"), } } } /// The types of supported elliptic curves: /// * `P256`: `P-256`, also known as `secp256r1` or `prime256v1`. /// * `P384`: `P-384`, also known as `secp384r1`. pub enum EllipticCurve { P256, P384, } /// This macro helps to reduce duplicated code. macro_rules! iterate_on_curves { ($func: ident ($($args:expr),*), $errorinfo: literal) => { if let Ok(keys) = EcdsaKeys::::$func($($args,)*) { Ok(ECDSAKeys::P256(keys)) } else if let Ok(keys) = EcdsaKeys::::$func($($args,)*) { Ok(ECDSAKeys::P384(keys)) } else { Err(SigstoreError::KeyParseError($errorinfo.to_string())) } } } impl ECDSAKeys { /// Create a new [`ECDSAKeys`] due to the given [`EllipticCurve`]. pub fn new(curve: EllipticCurve) -> Result { Ok(match curve { EllipticCurve::P256 => ECDSAKeys::P256(EcdsaKeys::::new()?), EllipticCurve::P384 => ECDSAKeys::P384(EcdsaKeys::::new()?), }) } /// Return the inner `KeyPair` of the enum. This function /// is useful in the inner interface conversion. 
pub fn as_inner(&self) -> &dyn KeyPair { match self { ECDSAKeys::P256(inner) => inner, ECDSAKeys::P384(inner) => inner, } } /// Builds a `EcdsaKeys` from encrypted pkcs8 PEM-encoded private key. /// The label should be [`super::COSIGN_PRIVATE_KEY_PEM_LABEL`] or /// [`super::SIGSTORE_PRIVATE_KEY_PEM_LABEL`]. pub fn from_encrypted_pem(private_key: &[u8], password: &[u8]) -> Result { iterate_on_curves!( from_encrypted_pem(private_key, password), "Ecdsa keys from encrypted PEM private key" ) } /// Builds a `EcdsaKeys` from a pkcs8 PEM-encoded private key. /// The label of PEM should be [`super::PRIVATE_KEY_PEM_LABEL`] pub fn from_pem(pem_data: &[u8]) -> Result { iterate_on_curves!(from_pem(pem_data), "Ecdsa keys from PEM private key") } /// Builds a `EcdsaKeys` from a pkcs8 asn.1 private key. pub fn from_der(private_key: &[u8]) -> Result { iterate_on_curves!(from_der(private_key), "Ecdsa keys from DER private key") } /// `to_sigstore_signer` will create the [`SigStoreSigner`] using /// this Ecdsa private key. This function does not receive any parameter /// to indicate the digest algorthm, because the common signing schemes /// for ecdsa-p256 is `ECDSA_P256_SHA256`, and for ecdsa-p384 is /// `ECDSA_P384_SHA384`. pub fn to_sigstore_signer(&self) -> Result { Ok(match self { ECDSAKeys::P256(inner) => { SigStoreSigner::ECDSA_P256_SHA256_ASN1( EcdsaSigner::<_, sha2::Sha256>::from_ecdsa_keys(inner)?, ) } ECDSAKeys::P384(inner) => { SigStoreSigner::ECDSA_P384_SHA384_ASN1( EcdsaSigner::<_, sha2::Sha384>::from_ecdsa_keys(inner)?, ) } }) } } ================================================ FILE: src/crypto/signing_key/ed25519.rs ================================================ // // Copyright 2022 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! # Ed25519 Keys //! //! This is a wrapper for Rust Crypto. There two main types in this mod: //! * [`Ed25519Keys`]: provides basic key pair operaions //! * [`Ed25519Signer`]: provides signing operaion //! //! The `signing_key` will wrap [`Ed25519Keys`] into [`super::SigStoreKeyPair`] enum, //! and [`Ed25519Signer`] into [`SigStoreSigner`] enum. //! //! # Ed25519 Key Operaions //! //! We give an example for the mod //! ```rust //! use sigstore::crypto::signing_key::ed25519::Ed25519Keys; //! use sigstore::crypto::{signing_key::KeyPair, Signature}; //! //! // generate a new Ed25519 key pair //! let ed25519_key_pair = Ed25519Keys::new().unwrap(); //! //! // export the pem encoded public key. //! let pubkey = ed25519_key_pair.public_key_to_pem().unwrap(); //! //! // export the private key using sigstore encryption. //! let privkey = ed25519_key_pair.private_key_to_encrypted_pem(b"password").unwrap(); //! //! // also, we can import a Ed25519 using functions with the prefix //! // `Ed25519Keys::from_`. These functions will treat the given //! // data as Ed25519 private key in PKCS8 format. For example: //! // let ed25519_key_pair_import = Ed25519Keys::from_pem(PEM_CONTENT).unwrap(); //! //! // convert this Ed25519 key into an [`super::SigStoreSigner`] enum to sign some data. //! let ed25519_signer = ed25519_key_pair.to_sigstore_signer().unwrap(); //! //! // test message to be signed //! let message = b"some message"; //! //! // sign using //! let signature = ed25519_signer.sign(message).unwrap(); //! //! 
// export the [`CosignVerificationKey`] from the [`super::SigStoreSigner`], which //! // is used to verify the signature. //! let verification_key = ed25519_signer.to_verification_key().unwrap(); //! //! // verify //! assert!(verification_key.verify_signature(Signature::Raw(&signature),message).is_ok()); //! ``` use ed25519::pkcs8::{DecodePrivateKey, EncodePrivateKey, EncodePublicKey}; use ed25519::KeypairBytes; use ed25519_dalek::{Signer as _, SigningKey}; use crate::{ crypto::{SigningScheme, verification_key::CosignVerificationKey}, errors::*, }; use super::{ COSIGN_PRIVATE_KEY_PEM_LABEL, KeyPair, PRIVATE_KEY_PEM_LABEL, SIGSTORE_PRIVATE_KEY_PEM_LABEL, SigStoreSigner, Signer, kdf, }; #[derive(Debug, Clone)] pub struct Ed25519Keys { signing_key: ed25519_dalek::SigningKey, verifying_key: ed25519_dalek::VerifyingKey, } impl Ed25519Keys { /// Create a new `Ed25519Keys` Object. /// The private key will be randomly /// generated. pub fn new() -> Result { let mut csprng = rand::rngs::OsRng {}; let signing_key = SigningKey::generate(&mut csprng); let verifying_key = signing_key.verifying_key(); Ok(Self { signing_key, verifying_key, }) } /// Create a new `Ed25519Keys` Object from given `Ed25519Keys` Object. pub fn from_ed25519key(key: &Ed25519Keys) -> Result { let priv_key = key.private_key_to_der()?; Ed25519Keys::from_der(&priv_key[..]) } /// Builds a `Ed25519Keys` from encrypted pkcs8 PEM-encoded private key. /// The label should be [`COSIGN_PRIVATE_KEY_PEM_LABEL`] or /// [`SIGSTORE_PRIVATE_KEY_PEM_LABEL`]. 
pub fn from_encrypted_pem(encrypted_pem: &[u8], password: &[u8]) -> Result { let key = pem::parse(encrypted_pem)?; match key.tag() { COSIGN_PRIVATE_KEY_PEM_LABEL | SIGSTORE_PRIVATE_KEY_PEM_LABEL => { let der = kdf::decrypt(key.contents(), password)?; let pkcs8 = ed25519_dalek::pkcs8::PrivateKeyInfo::try_from(&der[..]).map_err(|e| { SigstoreError::PKCS8Error(format!("Read PrivateKeyInfo failed: {e}")) })?; let key_pair_bytes = KeypairBytes::try_from(pkcs8).map_err(|e| { SigstoreError::PKCS8Error(format!( "Convert from pkcs8 pem to ed25519 private key failed: {e}" )) })?; Self::from_key_pair_bytes(key_pair_bytes) } PRIVATE_KEY_PEM_LABEL if password.is_empty() => Self::from_pem(encrypted_pem), PRIVATE_KEY_PEM_LABEL if !password.is_empty() => { Err(SigstoreError::PrivateKeyDecryptError( "Unencrypted private key but password provided".into(), )) } tag => Err(SigstoreError::PrivateKeyDecryptError(format!( "Unsupported pem tag {tag}" ))), } } /// Builds a `Ed25519Keys` from a pkcs8 PEM-encoded private key. /// The label of PEM should be [`PRIVATE_KEY_PEM_LABEL`] pub fn from_pem(pem: &[u8]) -> Result { let pem = std::str::from_utf8(pem)?; let (label, document) = pkcs8::SecretDocument::from_pem(pem) .map_err(|e| SigstoreError::PKCS8DerError(e.to_string()))?; match label { PRIVATE_KEY_PEM_LABEL => { let pkcs8 = ed25519_dalek::pkcs8::PrivateKeyInfo::try_from(document.as_bytes()) .map_err(|e| { SigstoreError::PKCS8Error(format!("Read PrivateKeyInfo failed: {e}")) })?; let key_pair_bytes = KeypairBytes::try_from(pkcs8).map_err(|e| { SigstoreError::PKCS8Error(format!( "Convert from pkcs8 pem to ed25519 private key failed: {e}" )) })?; Self::from_key_pair_bytes(key_pair_bytes) } tag => Err(SigstoreError::PrivateKeyDecryptError(format!( "Unsupported pem tag {tag}" ))), } } /// Builds a `Ed25519Keys` from a pkcs8 asn.1 private key. 
/// Builds a `Ed25519Keys` from a pkcs8 DER-encoded private key.
pub fn from_der(der_bytes: &[u8]) -> Result<Self> {
    let key_pair_bytes = KeypairBytes::from_pkcs8_der(der_bytes).map_err(|e| {
        SigstoreError::PKCS8Error(format!(
            "Convert from pkcs8 der to ed25519 private key failed: {e}"
        ))
    })?;
    Self::from_key_pair_bytes(key_pair_bytes)
}

/// Builds a `Ed25519Keys` from a `KeypairBytes`.
///
/// Fails when `key_pair_bytes` carries no public-key half, because
/// `from_keypair_bytes` needs the full 64-byte (secret || public) form.
fn from_key_pair_bytes(key_pair_bytes: KeypairBytes) -> Result<Self> {
    let signing_key = ed25519_dalek::SigningKey::from_keypair_bytes(
        &key_pair_bytes.to_bytes().ok_or_else(|| {
            SigstoreError::PKCS8SpkiError("No public key info in given key_pair_bytes.".into())
        })?,
    )?;
    let verifying_key = signing_key.verifying_key();
    Ok(Self {
        signing_key,
        verifying_key,
    })
}

/// `to_sigstore_signer` will create the [`SigStoreSigner`] using
/// this ed25519 private key.
pub fn to_sigstore_signer(&self) -> Result<SigStoreSigner> {
    Ok(SigStoreSigner::ED25519(Ed25519Signer::from_ed25519_keys(
        self,
    )?))
}
}

impl KeyPair for Ed25519Keys {
    /// Return the public key in PEM-encoded SPKI format.
    fn public_key_to_pem(&self) -> Result<String> {
        self.verifying_key
            .to_public_key_pem(pkcs8::LineEnding::LF)
            .map_err(|e| SigstoreError::PKCS8SpkiError(e.to_string()))
    }

    /// Return the public key in asn.1 SPKI format.
    fn public_key_to_der(&self) -> Result<Vec<u8>> {
        Ok(self
            .verifying_key
            .to_public_key_der()
            .map_err(|e| SigstoreError::PKCS8SpkiError(e.to_string()))?
            .to_vec())
    }

    /// Return the encrypted asn.1 pkcs8 private key, sealed with the
    /// sigstore KDF (scrypt + nacl secretbox) and wrapped in a PEM block
    /// labelled [`SIGSTORE_PRIVATE_KEY_PEM_LABEL`].
    fn private_key_to_encrypted_pem(&self, password: &[u8]) -> Result<zeroize::Zeroizing<String>> {
        let der = self.private_key_to_der()?;
        let pem = pem::Pem::new(
            SIGSTORE_PRIVATE_KEY_PEM_LABEL,
            kdf::encrypt(&der, password)?,
        );
        let pem = pem::encode(&pem);
        Ok(zeroize::Zeroizing::new(pem))
    }

    /// Return the private key in pkcs8 PEM-encoded format.
    fn private_key_to_pem(&self) -> Result<zeroize::Zeroizing<String>> {
        self.signing_key
            .to_pkcs8_der()
            .map_err(|e| SigstoreError::PKCS8SpkiError(e.to_string()))?
            .to_pem(PRIVATE_KEY_PEM_LABEL, pkcs8::LineEnding::LF)
            .map_err(|e| SigstoreError::PKCS8SpkiError(e.to_string()))
    }

    /// Return the private key in asn.1 pkcs8 format.
    fn private_key_to_der(&self) -> Result<zeroize::Zeroizing<Vec<u8>>> {
        let pkcs8 = self
            .signing_key
            .to_pkcs8_der()
            .map_err(|e| SigstoreError::PKCS8Error(e.to_string()))?;
        Ok(pkcs8.to_bytes())
    }

    /// Derive the relative [`CosignVerificationKey`].
    ///
    /// The digest-algorithm parameter is ignored: ed25519 signatures
    /// always use the ED25519 scheme.
    fn to_verification_key(
        &self,
        _signature_digest_algorithm: &SigningScheme,
    ) -> Result<CosignVerificationKey> {
        let der = self.public_key_to_der()?;
        let res = CosignVerificationKey::from_der(&der, &SigningScheme::ED25519)?;
        Ok(res)
    }
}

/// Signer that wraps an [`Ed25519Keys`] key pair.
#[derive(Debug)]
pub struct Ed25519Signer {
    key_pair: Ed25519Keys,
}

impl Ed25519Signer {
    /// Build an `Ed25519Signer` by cloning the given key pair.
    pub fn from_ed25519_keys(ed25519_keys: &Ed25519Keys) -> Result<Self> {
        Ok(Self {
            key_pair: ed25519_keys.clone(),
        })
    }

    /// Return the ref to the keypair inside the signer
    pub fn ed25519_keys(&self) -> &Ed25519Keys {
        &self.key_pair
    }
}

impl Signer for Ed25519Signer {
    /// Return the ref to the keypair inside the signer
    fn key_pair(&self) -> &dyn KeyPair {
        &self.key_pair
    }

    /// Sign the given message using Ed25519
    fn sign(&self, msg: &[u8]) -> Result<Vec<u8>> {
        let signature = self.key_pair.signing_key.try_sign(msg)?;
        Ok(signature.to_vec())
    }
}

#[cfg(test)]
mod tests {
    use std::fs;

    use rstest::rstest;

    use crate::crypto::{
        Signature, SigningScheme,
        signing_key::{KeyPair, Signer, tests::MESSAGE},
        verification_key::CosignVerificationKey,
    };

    use super::{Ed25519Keys, Ed25519Signer};

    const PASSWORD: &[u8] = b"123";
    const EMPTY_PASSWORD: &[u8] = b"";

    /// This test will try to read an unencrypted ed25519
    /// private key file, which is generated by `sigstore`.
    #[test]
    fn ed25519_from_unencrypted_pem() {
        let content = fs::read("tests/data/keys/ed25519_private.key")
            .expect("read tests/data/keys/ed25519_private.key failed.");
        let key = Ed25519Keys::from_pem(&content);
        assert!(
            key.is_ok(),
            "can not create Ed25519Keys from unencrypted PEM file."
        );
    }

    /// This test will try to read an encrypted ed25519
    /// private key file, which is generated by `sigstore`.
    #[rstest]
    #[case("tests/data/keys/ed25519_encrypted_private.key", PASSWORD)]
    #[case::empty_password("tests/data/keys/ed25519_private.key", EMPTY_PASSWORD)]
    fn ed25519_from_encrypted_pem(#[case] keypath: &str, #[case] password: &[u8]) {
        let content = fs::read(keypath).expect("read key failed.");
        let key = Ed25519Keys::from_encrypted_pem(&content, password);
        assert!(
            key.is_ok(),
            "can not create Ed25519Keys from encrypted PEM file"
        );
    }

    /// This test will try to encrypt a ed25519 keypair and
    /// return the pem-encoded contents.
    #[rstest]
    #[case(PASSWORD)]
    #[case::empty_password(EMPTY_PASSWORD)]
    fn ed25519_to_encrypted_pem(#[case] password: &[u8]) {
        let key = Ed25519Keys::new().expect("create Ed25519 keys failed.");
        let key = key.private_key_to_encrypted_pem(password);
        assert!(
            key.is_ok(),
            "can not export private key in encrypted PEM format."
        );
    }

    /// This test will ensure that an unencrypted
    /// keypair will fail to read if a non-empty
    /// password is given.
    #[test]
    fn ed25519_error_unencrypted_pem_password() {
        let content = fs::read("tests/data/keys/ed25519_private.key").expect("read key failed.");
        let key = Ed25519Keys::from_encrypted_pem(&content, PASSWORD);
        assert!(
            key.is_err_and(|e| e
                .to_string()
                .contains("Unencrypted private key but password provided")),
            "read unencrypted key with password"
        );
    }

    /// This test will generate a Ed25519Keys, encode the private key
    /// into pem, and decode a new key from the generated pem-encoded
    /// private key.
    #[test]
    fn ed25519_to_and_from_pem() {
        let key = Ed25519Keys::new().expect("create ed25519 keys failed.");
        let key = key
            .private_key_to_pem()
            .expect("export private key to PEM format failed.");
        let key = Ed25519Keys::from_pem(key.as_bytes());
        assert!(key.is_ok(), "can not create Ed25519Keys from PEM string.");
    }

    /// This test will generate a Ed25519Keys, encrypt the private key
    /// into pem, and decode a new key from the generated pem-encoded
    /// private key.
    #[rstest]
    #[case(PASSWORD)]
    #[case::empty_password(EMPTY_PASSWORD)]
    fn ed25519_to_and_from_encrypted_pem(#[case] password: &[u8]) {
        let key = Ed25519Keys::new().expect("create ed25519 keys failed.");
        let key = key
            .private_key_to_encrypted_pem(password)
            .expect("export private key to PEM format failed.");
        let key = Ed25519Keys::from_encrypted_pem(key.as_bytes(), password);
        assert!(key.is_ok(), "can not create Ed25519Keys from PEM string.");
    }

    /// This test will generate a Ed25519Keys, encode the private key
    /// it into der, and decode a new key from the generated der-encoded
    /// private key.
    #[test]
    fn ed25519_to_and_from_der() {
        let key = Ed25519Keys::new().expect("create ed25519 keys failed.");
        let key = key
            .private_key_to_der()
            .expect("export private key to DER format failed.");
        let key = Ed25519Keys::from_der(&key);
        assert!(key.is_ok(), "can not create Ed25519Keys from DER bytes.")
    }

    /// This test will generate a ed25519 keypair.
    /// And then use the verification key interface to instantiate
    /// a VerificationKey object.
    #[test]
    fn ed25519_generate_public_key() {
        let key = Ed25519Keys::new().expect("create ed25519 keys failed.");
        let pubkey = key
            .public_key_to_pem()
            .expect("export public key to PEM format failed.");
        assert!(
            CosignVerificationKey::from_pem(pubkey.as_bytes(), &SigningScheme::ED25519).is_ok(),
            "can not convert public key in PEM format into CosignVerificationKey.",
        );
        let pubkey = key
            .public_key_to_der()
            .expect("export public key to DER format failed.");
        assert!(
            CosignVerificationKey::from_der(&pubkey, &SigningScheme::ED25519).is_ok(),
            "can not create CosignVerificationKey from der bytes."
        );
    }

    /// This test will generate a ed25519 keypair.
    /// And then derive a `CosignVerificationKey` from it.
    #[test]
    fn ecdsa_derive_verification_key() {
        let key = Ed25519Keys::new().expect("create ed25519 keys failed.");
        assert!(
            key.to_verification_key(&SigningScheme::ED25519).is_ok(),
            "can not create CosignVerificationKey from Ed25519Keys via `to_verification_key`.",
        );
    }

    /// This test will do the following things:
    /// * Generate a ed25519 keypair.
    /// * Sign the MESSAGE with the private key then generate a signature.
    /// * Verify the signature using the public key.
#[test] fn ed25519_sign_and_verify() { let key = Ed25519Keys::new().expect("create ed25519 keys failed."); let pubkey = key .public_key_to_pem() .expect("export private key to PEM format failed."); let signer = Ed25519Signer::from_ed25519_keys(&key) .expect("create Ed25519Signer from ed25519 keys failed."); let sig = signer .sign(MESSAGE.as_bytes()) .expect("signing message failed."); let verification_key = CosignVerificationKey::from_pem(pubkey.as_bytes(), &SigningScheme::ED25519) .expect("convert CosignVerificationKey from public key failed."); let signature = Signature::Raw(&sig); assert!( verification_key .verify_signature(signature, MESSAGE.as_bytes()) .is_ok(), "can not verify the signature.", ); } } ================================================ FILE: src/crypto/signing_key/kdf.rs ================================================ // // Copyright 2022 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Key Derivation Function for Sigstore //! //! This is the Rust version of KDF used in Sigstore. //! Please refer to //! for golang version. use base64::{Engine as _, engine::general_purpose::STANDARD as BASE64_STD_ENGINE}; use crypto_secretbox::aead::{AeadMut, KeyInit}; use rand::Rng; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use crate::errors::*; /// Salt bit length used in scrypt algorithm. pub const SALT_SIZE: u32 = 32; /// KDF name for scrypt. 
pub const NAME_SCRYPT: &str = "scrypt"; /// Scrypt algorithm parameter log2(n) pub const SCRYPT_N_LOW: u32 = 32768; pub const SCRYPT_N_HIGH: u32 = 65536; /// Scrypt algorithm parameter r pub const SCRYPT_R: u32 = 8; /// Scrypt algorithm parameter p pub const SCRYPT_P: u32 = 1; /// Secret box name pub const NAME_SECRET_BOX: &str = "nacl/secretbox"; /// Key length for secretbox pub const BOX_KEY_SIZE: usize = 32; /// Nonce length for secretbox pub const BOX_NONCE_SIZE: u32 = 24; /// Parameters for scrypt algorithm. #[derive(Serialize, Deserialize)] pub struct ScryptParams { #[serde(rename = "N")] n: u32, r: u32, p: u32, } /// Key Derivation Function. /// Using scrypt algorithm from a password. #[derive(Serialize, Deserialize)] struct ScryptKDF { name: String, params: ScryptParams, #[serde(serialize_with = "to_base64", deserialize_with = "from_base64")] salt: Vec, } /// Help to serialize `salt` to base64 fn to_base64(v: &[u8], serializer: S) -> std::result::Result where S: Serializer, { serializer.serialize_str(&BASE64_STD_ENGINE.encode(v)) } /// Help to deserialize `salt` from base64 fn from_base64<'de, D>(deserializer: D) -> std::result::Result, D::Error> where D: Deserializer<'de>, { let s = ::deserialize(deserializer)?; BASE64_STD_ENGINE .decode(s) .map_err(serde::de::Error::custom) } impl Default for ScryptKDF { /// Create a new Key derivation function object fn default() -> Self { let salt = generate_random(SALT_SIZE); Self { name: NAME_SCRYPT.into(), params: ScryptParams { n: SCRYPT_N_LOW, r: SCRYPT_R, p: SCRYPT_P, }, salt, } } } impl ScryptKDF { /// Derivate a new key from the given password fn key(&self, password: &[u8]) -> Result> { let log_n = (self.params.n as f64).log2() as u8; let params = scrypt::Params::new( log_n, self.params.r, self.params.p, scrypt::Params::RECOMMENDED_LEN, )?; let mut res = vec![0; BOX_KEY_SIZE]; scrypt::scrypt(password, &self.salt, ¶ms, &mut res)?; Ok(res) } /// Check whether the given params is as the default, /// to avoid a DoS 
attack. fn check_params(&self) -> Result<()> { match (self.params.n == SCRYPT_N_LOW || self.params.n == SCRYPT_N_HIGH) && self.params.r == SCRYPT_R && self.params.p == SCRYPT_P { true => Ok(()), false => Err(SigstoreError::PrivateKeyDecryptError( "Unexpected kdf parameters".into(), )), } } } /// Secretbox is used to seal the given secret #[derive(Serialize, Deserialize)] struct SecretBoxCipher { name: String, #[serde(serialize_with = "to_base64", deserialize_with = "from_base64")] nonce: Vec, #[serde(skip)] encrypted: bool, } impl Default for SecretBoxCipher { fn default() -> Self { let nonce = generate_random(BOX_NONCE_SIZE); Self { name: NAME_SECRET_BOX.into(), nonce, encrypted: false, } } } impl SecretBoxCipher { /// Seal the plaintext using the key and nonce. fn encrypt(&mut self, plaintext: &[u8], key: &[u8]) -> Result> { if self.encrypted { return Err(SigstoreError::PrivateKeyEncryptError( "Encrypt must only be called once for each cipher instance".into(), )); } self.encrypted = true; #[allow(deprecated)] let nonce = crypto_secretbox::Nonce::from_slice(&self.nonce); #[allow(deprecated)] let key = crypto_secretbox::Key::from_slice(key); let mut cipher = crypto_secretbox::XSalsa20Poly1305::new(key); cipher .encrypt(nonce, plaintext) .map_err(|e| SigstoreError::PrivateKeyEncryptError(e.to_string())) } /// Unseal the ciphertext using the key fn decrypt(&self, ciphertext: &[u8], key: &[u8]) -> Result> { #[allow(deprecated)] let nonce = crypto_secretbox::Nonce::from_slice(&self.nonce); #[allow(deprecated)] let key = crypto_secretbox::Key::from_slice(key); let mut cipher = crypto_secretbox::XSalsa20Poly1305::new(key); cipher .decrypt(nonce, ciphertext) .map_err(|e| SigstoreError::PrivateKeyEncryptError(e.to_string())) } } /// `Data` is all content of a encrypted private key. 
#[derive(Serialize, Deserialize)] struct Data { kdf: ScryptKDF, cipher: SecretBoxCipher, #[serde( rename = "ciphertext", serialize_with = "to_base64", deserialize_with = "from_base64" )] cipher_text: Vec, } /// Generate a random Vec of given length. fn generate_random(len: u32) -> Vec { let mut res = Vec::new(); for _ in 0..len { res.push(rand::thread_rng().r#gen()); } res } /// Encrypt the given plaintext using a derived key from /// password. In sigstore, it is used to encrypt the /// private key. pub fn encrypt(plaintext: &[u8], password: &[u8]) -> Result> { let kdf = ScryptKDF::default(); let key = kdf.key(password)?; let mut box_cipher = SecretBoxCipher::default(); let cipher_text = box_cipher.encrypt(plaintext, &key)?; let data = Data { kdf, cipher: box_cipher, cipher_text, }; let res = serde_json::to_vec(&data)?; Ok(res) } /// Encrypt the given plaintext using a derived key from /// password. In sigstore, it is used to decrypt the /// private key. pub fn decrypt(ciphertext: &[u8], password: &[u8]) -> Result> { let data: Data = serde_json::from_slice(ciphertext)?; if data.cipher.name != NAME_SECRET_BOX { return Err(SigstoreError::PrivateKeyDecryptError(format!( "Unknown cipher name: {}", data.cipher.name ))); } if data.kdf.name != NAME_SCRYPT { return Err(SigstoreError::PrivateKeyDecryptError(format!( "Unknown kdf name: {}", data.kdf.name ))); } data.kdf.check_params()?; let key = data.kdf.key(password)?; data.cipher.decrypt(&data.cipher_text, &key) } #[cfg(test)] mod tests { use assert_json_diff::assert_json_eq; use serde_json::json; use crate::crypto::signing_key::kdf::Data; /// This test will firstly deserialize the given KDF /// payload generated from cosign in golang, and then /// serialize the generated object into a new string. 
#[test] fn serde_kdf() { let input_json = json!({ "kdf": { "name": "scrypt", "params": { "N": 32768u32, "r": 8u32, "p": 1u32 }, "salt": "+QseLb/O/0j2dG201MALNSv2xLcclv6UvpXZVvXGT0k=", }, "cipher": { "name": "nacl/secretbox", "nonce": "B5zH5d9AwoPkgaPAwIgpnft2BO6HZM/j", }, "ciphertext": "RQPqIJtoWjlVC49xXNG+zfkGrJF3DWIhdRArI0XeTjGx04QzjAAeybGgW4T9JWKuYYe49NIZCEOD2G8cisMJ9KXHPaxT6Q/lLa8XrkavRrzkaD3xj8tc2AAntvUz8OACtH3zmimeFLr+EtecDb/UNjNFCtW1SlIh6DsfTsbBL67uQqLrFQMW8r70SvsZLkXV8mFhMsKyVryWlQ==", }); let data: Data = serde_json::from_value(input_json.clone()).expect("Cannot deserialize json Data"); let actual_json = serde_json::to_value(data).expect("Cannot serialize Data back to JSON"); assert_json_eq!(input_json, actual_json); } } ================================================ FILE: src/crypto/signing_key/mod.rs ================================================ // // Copyright 2022 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! # Keys Interface //! //! This mod includes asymmetric key pair generation, exporting/importing, //! signing and verification key derivation. All the above features are //! given by two enums: //! * [`SigStoreKeyPair`]: an abstraction for asymmetric encryption key pairs. //! * [`SigStoreSigner`]: an abstraction for digital signing algorithms. //! //! The [`SigStoreKeyPair`] now includes the key types of the following algorithms: //! * [`SigStoreKeyPair::RSA`]: RSA key pair //! 
* [`SigStoreKeyPair::ECDSA`]: Elliptic curve key pair //! * [`SigStoreKeyPair::ED25519`]: Edwards curve-25519 key pair //! //! The [`SigStoreSigner`] now includes the following signing schemes: //! * [`SigStoreSigner::RSA_PSS_SHA256`]: RSA signatures using PSS padding and SHA-256. //! * [`SigStoreSigner::RSA_PSS_SHA384`]: RSA signatures using PSS padding and SHA-384. //! * [`SigStoreSigner::RSA_PSS_SHA512`]: RSA signatures using PSS padding and SHA-512. //! * [`SigStoreSigner::RSA_PKCS1_SHA256`]: RSA signatures using PKCS#1v1.5 padding and SHA-256. //! * [`SigStoreSigner::RSA_PKCS1_SHA384`]: RSA signatures using PKCS#1v1.5 padding and SHA-384. //! * [`SigStoreSigner::RSA_PKCS1_SHA512`]: RSA signatures using PKCS#1v1.5 padding and SHA-512. //! * [`SigStoreSigner::ECDSA_P256_SHA256_ASN1`]: ASN.1 DER-encoded ECDSA //! signatures using the P-256 curve and SHA-256. //! * [`SigStoreSigner::ECDSA_P384_SHA384_ASN1`]: ASN.1 DER-encoded ECDSA //! signatures using the P-384 curve and SHA-384. //! * [`SigStoreSigner::ED25519`]: ECDSA signature using SHA2-512 //! as the digest function and curve edwards25519. //! //! # Simple Usages //! //! ```rust //! use sigstore::crypto::signing_key::SigStoreSigner; //! use sigstore::crypto::SigningScheme; //! use sigstore::crypto::Signature; //! //! let test_data = b"test message"; //! // generate a key pair for ECDSA_P256_SHA256_ASN1 //! let signer = SigningScheme::ECDSA_P256_SHA256_ASN1.create_signer().unwrap(); //! //! // signing some message and get the message //! let sig = signer.sign(test_data).unwrap(); //! //! // get the public key to verify //! let verification_key = signer.to_verification_key().unwrap(); //! //! // do verification //! let res = verification_key.verify_signature( //! Signature::Raw(&sig), //! test_data, //! ); //! //! assert!(res.is_ok()); //! ``` //! //! 
More use cases please refer to <`https://github.com/sigstore/sigstore-rs/tree/main/examples/key_interface`> use elliptic_curve::zeroize::Zeroizing; use crate::errors::*; use self::{ ecdsa::{ECDSAKeys, ec::EcdsaSigner}, ed25519::{Ed25519Keys, Ed25519Signer}, rsa::{DigestAlgorithm, PaddingScheme, RSASigner, keypair::RSAKeys}, }; use super::{SigningScheme, verification_key::CosignVerificationKey}; pub mod ecdsa; pub mod ed25519; pub mod kdf; pub mod rsa; /// The label for pem of cosign generated encrypted private keys. pub const COSIGN_PRIVATE_KEY_PEM_LABEL: &str = "ENCRYPTED COSIGN PRIVATE KEY"; /// The label for pem of public keys. pub const PUBLIC_KEY_PEM_LABEL: &str = "PUBLIC KEY"; /// The label for pem of sigstore generated encrypted private keys. pub const SIGSTORE_PRIVATE_KEY_PEM_LABEL: &str = "ENCRYPTED SIGSTORE PRIVATE KEY"; /// The label for pem of private keys. pub const PRIVATE_KEY_PEM_LABEL: &str = "PRIVATE KEY"; /// The label for pem of RSA private keys. pub const RSA_PRIVATE_KEY_PEM_LABEL: &str = "RSA PRIVATE KEY"; /// Every signing scheme must implement this interface. /// All private export methods using the wrapper `Zeroizing`. /// It will tell the compiler when the /// result der object is dropped, the relative memory will /// be flushed to zero to avoid leaving the private key in /// the ram. pub trait KeyPair { /// `public_key_to_pem` will export the PEM-encoded public key. fn public_key_to_pem(&self) -> Result; /// `public_key_to_der` will export the asn.1 PKIX public key. fn public_key_to_der(&self) -> Result>; /// `private_key_to_encrypted_pem` will export the encrypted asn.1 pkcs8 private key. /// This encryption follows the go-lang version in /// using nacl secretbox. fn private_key_to_encrypted_pem(&self, password: &[u8]) -> Result>; /// `private_key_to_pem` will export the PEM-encoded pkcs8 private key. fn private_key_to_pem(&self) -> Result>; /// `private_key_to_der` will export the asn.1 pkcs8 private key. 
fn private_key_to_der(&self) -> Result<Zeroizing<Vec<u8>>>;

    /// `to_verification_key` will derive the `CosignVerificationKey` from
    /// the public key.
    fn to_verification_key(
        &self,
        signature_digest_algorithm: &SigningScheme,
    ) -> Result<CosignVerificationKey>;
}

/// Wrapper for different kinds of keys.
pub enum SigStoreKeyPair {
    ECDSA(ECDSAKeys),
    ED25519(Ed25519Keys),
    RSA(RSAKeys),
}

impl std::fmt::Display for SigStoreKeyPair {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            SigStoreKeyPair::ECDSA(_) => write!(f, "EC Key"),
            SigStoreKeyPair::ED25519(_) => write!(f, "Ed25519 Key"),
            SigStoreKeyPair::RSA(_) => write!(f, "RSA Key"),
        }
    }
}

/// This macro helps to reduce duplicated code: try each key type's
/// constructor in turn and wrap the first one that succeeds.
macro_rules! sigstore_keypair_from {
    ($func: ident ($($args:expr),*)) => {
        if let Ok(keys) = ECDSAKeys::$func($($args,)*) {
            Ok(SigStoreKeyPair::ECDSA(keys))
        } else if let Ok(keys) = Ed25519Keys::$func($($args,)*) {
            Ok(SigStoreKeyPair::ED25519(keys))
        } else if let Ok(keys) = RSAKeys::$func($($args,)*) {
            Ok(SigStoreKeyPair::RSA(keys))
        } else {
            Err(SigstoreError::KeyParseError("Unsupported key type".to_string()))
        }
    }
}

/// This macro helps to reduce duplicated code: dispatch a `KeyPair`
/// method call to the concrete key type held by the enum.
macro_rules! sigstore_keypair_code {
    ($func: ident ($($args:expr),*), $obj:ident) => {
        match $obj {
            SigStoreKeyPair::ECDSA(keys) => keys.as_inner().$func($($args,)*),
            SigStoreKeyPair::ED25519(keys) => keys.$func($($args,)*),
            SigStoreKeyPair::RSA(keys) => keys.$func($($args,)*),
        }
    }
}

impl SigStoreKeyPair {
    /// Builds a `SigStoreKeyPair` from pkcs8 PEM-encoded private key.
    pub fn from_pem(pem_data: &[u8]) -> Result<Self> {
        sigstore_keypair_from!(from_pem(pem_data))
    }

    /// Builds a `SigStoreKeyPair` from pkcs8 DER-encoded private key.
    pub fn from_der(private_key: &[u8]) -> Result<Self> {
        sigstore_keypair_from!(from_der(private_key))
    }

    /// Builds a `SigStoreKeyPair` from encrypted pkcs8 PEM-encoded private key.
    pub fn from_encrypted_pem(pem_data: &[u8], password: &[u8]) -> Result<Self> {
        sigstore_keypair_from!(from_encrypted_pem(pem_data, password))
    }

    /// `public_key_to_pem` will export the PEM-encoded public key.
    pub fn public_key_to_pem(&self) -> Result<String> {
        sigstore_keypair_code!(public_key_to_pem(), self)
    }

    /// `public_key_to_der` will export the asn.1 PKIX public key.
    pub fn public_key_to_der(&self) -> Result<Vec<u8>> {
        sigstore_keypair_code!(public_key_to_der(), self)
    }

    /// `private_key_to_encrypted_pem` will export the encrypted asn.1 pkcs8 private key.
    /// This encryption follows the go-lang version in
    /// <https://github.com/theupdateframework/go-tuf/blob/master/encrypted/encrypted.go>
    /// using nacl secretbox.
    pub fn private_key_to_encrypted_pem(&self, password: &[u8]) -> Result<Zeroizing<String>> {
        sigstore_keypair_code!(private_key_to_encrypted_pem(password), self)
    }

    /// `private_key_to_pem` will export the PEM-encoded pkcs8 private key.
    pub fn private_key_to_pem(&self) -> Result<Zeroizing<String>> {
        sigstore_keypair_code!(private_key_to_pem(), self)
    }

    /// `private_key_to_der` will export the asn.1 pkcs8 private key.
    pub fn private_key_to_der(&self) -> Result<Zeroizing<Vec<u8>>> {
        sigstore_keypair_code!(private_key_to_der(), self)
    }

    /// `to_verification_key` will derive the `CosignVerificationKey` from
    /// the public key.
    pub fn to_verification_key(
        &self,
        signing_scheme: &SigningScheme,
    ) -> Result<CosignVerificationKey> {
        sigstore_keypair_code!(to_verification_key(signing_scheme), self)
    }

    /// Convert this KeyPair into a [`SigStoreSigner`] due to the given
    /// signing scheme. If the key type does not match the given
    /// signing scheme, an error will occur.
    pub fn to_sigstore_signer(&self, signing_scheme: &SigningScheme) -> Result<SigStoreSigner> {
        match self {
            SigStoreKeyPair::ECDSA(keys) => match signing_scheme {
                SigningScheme::ECDSA_P256_SHA256_ASN1 => match keys {
                    ECDSAKeys::P256(key) => {
                        let signer = EcdsaSigner::from_ecdsa_keys(key)?;
                        Ok(SigStoreSigner::ECDSA_P256_SHA256_ASN1(signer))
                    }
                    ECDSAKeys::P384(_) => Err(SigstoreError::UnmatchedKeyAndSigningScheme {
                        key_typ: keys.to_string(),
                        scheme: signing_scheme.to_string(),
                    }),
                },
                SigningScheme::ECDSA_P384_SHA384_ASN1 => match keys {
                    ECDSAKeys::P384(key) => {
                        let signer = EcdsaSigner::from_ecdsa_keys(key)?;
                        Ok(SigStoreSigner::ECDSA_P384_SHA384_ASN1(signer))
                    }
                    ECDSAKeys::P256(_) => Err(SigstoreError::UnmatchedKeyAndSigningScheme {
                        key_typ: keys.to_string(),
                        scheme: signing_scheme.to_string(),
                    }),
                },
                _ => Err(SigstoreError::UnmatchedKeyAndSigningScheme {
                    key_typ: self.to_string(),
                    scheme: signing_scheme.to_string(),
                }),
            },
            SigStoreKeyPair::ED25519(keys) => {
                if *signing_scheme != SigningScheme::ED25519 {
                    Err(SigstoreError::UnmatchedKeyAndSigningScheme {
                        key_typ: self.to_string(),
                        scheme: signing_scheme.to_string(),
                    })
                } else {
                    keys.to_sigstore_signer()
                }
            }
            SigStoreKeyPair::RSA(keys) => match signing_scheme {
                SigningScheme::RSA_PSS_SHA256(_) => {
                    keys.to_sigstore_signer(DigestAlgorithm::Sha256, PaddingScheme::PSS)
                }
                SigningScheme::RSA_PSS_SHA384(_) => {
                    keys.to_sigstore_signer(DigestAlgorithm::Sha384, PaddingScheme::PSS)
                }
                SigningScheme::RSA_PSS_SHA512(_) => {
                    keys.to_sigstore_signer(DigestAlgorithm::Sha512, PaddingScheme::PSS)
                }
                SigningScheme::RSA_PKCS1_SHA256(_) => {
                    keys.to_sigstore_signer(DigestAlgorithm::Sha256, PaddingScheme::PKCS1v15)
                }
                SigningScheme::RSA_PKCS1_SHA384(_) => {
                    keys.to_sigstore_signer(DigestAlgorithm::Sha384, PaddingScheme::PKCS1v15)
                }
                SigningScheme::RSA_PKCS1_SHA512(_) => {
                    keys.to_sigstore_signer(DigestAlgorithm::Sha512, PaddingScheme::PKCS1v15)
                }
                _ => Err(SigstoreError::UnmatchedKeyAndSigningScheme {
                    key_typ: self.to_string(),
                    scheme: signing_scheme.to_string(),
                }),
            },
        }
    }
}

/// `Signer` trait is an abstraction of a specific set of asymmetric
/// private key, hash function and (if needs) padding algorithm. This
/// trait helps to construct the `SigStoreSigner` enum.
pub trait Signer {
    /// Return the ref to the keypair inside the signer
    fn key_pair(&self) -> &dyn KeyPair;

    /// `sign` will sign the given data, and return the signature.
    fn sign(&self, msg: &[u8]) -> Result<Vec<u8>>;
}

#[derive(Debug)]
#[allow(non_camel_case_types)]
pub enum SigStoreSigner {
    RSA_PSS_SHA256(RSASigner),
    RSA_PSS_SHA384(RSASigner),
    RSA_PSS_SHA512(RSASigner),
    RSA_PKCS1_SHA256(RSASigner),
    RSA_PKCS1_SHA384(RSASigner),
    RSA_PKCS1_SHA512(RSASigner),
    ECDSA_P256_SHA256_ASN1(EcdsaSigner),
    ECDSA_P384_SHA384_ASN1(EcdsaSigner),
    ED25519(Ed25519Signer),
}

impl SigStoreSigner {
    /// Return the inner `Signer` of the enum. This function
    /// is useful in the inner interface conversion.
    fn as_inner(&self) -> &dyn Signer {
        match self {
            SigStoreSigner::ECDSA_P256_SHA256_ASN1(inner) => inner,
            SigStoreSigner::ECDSA_P384_SHA384_ASN1(inner) => inner,
            SigStoreSigner::ED25519(inner) => inner,
            SigStoreSigner::RSA_PSS_SHA256(inner) => inner,
            SigStoreSigner::RSA_PSS_SHA384(inner) => inner,
            SigStoreSigner::RSA_PSS_SHA512(inner) => inner,
            SigStoreSigner::RSA_PKCS1_SHA256(inner) => inner,
            SigStoreSigner::RSA_PKCS1_SHA384(inner) => inner,
            SigStoreSigner::RSA_PKCS1_SHA512(inner) => inner,
        }
    }

    /// `sign` will sign the given data, and return the signature.
    pub fn sign(&self, msg: &[u8]) -> Result<Vec<u8>> {
        self.as_inner().sign(msg)
    }

    /// `to_verification_key` will derive the verification_key for the `SigStoreSigner`.
    pub fn to_verification_key(&self) -> Result<CosignVerificationKey> {
        let signing_scheme = match self {
            SigStoreSigner::ECDSA_P256_SHA256_ASN1(_) => SigningScheme::ECDSA_P256_SHA256_ASN1,
            SigStoreSigner::ECDSA_P384_SHA384_ASN1(_) => SigningScheme::ECDSA_P384_SHA384_ASN1,
            SigStoreSigner::ED25519(_) => SigningScheme::ED25519,
            // NOTE: the modulus size (`0`) is a placeholder; it is not
            // used when deriving the verification key.
            SigStoreSigner::RSA_PSS_SHA256(_) => SigningScheme::RSA_PSS_SHA256(0),
            SigStoreSigner::RSA_PSS_SHA384(_) => SigningScheme::RSA_PSS_SHA384(0),
            SigStoreSigner::RSA_PSS_SHA512(_) => SigningScheme::RSA_PSS_SHA512(0),
            SigStoreSigner::RSA_PKCS1_SHA256(_) => SigningScheme::RSA_PKCS1_SHA256(0),
            SigStoreSigner::RSA_PKCS1_SHA384(_) => SigningScheme::RSA_PKCS1_SHA384(0),
            SigStoreSigner::RSA_PKCS1_SHA512(_) => SigningScheme::RSA_PKCS1_SHA512(0),
        };
        self.as_inner()
            .key_pair()
            .to_verification_key(&signing_scheme)
    }

    /// `to_sigstore_keypair` will return the `SigStoreKeyPair` enum due to `SigStoreSigner`.
    pub fn to_sigstore_keypair(&self) -> Result<SigStoreKeyPair> {
        Ok(match self {
            SigStoreSigner::ECDSA_P256_SHA256_ASN1(inner) => {
                SigStoreKeyPair::ECDSA(inner.ecdsa_keys().to_wrapped_ecdsa_keys()?)
            }
            SigStoreSigner::ECDSA_P384_SHA384_ASN1(inner) => {
                SigStoreKeyPair::ECDSA(inner.ecdsa_keys().to_wrapped_ecdsa_keys()?)
            }
            SigStoreSigner::ED25519(inner) => {
                SigStoreKeyPair::ED25519(Ed25519Keys::from_ed25519key(inner.ed25519_keys())?)
            }
            SigStoreSigner::RSA_PSS_SHA256(inner) => SigStoreKeyPair::RSA(inner.rsa_keys().clone()),
            SigStoreSigner::RSA_PSS_SHA384(inner) => SigStoreKeyPair::RSA(inner.rsa_keys().clone()),
            SigStoreSigner::RSA_PSS_SHA512(inner) => SigStoreKeyPair::RSA(inner.rsa_keys().clone()),
            SigStoreSigner::RSA_PKCS1_SHA256(inner) => {
                SigStoreKeyPair::RSA(inner.rsa_keys().clone())
            }
            SigStoreSigner::RSA_PKCS1_SHA384(inner) => {
                SigStoreKeyPair::RSA(inner.rsa_keys().clone())
            }
            SigStoreSigner::RSA_PKCS1_SHA512(inner) => {
                SigStoreKeyPair::RSA(inner.rsa_keys().clone())
            }
        })
    }
}

#[cfg(test)]
mod tests {
    use rstest::rstest;

    use crate::crypto::{Signature, SigningScheme, verification_key::CosignVerificationKey};

    /// This is a test MESSAGE used to be signed by all signing test.
    pub const MESSAGE: &str = r#"{
    "critical": {
        "identity": {
            "docker-reference": "registry-testing.svc.lan/busybox"
        },
        "image": {
            "docker-manifest-digest": "sha256:f3cfc9d0dbf931d3db4685ec659b7ac68e2a578219da4aae65427886e649b06b"
        },
        "type": "cosign container image signature"
    },
    "optional": null
}"#;

    /// This test will do the following things:
    /// * Randomly generate a key pair due to the given signing scheme.
    /// * Signing the MESSAGE and generate a signature using
    ///   the private key.
    /// * Derive the verification key using both `from_sigstore_signer`
    ///   and `to_verification_key`.
    /// * Verify the signature with the public key.
#[rstest] #[case(SigningScheme::ECDSA_P256_SHA256_ASN1)] #[case(SigningScheme::ECDSA_P384_SHA384_ASN1)] #[case(SigningScheme::ED25519)] fn sigstore_signing(#[case] signing_scheme: SigningScheme) { let signer = signing_scheme .create_signer() .unwrap_or_else(|_| panic!("create SigStoreSigner with {:?} failed", signing_scheme)); let key_pair = signer .to_sigstore_keypair() .expect("convert SigStoreSigner to SigStoreKeypair failed."); let _pubkey = key_pair .public_key_to_pem() .expect("export public key in PEM format failed."); let sig = signer .sign(MESSAGE.as_bytes()) .expect("sign message failed."); let _verification_key = signer .to_verification_key() .expect("derive signer into verification key failed."); let verification_key = CosignVerificationKey::from_sigstore_signer(&signer) .expect("derive verification key from signer failed."); let signature = Signature::Raw(&sig); let verify_res = verification_key.verify_signature(signature, MESSAGE.as_bytes()); assert!(verify_res.is_ok(), "can not verify the signature."); } } ================================================ FILE: src/crypto/signing_key/rsa/keypair.rs ================================================ // // Copyright 2022 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! # RSA Key Pair //! //! This is a wrapper for Rust Crypto. RSA Key Pair //! is the struct [`RSAKeys`], which implements [`KeyPair`] //! trait, and provides different exportation and importation operations //! 
from/to der/pem bytes. //! //! # RSA Key Pair Operations //! //! For example, we generate an RSA key pair and export. //! //! ```rust //! use sigstore::crypto::signing_key::{rsa::keypair::RSAKeys, KeyPair}; //! //! let rsa_keys = RSAKeys::new(2048).unwrap(); //! //! // export the pem encoded public key. //! let pubkey = rsa_keys.public_key_to_pem().unwrap(); //! //! // export the private key using sigstore encryption. //! let privkey_pem = rsa_keys.private_key_to_encrypted_pem(b"password").unwrap(); //! //! // import the key pair from the encrypted pem. //! let rsa_keys2 = RSAKeys::from_encrypted_pem(privkey_pem.as_bytes(), b"password").unwrap(); //! ``` use pkcs8::{DecodePrivateKey, EncodePrivateKey, EncodePublicKey}; use rsa::{ RsaPrivateKey, RsaPublicKey, pkcs1::DecodeRsaPrivateKey, pkcs1v15::SigningKey, pss::BlindedSigningKey, }; use crate::{ crypto::{CosignVerificationKey, SigStoreSigner, SigningScheme}, errors::*, }; use crate::crypto::signing_key::{ COSIGN_PRIVATE_KEY_PEM_LABEL, KeyPair, PRIVATE_KEY_PEM_LABEL, RSA_PRIVATE_KEY_PEM_LABEL, SIGSTORE_PRIVATE_KEY_PEM_LABEL, kdf, }; use super::{DigestAlgorithm, PaddingScheme, RSASigner}; #[derive(Clone, Debug)] pub struct RSAKeys { pub(crate) private_key: RsaPrivateKey, public_key: RsaPublicKey, } impl RSAKeys { /// Create a new `RSAKeys` Object. /// The private key will be randomly /// generated. pub fn new(bit_size: usize) -> Result { let mut rng = rand::rngs::OsRng {}; let private_key = RsaPrivateKey::new(&mut rng, bit_size)?; let public_key = RsaPublicKey::from(&private_key); Ok(Self { private_key, public_key, }) } /// Create a new `RSAKeys` Object from given `RSAKeys` Object. pub fn from_rsa_privatekey_key(key: &RSAKeys) -> Result { let priv_key = key.private_key_to_der()?; RSAKeys::from_der(&priv_key) } /// Builds a `RSAKeys` from encrypted pkcs8 PEM-encoded private key. /// The label should be [`COSIGN_PRIVATE_KEY_PEM_LABEL`] or /// [`SIGSTORE_PRIVATE_KEY_PEM_LABEL`]. 
pub fn from_encrypted_pem(encrypted_pem: &[u8], password: &[u8]) -> Result { let key = pem::parse(encrypted_pem)?; match key.tag() { COSIGN_PRIVATE_KEY_PEM_LABEL | SIGSTORE_PRIVATE_KEY_PEM_LABEL => { let der = kdf::decrypt(key.contents(), password)?; let pkcs8 = pkcs8::PrivateKeyInfo::try_from(&der[..]).map_err(|e| { SigstoreError::PKCS8Error(format!("Read PrivateKeyInfo failed: {e}")) })?; let private_key = RsaPrivateKey::try_from(pkcs8).map_err(|e| { SigstoreError::PKCS8Error(format!( "Convert from pkcs8 pem to rsa private key failed: {e}" )) })?; Ok(Self::from(private_key)) } RSA_PRIVATE_KEY_PEM_LABEL | PRIVATE_KEY_PEM_LABEL if password.is_empty() => { Self::from_pem(encrypted_pem) } RSA_PRIVATE_KEY_PEM_LABEL | PRIVATE_KEY_PEM_LABEL if !password.is_empty() => { Err(SigstoreError::PrivateKeyDecryptError( "Unencrypted private key but password provided".into(), )) } tag => Err(SigstoreError::PrivateKeyDecryptError(format!( "Unsupported pem tag {tag}" ))), } } /// Builds a `RSAKeys` from a pkcs8 PEM-encoded private key. /// The label of PEM should be [`PRIVATE_KEY_PEM_LABEL`] pub fn from_pem(pem: &[u8]) -> Result { let pem = std::str::from_utf8(pem)?; let (label, document) = pkcs8::SecretDocument::from_pem(pem) .map_err(|e| SigstoreError::PKCS8DerError(e.to_string()))?; match label { PRIVATE_KEY_PEM_LABEL => { let pkcs8 = pkcs8::PrivateKeyInfo::try_from(document.as_bytes()).map_err(|e| { SigstoreError::PKCS8Error(format!("Read PrivateKeyInfo failed: {e}")) })?; let private_key = RsaPrivateKey::try_from(pkcs8).map_err(|e| { SigstoreError::PKCS8Error(format!( "Convert from pkcs8 pem to rsa private key failed: {e}" )) })?; Ok(Self::from(private_key)) } RSA_PRIVATE_KEY_PEM_LABEL => { let private_key = RsaPrivateKey::from_pkcs1_der(document.as_bytes())?; Ok(Self::from(private_key)) } tag => Err(SigstoreError::PrivateKeyDecryptError(format!( "Unsupported pem tag {tag}" ))), } } /// Builds a `RSAKeys` from a pkcs8 asn.1 private key. 
pub fn from_der(der_bytes: &[u8]) -> Result { let private_key = RsaPrivateKey::from_pkcs8_der(der_bytes).map_err(|e| { SigstoreError::PKCS8Error(format!( "Convert from pkcs8 der to rsa private key failed: {e}" )) })?; Ok(Self::from(private_key)) } /// `to_sigstore_signer` will create the [`SigStoreSigner`] using /// this rsa key pair. pub fn to_sigstore_signer( &self, digest_algorithm: DigestAlgorithm, padding_scheme: PaddingScheme, ) -> Result { let private_key = self.private_key.clone(); Ok(match padding_scheme { PaddingScheme::PSS => match digest_algorithm { DigestAlgorithm::Sha256 => { SigStoreSigner::RSA_PSS_SHA256(RSASigner::RSA_PSS_SHA256( BlindedSigningKey::::new(private_key), self.clone(), )) } DigestAlgorithm::Sha384 => { SigStoreSigner::RSA_PSS_SHA384(RSASigner::RSA_PSS_SHA384( BlindedSigningKey::::new(private_key), self.clone(), )) } DigestAlgorithm::Sha512 => { SigStoreSigner::RSA_PSS_SHA512(RSASigner::RSA_PSS_SHA512( BlindedSigningKey::::new(private_key), self.clone(), )) } }, PaddingScheme::PKCS1v15 => match digest_algorithm { DigestAlgorithm::Sha256 => { SigStoreSigner::RSA_PKCS1_SHA256(RSASigner::RSA_PKCS1_SHA256( SigningKey::::new(private_key), self.clone(), )) } DigestAlgorithm::Sha384 => { SigStoreSigner::RSA_PKCS1_SHA384(RSASigner::RSA_PKCS1_SHA384( SigningKey::::new(private_key), self.clone(), )) } DigestAlgorithm::Sha512 => { SigStoreSigner::RSA_PKCS1_SHA512(RSASigner::RSA_PKCS1_SHA512( SigningKey::::new(private_key), self.clone(), )) } }, }) } } impl From for RSAKeys { fn from(private_key: RsaPrivateKey) -> Self { Self { private_key: private_key.clone(), public_key: RsaPublicKey::from(private_key), } } } impl KeyPair for RSAKeys { /// Return the public key in PEM-encoded SPKI format. fn public_key_to_pem(&self) -> Result { self.public_key .to_public_key_pem(pkcs8::LineEnding::LF) .map_err(|e| SigstoreError::PKCS8SpkiError(e.to_string())) } /// Return the public key in asn.1 SPKI format. 
fn public_key_to_der(&self) -> Result> { Ok(self .public_key .to_public_key_der() .map_err(|e| SigstoreError::PKCS8SpkiError(e.to_string()))? .to_vec()) } /// Return the encrypted asn.1 pkcs8 private key. fn private_key_to_encrypted_pem(&self, password: &[u8]) -> Result> { let der = self.private_key_to_der()?; let pem = pem::Pem::new( SIGSTORE_PRIVATE_KEY_PEM_LABEL, kdf::encrypt(&der, password)?, ); let pem = pem::encode(&pem); Ok(zeroize::Zeroizing::new(pem)) } /// Return the private key in pkcs8 PEM-encoded format. fn private_key_to_pem(&self) -> Result> { self.private_key .to_pkcs8_pem(pkcs8::LineEnding::LF) .map_err(|e| SigstoreError::PKCS8SpkiError(e.to_string())) } /// Return the private key in asn.1 pkcs8 format. fn private_key_to_der(&self) -> Result>> { let pkcs8 = self .private_key .to_pkcs8_der() .map_err(|e| SigstoreError::PKCS8Error(e.to_string()))?; Ok(pkcs8.to_bytes()) } /// Derive the relative [`CosignVerificationKey`]. fn to_verification_key(&self, signing_scheme: &SigningScheme) -> Result { let der = self.public_key_to_der()?; let res = CosignVerificationKey::from_der(&der, signing_scheme)?; Ok(res) } } #[cfg(test)] mod tests { use std::fs; use rstest::rstest; use crate::crypto::{ Signature, SigningScheme, signing_key::{ KeyPair, Signer, rsa::{DigestAlgorithm, PaddingScheme, RSASigner}, tests::MESSAGE, }, verification_key::CosignVerificationKey, }; use super::RSAKeys; const PASSWORD: &[u8] = b"123"; const EMPTY_PASSWORD: &[u8] = b""; const KEY_SIZE: usize = 2048; /// This test will try to read an unencrypted rsa /// private key file, which is generated by `sigstore`. #[test] fn rsa_from_unencrypted_pem() { let content = fs::read("tests/data/keys/rsa_private.key") .expect("read tests/data/keys/rsa_private.key failed."); let key = RSAKeys::from_pem(&content); assert!( key.is_ok(), "can not create RSAKeys from unencrypted PEM file." ); } /// This test will try to read an encrypted rsa /// private key file, which is generated by `sigstore`. 
#[rstest]
#[case("tests/data/keys/rsa_encrypted_private.key", PASSWORD)]
#[case("tests/data/keys/rsa_private.key", EMPTY_PASSWORD)]
fn rsa_from_encrypted_pem(#[case] keypath: &str, #[case] password: &[u8]) {
    // Report the path that actually failed instead of a hard-coded one.
    let content = fs::read(keypath).unwrap_or_else(|e| panic!("read {keypath} failed: {e}"));
    let key = RSAKeys::from_encrypted_pem(&content, password);
    assert!(
        key.is_ok(),
        "can not create RSAKeys from encrypted PEM file"
    );
}

/// This test will try to encrypt a rsa keypair and
/// return the pem-encoded contents. The bit size
/// of the rsa key is [`KEY_SIZE`].
#[rstest]
#[case(PASSWORD)]
// Fixed: this case previously passed `PASSWORD`, so the empty-password
// path advertised by its name was never exercised.
#[case::empty_password(EMPTY_PASSWORD)]
fn rsa_to_encrypted_pem(#[case] password: &[u8]) {
    let key = RSAKeys::new(KEY_SIZE).expect("create rsa keys failed.");
    let key = key.private_key_to_encrypted_pem(password);
    assert!(
        key.is_ok(),
        "can not export private key in encrypted PEM format."
    );
}

/// This test will ensure that an unencrypted
/// keypair will fail to read if a non-empty
/// password is given.
#[test]
fn rsa_error_unencrypted_pem_password() {
    let content = fs::read("tests/data/keys/rsa_private.key").expect("read key failed.");
    let key = RSAKeys::from_encrypted_pem(&content, PASSWORD);
    assert!(
        key.is_err_and(|e| e
            .to_string()
            .contains("Unencrypted private key but password provided")),
        "read unencrypted key with password"
    );
}

/// This test will generate a RSAKeys, encode the private key
/// it into pem, and decode a new key from the generated pem-encoded
/// private key.
#[test]
fn rsa_to_and_from_pem() {
    let key = RSAKeys::new(KEY_SIZE).expect("create rsa keys failed.");
    let key = key
        .private_key_to_pem()
        .expect("export private key to PEM format failed.");
    let key = RSAKeys::from_pem(key.as_bytes());
    assert!(key.is_ok(), "can not create RSAKeys from PEM string.");
}

/// This test will generate a RSAKeys, encode the private key
/// it into pem, and decode a new key from the generated pem-encoded
/// private key.
#[rstest] #[case(PASSWORD)] #[case::empty_password(EMPTY_PASSWORD)] fn rsa_to_and_from_encrypted_pem(#[case] password: &[u8]) { let key = RSAKeys::new(KEY_SIZE).expect("create rsa keys failed."); let key = key .private_key_to_encrypted_pem(password) .expect("export private key to PEM format failed."); let key = RSAKeys::from_encrypted_pem(key.as_bytes(), password); assert!(key.is_ok(), "can not create RSAKeys from PEM string."); } /// This test will generate a RSAKeys, encode the private key /// it into der, and decode a new key from the generated der-encoded /// private key. #[test] fn rsa_to_and_from_der() { let key = RSAKeys::new(KEY_SIZE).expect("create rsa keys failed."); let key = key .private_key_to_der() .expect("export private key to DER format failed."); let key = RSAKeys::from_der(&key); assert!(key.is_ok(), "can not create RSAKeys from DER bytes.") } /// This test will generate a rsa keypair. /// And then use the verification key interface to instantial /// a VerificationKey object. #[test] fn rsa_generate_public_key() { let key = RSAKeys::new(KEY_SIZE).expect("create rsa keys failed."); let pubkey = key .public_key_to_pem() .expect("export private key to PEM format failed."); assert!( CosignVerificationKey::from_pem(pubkey.as_bytes(), &SigningScheme::RSA_PSS_SHA256(0),) .is_ok() ); let pubkey = key .public_key_to_der() .expect("export private key to DER format failed."); assert!( CosignVerificationKey::from_der(&pubkey, &SigningScheme::RSA_PSS_SHA256(0)).is_ok(), "can not create CosignVerificationKey from der bytes." ); } /// This test will generate a rsa keypair. /// And then derive a `CosignVerificationKey` from it. #[test] fn rsa_derive_verification_key() { let key = RSAKeys::new(KEY_SIZE).expect("create rsa keys failed."); assert!( key.to_verification_key(&SigningScheme::RSA_PSS_SHA256(0)) .is_ok(), "can not create CosignVerificationKey from RSAKeys via `to_verification_key`." 
); } /// This test will do the following things: /// * Generate a rsa keypair. /// * Sign the MESSAGE with `RSA_PSS_SHA256` /// * Verify the signature using the public key. #[test] fn rsa_sign_and_verify() { let rsa_keys = RSAKeys::new(KEY_SIZE).expect("create rsa keys failed."); let pubkey = rsa_keys .public_key_to_pem() .expect("export private key to PEM format failed."); let signer = RSASigner::from_rsa_keys(&rsa_keys, DigestAlgorithm::Sha256, PaddingScheme::PSS); let sig = signer .sign(MESSAGE.as_bytes()) .expect("signing message failed."); let verification_key = CosignVerificationKey::from_pem(pubkey.as_bytes(), &SigningScheme::RSA_PSS_SHA256(0)) .expect("convert CosignVerificationKey from public key failed."); let signature = Signature::Raw(&sig); assert!( verification_key .verify_signature(signature, MESSAGE.as_bytes()) .is_ok(), "can not verify the signature." ); } } ================================================ FILE: src/crypto/signing_key/rsa/mod.rs ================================================ // // Copyright 2022 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! # RSA Signer //! //! RSA Signer support the following padding schemes: //! * `PSS` //! * `PKCS#1 v1.5` //! //! And the following digest algorithms: //! * `Sha256` //! * `Sha384` //! * `Sha512` //! //! # RSA Signer Operaion //! //! A [`RSASigner`] can be derived from a [`RSAKeys`] //! ```rust //! 
use sigstore::crypto::signing_key::{rsa::{RSASigner, keypair::RSAKeys, DigestAlgorithm, PaddingScheme}, KeyPair, Signer}; //! use sigstore::crypto::Signature; //! //! let rsa_keys = RSAKeys::new(2048).unwrap(); //! //! // create a signer //! let signer = RSASigner::from_rsa_keys(&rsa_keys, DigestAlgorithm::Sha256, PaddingScheme::PSS); //! //! // test message to be signed //! let message = b"some message"; //! //! // sign //! let signature_data = signer.sign(message).unwrap(); //! //! // export the [`CosignVerificationKey`] from the [`SigStoreSigner`], which //! // is used to verify the signature. //! let verification_key = signer.to_verification_key().unwrap(); //! //! // verify //! assert!(verification_key.verify_signature(Signature::Raw(&signature_data),message).is_ok()); //! ``` use ::rsa::{ pkcs1v15::SigningKey, pss::BlindedSigningKey, signature::{Keypair, RandomizedSigner, SignatureEncoding}, }; use self::keypair::RSAKeys; use crate::{crypto::CosignVerificationKey, errors::*}; use super::{KeyPair, Signer}; pub mod keypair; pub const DEFAULT_KEY_SIZE: usize = 2048; /// Different digest algorithms used in /// RSA-based signing algorithm. pub enum DigestAlgorithm { Sha256, Sha384, Sha512, } /// Different padding schemes used in /// RSA-based signing algorithm. /// * `PSS`: Probabilistic Signature Scheme, more secure than `PKCS1v15`. /// * `PKCS1v15`: also known as simply PKCS1, is a simple padding /// scheme developed for use with RSA keys. pub enum PaddingScheme { PSS, PKCS1v15, } /// Rsa signing scheme families: /// * `PKCS1v15`: PKCS#1 1.5 padding for RSA signatures. /// * `PSS`: RSA PSS padding for RSA signatures. 
/// /// Both schemes support the following digest algorithms: /// * `Sha256` /// * `Sha384` /// * `Sha512` #[derive(Debug)] #[allow(non_camel_case_types)] pub enum RSASigner { RSA_PSS_SHA256(BlindedSigningKey, RSAKeys), RSA_PSS_SHA384(BlindedSigningKey, RSAKeys), RSA_PSS_SHA512(BlindedSigningKey, RSAKeys), RSA_PKCS1_SHA256(SigningKey, RSAKeys), RSA_PKCS1_SHA384(SigningKey, RSAKeys), RSA_PKCS1_SHA512(SigningKey, RSAKeys), } /// helper to generate match arms macro_rules! iter_on_rsa { ($domain: ident, $match_item: expr, $signer: ident, $key: ident, $func: expr) => { match $match_item { $domain::RSA_PSS_SHA256($signer, $key) => $func, $domain::RSA_PSS_SHA384($signer, $key) => $func, $domain::RSA_PSS_SHA512($signer, $key) => $func, $domain::RSA_PKCS1_SHA256($signer, $key) => $func, $domain::RSA_PKCS1_SHA384($signer, $key) => $func, $domain::RSA_PKCS1_SHA512($signer, $key) => $func, } }; } impl RSASigner { pub fn from_rsa_keys( rsa_keys: &RSAKeys, digest_algorithm: DigestAlgorithm, padding_scheme: PaddingScheme, ) -> Self { let private_key = rsa_keys.private_key.clone(); match padding_scheme { PaddingScheme::PSS => match digest_algorithm { DigestAlgorithm::Sha256 => RSASigner::RSA_PSS_SHA256( BlindedSigningKey::::new(private_key), rsa_keys.clone(), ), DigestAlgorithm::Sha384 => RSASigner::RSA_PSS_SHA384( BlindedSigningKey::::new(private_key), rsa_keys.clone(), ), DigestAlgorithm::Sha512 => RSASigner::RSA_PSS_SHA512( BlindedSigningKey::::new(private_key), rsa_keys.clone(), ), }, PaddingScheme::PKCS1v15 => match digest_algorithm { DigestAlgorithm::Sha256 => RSASigner::RSA_PKCS1_SHA256( SigningKey::::new(private_key), rsa_keys.clone(), ), DigestAlgorithm::Sha384 => RSASigner::RSA_PKCS1_SHA384( SigningKey::::new(private_key), rsa_keys.clone(), ), DigestAlgorithm::Sha512 => RSASigner::RSA_PKCS1_SHA512( SigningKey::::new(private_key), rsa_keys.clone(), ), }, } } /// Return the ref to the [`RSAKeys`] inside the RSASigner pub fn rsa_keys(&self) -> &RSAKeys { 
iter_on_rsa!(RSASigner, self, _signer, key, key) } /// Return the related [`CosignVerificationKey`] of this RSASigner pub fn to_verification_key(&self) -> Result { Ok(match self { RSASigner::RSA_PSS_SHA256(signer, _) => { CosignVerificationKey::RSA_PSS_SHA256(signer.verifying_key()) } RSASigner::RSA_PSS_SHA384(signer, _) => { CosignVerificationKey::RSA_PSS_SHA384(signer.verifying_key()) } RSASigner::RSA_PSS_SHA512(signer, _) => { CosignVerificationKey::RSA_PSS_SHA512(signer.verifying_key()) } RSASigner::RSA_PKCS1_SHA256(signer, _) => { CosignVerificationKey::RSA_PKCS1_SHA256(signer.verifying_key()) } RSASigner::RSA_PKCS1_SHA384(signer, _) => { CosignVerificationKey::RSA_PKCS1_SHA384(signer.verifying_key()) } RSASigner::RSA_PKCS1_SHA512(signer, _) => { CosignVerificationKey::RSA_PKCS1_SHA512(signer.verifying_key()) } }) } } impl Signer for RSASigner { /// `sign` will sign the given data, and return the signature. fn sign(&self, msg: &[u8]) -> Result> { let mut rng = rand::thread_rng(); Ok(iter_on_rsa!( RSASigner, self, signer, _key, signer.sign_with_rng(&mut rng, msg).to_vec() )) } /// Return the ref to the [`KeyPair`] trait object inside the RSASigner fn key_pair(&self) -> &dyn KeyPair { iter_on_rsa!(RSASigner, self, _signer, key, key) } } #[cfg(test)] mod tests { use rstest::rstest; use super::{DEFAULT_KEY_SIZE, DigestAlgorithm, PaddingScheme, RSASigner, keypair::RSAKeys}; use crate::crypto::{ Signature, SigningScheme, signing_key::{KeyPair, Signer, tests::MESSAGE}, }; #[rstest] #[case( DigestAlgorithm::Sha256, PaddingScheme::PKCS1v15, SigningScheme::RSA_PKCS1_SHA256(0) )] #[case( DigestAlgorithm::Sha384, PaddingScheme::PKCS1v15, SigningScheme::RSA_PKCS1_SHA384(0) )] #[case( DigestAlgorithm::Sha512, PaddingScheme::PKCS1v15, SigningScheme::RSA_PKCS1_SHA512(0) )] #[case( DigestAlgorithm::Sha256, PaddingScheme::PSS, SigningScheme::RSA_PSS_SHA256(0) )] #[case( DigestAlgorithm::Sha384, PaddingScheme::PSS, SigningScheme::RSA_PSS_SHA384(0) )] #[case( 
DigestAlgorithm::Sha512, PaddingScheme::PSS, SigningScheme::RSA_PSS_SHA512(0) )] fn rsa_schemes( #[case] digest_algorithm: DigestAlgorithm, #[case] padding_scheme: PaddingScheme, #[case] signing_scheme: SigningScheme, ) { let rsa_keys = RSAKeys::new(DEFAULT_KEY_SIZE).expect("RSA keys generated failed."); let signer = RSASigner::from_rsa_keys(&rsa_keys, digest_algorithm, padding_scheme); let sig = signer.sign(MESSAGE.as_bytes()).expect("sign failed."); let vk = rsa_keys .to_verification_key(&signing_scheme) .expect("derive CosignVerificationKey failed."); let signature = Signature::Raw(&sig); vk.verify_signature(signature, MESSAGE.as_bytes()) .expect("can not verify the signature."); } } ================================================ FILE: src/crypto/transparency.rs ================================================ // Copyright 2023 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Types for Certificate Transparency validation. 
use const_oid::db::rfc6962::{CT_PRECERT_SCTS, CT_PRECERT_SIGNING_CERT}; use digest::Digest; use thiserror::Error; use tls_codec::{SerializeBytes, TlsByteVecU16, TlsByteVecU24, TlsSerializeBytes, TlsSize}; use tracing::debug; use x509_cert::{ Certificate, der, der::{Decode, Encode}, ext::pkix::{ ExtendedKeyUsage, SignedCertificateTimestamp, SignedCertificateTimestampList, sct::Version, }, }; use super::{ certificate, keyring::{Keyring, KeyringError}, }; use crate::fulcio::SigningCertificateDetachedSCT; fn cert_is_preissuer(cert: &Certificate) -> bool { let eku: ExtendedKeyUsage = match cert.tbs_certificate.get() { Ok(Some((_, ext))) => ext, _ => return false, }; eku.0.contains(&CT_PRECERT_SIGNING_CERT) } // fn find_issuer_cert(chain: &[Certificate]) -> Option<&Certificate> { let cert = if cert_is_preissuer(&chain[0]) { &chain[1] } else { &chain[0] }; certificate::is_ca(cert).ok()?; Some(cert) } #[derive(Debug, Error)] pub enum CertificateErrorKind { #[error("SCT list extension missing from leaf certificate")] LeafSCTMissing, #[error("cannot find leaf certificate's issuer")] IssuerMissing, #[error("cannot decode leaf certificate's issuer")] IssuerMalformed, #[error("cannot decode SCT")] LeafSCTMalformed, #[error(transparent)] Der(#[from] der::Error), #[error(transparent)] Tls(#[from] tls_codec::Error), } impl From for CertificateErrorKind { fn from(value: x509_cert::ext::pkix::Error) -> Self { match value { x509_cert::ext::pkix::Error::Der(e) => CertificateErrorKind::Der(e), x509_cert::ext::pkix::Error::Tls(e) => CertificateErrorKind::Tls(e), } } } #[derive(Debug, thiserror::Error)] pub enum SCTError { #[error("failed to extract SCT from certificate")] Parsing(#[from] CertificateErrorKind), #[error("failed to reconstruct signed payload")] Serialization(#[source] tls_codec::Error), #[error("failed to verify SCT")] Verification(#[from] KeyringError), } #[derive(PartialEq, Debug, TlsSerializeBytes, TlsSize)] #[repr(u8)] enum SignatureType { CertificateTimestamp = 0, 
TreeHash = 1, } #[derive(PartialEq, Debug)] #[repr(u16)] enum LogEntryType { X509Entry = 0, PrecertEntry = 1, } #[derive(PartialEq, Debug, TlsSerializeBytes, TlsSize)] struct PreCert { // opaque issuer_key_hash[32]; issuer_key_hash: [u8; 32], // opaque TBSCertificate<1..2^24-1>; tbs_certificate: TlsByteVecU24, } #[derive(PartialEq, Debug, TlsSerializeBytes, TlsSize)] #[repr(u16)] enum SignedEntry { // opaque ASN.1Cert<1..2^24-1>; #[tls_codec(discriminant = "LogEntryType::X509Entry")] X509Entry(TlsByteVecU24), #[tls_codec(discriminant = "LogEntryType::PrecertEntry")] PrecertEntry(PreCert), } #[derive(PartialEq, Debug, TlsSerializeBytes, TlsSize)] pub struct DigitallySigned { version: Version, signature_type: SignatureType, timestamp: u64, signed_entry: SignedEntry, // opaque CtExtensions<0..2^16-1>; extensions: TlsByteVecU16, // XX(tnytown): pass in some useful context. These fields will not be encoded into the // TLS DigitallySigned blob, but we need them to properly verify the reconstructed // message. #[tls_codec(skip)] log_id: [u8; 32], #[tls_codec(skip)] signature: Vec, } #[derive(Debug)] pub struct CertificateEmbeddedSCT<'a> { cert: &'a Certificate, sct: SignedCertificateTimestamp, issuer_id: [u8; 32], } impl<'a> CertificateEmbeddedSCT<'a> { fn new_with_spki(cert: &'a Certificate, spki: &[u8]) -> Result { let scts: SignedCertificateTimestampList = match cert.tbs_certificate.get() { Ok(Some((_, ext))) => ext, _ => return Err(SCTError::Parsing(CertificateErrorKind::LeafSCTMissing))?, }; // Parse SCT structures. let sct = match scts .parse_timestamps() .map_err(CertificateErrorKind::from)? .as_slice() { [e] => e, // We expect exactly one element here. Fail if there are more or less. 
_ => return Err(CertificateErrorKind::LeafSCTMissing)?, } .parse_timestamp() .map_err(CertificateErrorKind::from)?; let issuer_id = { let mut hasher = sha2::Sha256::new(); hasher.update(spki); hasher.finalize().into() }; Ok(Self { cert, sct, issuer_id, }) } pub fn new(leaf: &'a Certificate, chain: &[Certificate]) -> Result { // Traverse chain to find the issuer we're verifying against. let issuer = find_issuer_cert(chain); let spki = issuer .ok_or(CertificateErrorKind::IssuerMissing)? .tbs_certificate .subject_public_key_info .to_der() .map_err(CertificateErrorKind::from)?; Self::new_with_spki(leaf, &spki) } pub fn new_with_verified_path( leaf: &'a Certificate, chain: &webpki::VerifiedPath, ) -> Result { let issuer_spki = if let Some(issuer) = chain.intermediate_certificates().next() { debug!("intermediate is the leaf's issuer"); let issuer = Certificate::from_der(&issuer.der()) .map_err(CertificateErrorKind::from)? .tbs_certificate; issuer .subject_public_key_info .to_der() .map_err(CertificateErrorKind::from)? } else { debug!("anchor is the leaf's issuer"); // Prefix the SPKI with the DER SEQUENCE tag and a short definite-form length. let body = &chain.anchor().subject_public_key_info[..]; let body_len = body .len() .try_into() .or(Err(CertificateErrorKind::IssuerMalformed))?; let prefix = &[0x30u8, body_len]; [prefix, body].concat() }; Self::new_with_spki(leaf, &issuer_spki) } } impl From<&CertificateEmbeddedSCT<'_>> for DigitallySigned { fn from(value: &CertificateEmbeddedSCT) -> Self { // Construct the precert by filtering out the SCT extension. 
let mut tbs_precert = value.cert.tbs_certificate.clone(); tbs_precert.extensions = tbs_precert.extensions.map(|exts| { exts.iter() .filter(|v| v.extn_id != CT_PRECERT_SCTS) .cloned() .collect() }); let mut tbs_precert_der = Vec::new(); tbs_precert .encode_to_vec(&mut tbs_precert_der) .expect("failed to re-encode Precertificate!"); DigitallySigned { // XX(tnytown): This match is needed because `sct::Version` does not implement Copy. version: match value.sct.version { Version::V1 => Version::V1, }, signature_type: SignatureType::CertificateTimestamp, timestamp: value.sct.timestamp, signed_entry: SignedEntry::PrecertEntry(PreCert { issuer_key_hash: value.issuer_id, tbs_certificate: tbs_precert_der.as_slice().into(), }), extensions: value.sct.extensions.clone(), log_id: value.sct.log_id.key_id, signature: value.sct.signature.signature.clone().into(), } } } impl From<&SigningCertificateDetachedSCT> for DigitallySigned { fn from(value: &SigningCertificateDetachedSCT) -> Self { let sct = &value.signed_certificate_timestamp; DigitallySigned { version: Version::V1, signature_type: SignatureType::CertificateTimestamp, timestamp: sct.timestamp, signed_entry: SignedEntry::X509Entry(value.chain.certificates[0].contents().into()), extensions: sct.extensions.clone().into(), log_id: sct.id, signature: sct.signature.clone(), } } } /// Verifies a given signing certificate's Signed Certificate Timestamp. /// /// SCT verification as defined by [RFC 6962] guarantees that a given certificate has been submitted /// to a Certificate Transparency log. Verification should be performed on the signing certificate /// in Sigstore verify and sign flows. Certificates that fail SCT verification are misissued and /// MUST NOT be trusted. /// /// For more information on Certificate Transparency and the guarantees it provides, see . 
/// /// [RFC 6962]: https://datatracker.ietf.org/doc/html/rfc6962 pub fn verify_sct(sct: S, keyring: &Keyring) -> Result<(), SCTError> where S: Into, { let sct: DigitallySigned = sct.into(); let serialized = sct.tls_serialize().map_err(SCTError::Serialization)?; keyring.verify(&sct.log_id, &sct.signature, &serialized)?; Ok(()) } #[cfg(test)] mod tests { use super::{CertificateEmbeddedSCT, verify_sct}; use crate::crypto::keyring::Keyring; use crate::fulcio::SigningCertificateDetachedSCT; use p256::ecdsa::VerifyingKey; use std::str::FromStr; use x509_cert::Certificate; use x509_cert::der::DecodePem; use x509_cert::spki::EncodePublicKey; #[test] fn verify_embedded_sct() { let cert_pem = r#"-----BEGIN CERTIFICATE----- MIICzDCCAlGgAwIBAgIUF96OLbM9/tDVHKCJliXLTFvnfjAwCgYIKoZIzj0EAwMw NzEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MR4wHAYDVQQDExVzaWdzdG9yZS1pbnRl cm1lZGlhdGUwHhcNMjMxMjEzMDU1MDU1WhcNMjMxMjEzMDYwMDU1WjAAMFkwEwYH KoZIzj0CAQYIKoZIzj0DAQcDQgAEmir+Lah2291zCsLkmREQNLzf99z571BNB+fa rerSLGzcwLFK7GRLTGYcO0oStxCYavxRQPMo3JvB8vGtZbn/76OCAXAwggFsMA4G A1UdDwEB/wQEAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDAzAdBgNVHQ4EFgQU8U9M t9GMrRm8+gifPtc63nlP3OIwHwYDVR0jBBgwFoAU39Ppz1YkEZb5qNjpKFWixi4Y ZD8wGwYDVR0RAQH/BBEwD4ENYXNjQHRldHN1by5zaDAsBgorBgEEAYO/MAEBBB5o dHRwczovL2dpdGh1Yi5jb20vbG9naW4vb2F1dGgwLgYKKwYBBAGDvzABCAQgDB5o dHRwczovL2dpdGh1Yi5jb20vbG9naW4vb2F1dGgwgYkGCisGAQQB1nkCBAIEewR5 AHcAdQDdPTBqxscRMmMZHhyZZzcCokpeuN48rf+HinKALynujgAAAYxhumYsAAAE AwBGMEQCIHRRe20lRrNM4xd07mpjTtgaE6FGS3jjF++zW8ZMnth3AiAd6LVAAeVW hSW4T0XJRw9lGU6/EK9+ELZpEjrY03dJ1zAKBggqhkjOPQQDAwNpADBmAjEAiHqK W9PQ/5h7VROVIWPaxUo3LhrL2sZanw4bzTDBDY0dRR19ZFzjtAph1RzpQqppAjEA plAvxwkAIR2jurboJZ4Zm9rNAx8KvA+A5yQFzNkGgKDLjTJrKmSKoIcWV3j7WfdL -----END CERTIFICATE-----"#; let chain_pem = [ r#"-----BEGIN CERTIFICATE----- MIICGjCCAaGgAwIBAgIUALnViVfnU0brJasmRkHrn/UnfaQwCgYIKoZIzj0EAwMw KjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTAeFw0y MjA0MTMyMDA2MTVaFw0zMTEwMDUxMzU2NThaMDcxFTATBgNVBAoTDHNpZ3N0b3Jl 
LmRldjEeMBwGA1UEAxMVc2lnc3RvcmUtaW50ZXJtZWRpYXRlMHYwEAYHKoZIzj0C AQYFK4EEACIDYgAE8RVS/ysH+NOvuDZyPIZtilgUF9NlarYpAd9HP1vBBH1U5CV7 7LSS7s0ZiH4nE7Hv7ptS6LvvR/STk798LVgMzLlJ4HeIfF3tHSaexLcYpSASr1kS 0N/RgBJz/9jWCiXno3sweTAOBgNVHQ8BAf8EBAMCAQYwEwYDVR0lBAwwCgYIKwYB BQUHAwMwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHQ4EFgQU39Ppz1YkEZb5qNjp KFWixi4YZD8wHwYDVR0jBBgwFoAUWMAeX5FFpWapesyQoZMi0CrFxfowCgYIKoZI zj0EAwMDZwAwZAIwPCsQK4DYiZYDPIaDi5HFKnfxXx6ASSVmERfsynYBiX2X6SJR nZU84/9DZdnFvvxmAjBOt6QpBlc4J/0DxvkTCqpclvziL6BCCPnjdlIB3Pu3BxsP mygUY7Ii2zbdCdliiow= -----END CERTIFICATE-----"#, r#"-----BEGIN CERTIFICATE----- MIIB9zCCAXygAwIBAgIUALZNAPFdxHPwjeDloDwyYChAO/4wCgYIKoZIzj0EAwMw KjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTAeFw0y MTEwMDcxMzU2NTlaFw0zMTEwMDUxMzU2NThaMCoxFTATBgNVBAoTDHNpZ3N0b3Jl LmRldjERMA8GA1UEAxMIc2lnc3RvcmUwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAT7 XeFT4rb3PQGwS4IajtLk3/OlnpgangaBclYpsYBr5i+4ynB07ceb3LP0OIOZdxex X69c5iVuyJRQ+Hz05yi+UF3uBWAlHpiS5sh0+H2GHE7SXrk1EC5m1Tr19L9gg92j YzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRY wB5fkUWlZql6zJChkyLQKsXF+jAfBgNVHSMEGDAWgBRYwB5fkUWlZql6zJChkyLQ KsXF+jAKBggqhkjOPQQDAwNpADBmAjEAj1nHeXZp+13NWBNa+EDsDP8G1WWg1tCM WP/WHPqpaVo0jhsweNFZgSs0eE7wYI4qAjEA2WB9ot98sIkoF3vZYdd3/VtWB5b9 TNMea7Ix/stJ5TfcLLeABLE4BNJOsQ4vnBHJ -----END CERTIFICATE-----"#, ]; let ctfe_pem = r#"-----BEGIN PUBLIC KEY----- MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEiPSlFi0CmFTfEjCUqF9HuCEcYXNK AaYalIJmBZ8yyezPjTqhxrKBpMnaocVtLJBI1eM3uXnQzQGAJdJ4gs9Fyw== -----END PUBLIC KEY-----"#; let cert = Certificate::from_pem(cert_pem).unwrap(); let chain = chain_pem.map(|c| Certificate::from_pem(c).unwrap()); let sct = CertificateEmbeddedSCT::new(&cert, &chain).unwrap(); let ctfe_key: VerifyingKey = VerifyingKey::from_str(ctfe_pem).unwrap(); let keyring = Keyring::new([ctfe_key.to_public_key_der().unwrap().as_bytes()]).unwrap(); assert!(verify_sct(&sct, &keyring).is_ok()); } #[test] fn verify_detached_sct() { let sct_json = r#"{"chain": {"certificates": 
["-----BEGIN CERTIFICATE-----\nMIICUTCCAfigAwIBAgIUAafXe40Q5jthWJMo+JsJJCq09IAwCgYIKoZIzj0EAwIw\naDEMMAoGA1UEBhMDVVNBMQswCQYDVQQIEwJXQTERMA8GA1UEBxMIS2lya2xhbmQx\nFTATBgNVBAkTDDc2NyA2dGggU3QgUzEOMAwGA1UEERMFOTgwMzMxETAPBgNVBAoT\nCHNpZ3N0b3JlMB4XDTIzMTIxNDA3MDkzMFoXDTIzMTIxNDA3MTkzMFowADBZMBMG\nByqGSM49AgEGCCqGSM49AwEHA0IABDQT+qfW/VnHts0GSqI3kOc2z1lygSUWia3y\nIOx5qyWpXS1PwVcTbJnkcQEy1mnAES76NyfN5LsHHW2m53hF4WGjgecwgeQwDgYD\nVR0PAQH/BAQDAgeAMBMGA1UdJQQMMAoGCCsGAQUFBwMDMB0GA1UdDgQWBBRpKUIe\nAqDxiw/GzKGRLFAvbaCnujAfBgNVHSMEGDAWgBTjGF7/fiITblnp3yIv3G1DETbS\ncTAbBgNVHREBAf8EETAPgQ1hc2NAdGV0c3VvLnNoMC4GCisGAQQBg78wAQEEIGh0\ndHBzOi8vb2F1dGgyLnNpZ3N0b3JlLmRldi9hdXRoMDAGCisGAQQBg78wAQgEIgwg\naHR0cHM6Ly9vYXV0aDIuc2lnc3RvcmUuZGV2L2F1dGgwCgYIKoZIzj0EAwIDRwAw\nRAIgOW+tCrt44rjWDCMSWhwC0zJRWpqH/qWRgSw2ndK7w3ICIGz0DDAXhvl6JFAz\nQp+40dnoUGKr+y0MF1zVaDOb1y+q\n-----END CERTIFICATE-----", "-----BEGIN CERTIFICATE-----\nMIICFzCCAb2gAwIBAgIUbPNC2sKGpw8cOQfpv8yJii7c7TEwCgYIKoZIzj0EAwIw\naDEMMAoGA1UEBhMDVVNBMQswCQYDVQQIEwJXQTERMA8GA1UEBxMIS2lya2xhbmQx\nFTATBgNVBAkTDDc2NyA2dGggU3QgUzEOMAwGA1UEERMFOTgwMzMxETAPBgNVBAoT\nCHNpZ3N0b3JlMB4XDTIzMTIxNDA2NDIzNloXDTMzMTIxNDA2NDIzNlowaDEMMAoG\nA1UEBhMDVVNBMQswCQYDVQQIEwJXQTERMA8GA1UEBxMIS2lya2xhbmQxFTATBgNV\nBAkTDDc2NyA2dGggU3QgUzEOMAwGA1UEERMFOTgwMzMxETAPBgNVBAoTCHNpZ3N0\nb3JlMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEfe1ZllZHky68F3jRhY4Hxx7o\nPBoBaD1i9UJtyE8xfIYGVpD1+jSHctZRmiv2ZsDEE6WN3k5lc2O2GyemHJwULqNF\nMEMwDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQEwHQYDVR0OBBYE\nFOMYXv9+IhNuWenfIi/cbUMRNtJxMAoGCCqGSM49BAMCA0gAMEUCIDj5wbYN3ym8\nwY+Uy+FkKASpBQodXdgF+JR9tWhNDlc/AiEAwqMTyLa6Yr+5t1DvnUsR4lQNoXD7\nz8XmxcUnJTenEh4=\n-----END CERTIFICATE-----"]}, "signedCertificateTimestamp": 
"eyJzY3RfdmVyc2lvbiI6MCwiaWQiOiJla0ppei9acEcrVUVuNXcvR2FJcjYrYXdJK1JLZmtwdC9WOVRldTd2YTFrPSIsInRpbWVzdGFtcCI6MTcwMjUzNzc3MDQyNiwiZXh0ZW5zaW9ucyI6IiIsInNpZ25hdHVyZSI6IkJBTUFSakJFQWlBT28vdDZ4RDY0RkV2TWpGcGFsMUhVVkZxQU5nOXJ3ZEttd3NQU2wxNm5FZ0lnZmFNTlJHMTBxQVY1Z280MzU1WkxVNVVvdHRvWTAwK0l0YXhZYjRkZmV0Zz0ifQ=="}"#; let ctfe_pem = r#"-----BEGIN PUBLIC KEY----- MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEbbQiLx6GKy6ivhc11wJGbQjc2VX/ mnuk5d670MTXR3p+LIAcxd5MhqIHpLmyYJ5mDKLEoZ/pC0nPuje3JueBcA== -----END PUBLIC KEY-----"#; let sct: SigningCertificateDetachedSCT = serde_json::from_str(sct_json).unwrap(); let ctfe_key: VerifyingKey = VerifyingKey::from_str(ctfe_pem).unwrap(); let keyring = Keyring::new([ctfe_key.to_public_key_der().unwrap().as_bytes()]).unwrap(); assert!(verify_sct(&sct, &keyring).is_ok()); } } ================================================ FILE: src/crypto/verification_key.rs ================================================ // // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
use base64::{Engine as _, engine::general_purpose::STANDARD as BASE64_STD_ENGINE}; use const_oid::db::rfc5912::{ID_EC_PUBLIC_KEY, RSA_ENCRYPTION}; use ed25519::pkcs8::DecodePublicKey as ED25519DecodePublicKey; use rsa::{pkcs1v15, pss}; use sha2::{Digest, Sha256, Sha384}; use signature::{DigestVerifier, Verifier, hazmat::PrehashVerifier}; use x509_cert::{der::referenced::OwnedToRef, spki::SubjectPublicKeyInfoOwned}; use super::{ Signature, SigningScheme, signing_key::{KeyPair, SigStoreSigner}, }; use crate::errors::*; #[cfg(feature = "cosign")] use crate::cosign::constants::ED25519; /// A key that can be used to verify signatures. /// /// Currently the following key formats are supported: /// /// * RSA keys, using PSS padding and SHA-256 as the digest algorithm /// * RSA keys, using PSS padding and SHA-384 as the digest algorithm /// * RSA keys, using PSS padding and SHA-512 as the digest algorithm /// * RSA keys, using PKCS1 padding and SHA-256 as the digest algorithm /// * RSA keys, using PKCS1 padding and SHA-384 as the digest algorithm /// * RSA keys, using PKCS1 padding and SHA-512 as the digest algorithm /// * Ed25519 keys, and SHA-512 as the digest algorithm /// * ECDSA keys, ASN.1 DER-encoded, using the P-256 curve and SHA-256 as digest algorithm /// * ECDSA keys, ASN.1 DER-encoded, using the P-384 curve and SHA-384 as digest algorithm #[allow(non_camel_case_types)] #[derive(Debug, Clone)] pub enum CosignVerificationKey { RSA_PSS_SHA256(pss::VerifyingKey), RSA_PSS_SHA384(pss::VerifyingKey), RSA_PSS_SHA512(pss::VerifyingKey), RSA_PKCS1_SHA256(pkcs1v15::VerifyingKey), RSA_PKCS1_SHA384(pkcs1v15::VerifyingKey), RSA_PKCS1_SHA512(pkcs1v15::VerifyingKey), ECDSA_P256_SHA256_ASN1(ecdsa::VerifyingKey), ECDSA_P384_SHA384_ASN1(ecdsa::VerifyingKey), ED25519(ed25519_dalek::VerifyingKey), } /// Attempts to convert a [x509 Subject Public Key Info](x509_cert::spki::SubjectPublicKeyInfo) object into /// a `CosignVerificationKey` one. 
/// /// Currently can convert only the following types of keys: /// * ECDSA P-256: assumes the SHA-256 digest algorithm is used /// * ECDSA P-384: assumes the SHA-384 digest algorithm is used /// * RSA: assumes PKCS1 padding is used impl TryFrom<&SubjectPublicKeyInfoOwned> for CosignVerificationKey { type Error = SigstoreError; fn try_from(subject_pub_key_info: &SubjectPublicKeyInfoOwned) -> Result { let algorithm = subject_pub_key_info.algorithm.oid; let public_key_der = &subject_pub_key_info.subject_public_key; match algorithm { ID_EC_PUBLIC_KEY => { match public_key_der.raw_bytes().len() { 65 => Ok(CosignVerificationKey::ECDSA_P256_SHA256_ASN1( ecdsa::VerifyingKey::try_from(subject_pub_key_info.owned_to_ref()) .map_err(|e| { SigstoreError::PKCS8SpkiError(format!( "Ecdsa-P256 from der bytes to public key failed: {e}" )) })?, )), 97 => Ok(CosignVerificationKey::ECDSA_P384_SHA384_ASN1( ecdsa::VerifyingKey::try_from(subject_pub_key_info.owned_to_ref()) .map_err(|e| { SigstoreError::PKCS8SpkiError(format!( "Ecdsa-P384 from der bytes to public key failed: {e}" )) })?, )), _ => Err(SigstoreError::PublicKeyUnsupportedAlgorithmError(format!( "EC with size {} is not supported", // asn.1 encode caused different length (public_key_der.raw_bytes().len() - 1) * 4 ))), } } RSA_ENCRYPTION => { let pubkey = rsa::RsaPublicKey::try_from(subject_pub_key_info.owned_to_ref()) .map_err(|e| { SigstoreError::PKCS8SpkiError(format!( "RSA from der bytes to public key failed: {e}" )) })?; Ok(CosignVerificationKey::RSA_PKCS1_SHA256( pkcs1v15::VerifyingKey::::from(pubkey), )) } // #[cfg(feature = "cosign")] ED25519 => Ok(CosignVerificationKey::ED25519( ed25519_dalek::VerifyingKey::try_from(subject_pub_key_info.owned_to_ref())?, )), _ => Err(SigstoreError::PublicKeyUnsupportedAlgorithmError(format!( "Key with algorithm OID {algorithm} is not supported" ))), } } } impl CosignVerificationKey { /// Builds a [`CosignVerificationKey`] from DER-encoded data. 
The methods takes care /// of extracting the SubjectPublicKeyInfo from the DER-encoded data. pub fn from_der(der_data: &[u8], signing_scheme: &SigningScheme) -> Result { Ok(match signing_scheme { SigningScheme::RSA_PSS_SHA256(_) => { CosignVerificationKey::RSA_PSS_SHA256(pss::VerifyingKey::new( rsa::RsaPublicKey::from_public_key_der(der_data).map_err(|e| { SigstoreError::PKCS8SpkiError(format!( "read rsa public key from der failed: {e}" )) })?, )) } SigningScheme::RSA_PSS_SHA384(_) => { CosignVerificationKey::RSA_PSS_SHA384(pss::VerifyingKey::new( rsa::RsaPublicKey::from_public_key_der(der_data).map_err(|e| { SigstoreError::PKCS8SpkiError(format!( "read rsa public key from der failed: {e}" )) })?, )) } SigningScheme::RSA_PSS_SHA512(_) => { CosignVerificationKey::RSA_PSS_SHA512(pss::VerifyingKey::new( rsa::RsaPublicKey::from_public_key_der(der_data).map_err(|e| { SigstoreError::PKCS8SpkiError(format!( "read rsa public key from der failed: {e}" )) })?, )) } SigningScheme::RSA_PKCS1_SHA256(_) => { CosignVerificationKey::RSA_PKCS1_SHA256(pkcs1v15::VerifyingKey::new( rsa::RsaPublicKey::from_public_key_der(der_data).map_err(|e| { SigstoreError::PKCS8SpkiError(format!( "read rsa public key from der failed: {e}" )) })?, )) } SigningScheme::RSA_PKCS1_SHA384(_) => { CosignVerificationKey::RSA_PKCS1_SHA384(pkcs1v15::VerifyingKey::new( rsa::RsaPublicKey::from_public_key_der(der_data).map_err(|e| { SigstoreError::PKCS8SpkiError(format!( "read rsa public key from der failed: {e}" )) })?, )) } SigningScheme::RSA_PKCS1_SHA512(_) => { CosignVerificationKey::RSA_PKCS1_SHA512(pkcs1v15::VerifyingKey::new( rsa::RsaPublicKey::from_public_key_der(der_data).map_err(|e| { SigstoreError::PKCS8SpkiError(format!( "read rsa public key from der failed: {e}" )) })?, )) } SigningScheme::ECDSA_P256_SHA256_ASN1 => CosignVerificationKey::ECDSA_P256_SHA256_ASN1( ecdsa::VerifyingKey::from_public_key_der(der_data).map_err(|e| { SigstoreError::PKCS8SpkiError(format!( "Ecdsa-P256 from der bytes to public 
key failed: {e}" )) })?, ), SigningScheme::ECDSA_P384_SHA384_ASN1 => CosignVerificationKey::ECDSA_P384_SHA384_ASN1( ecdsa::VerifyingKey::from_public_key_der(der_data).map_err(|e| { SigstoreError::PKCS8SpkiError(format!( "Ecdsa-P384 from der bytes to public key failed: {e}" )) })?, ), SigningScheme::ED25519 => CosignVerificationKey::ED25519( ed25519_dalek::VerifyingKey::from_public_key_der(der_data)?, ), }) } /// Builds a [`CosignVerificationKey`] from DER-encoded public key data. This function will /// set the verification algorithm due to the public key type, s.t. /// * `RSA public key`: `RSA_PKCS1_SHA256` /// * `EC public key with P-256 curve`: `ECDSA_P256_SHA256_ASN1` /// * `EC public key with P-384 curve`: `ECDSA_P384_SHA384_ASN1` /// * `Ed25519 public key`: `Ed25519` pub fn try_from_der(der_data: &[u8]) -> Result { if let Ok(p256vk) = ecdsa::VerifyingKey::from_public_key_der(der_data) { Ok(Self::ECDSA_P256_SHA256_ASN1(p256vk)) } else if let Ok(p384vk) = ecdsa::VerifyingKey::from_public_key_der(der_data) { Ok(Self::ECDSA_P384_SHA384_ASN1(p384vk)) } else if let Ok(ed25519bytes) = ed25519::pkcs8::PublicKeyBytes::from_public_key_der(der_data) { Ok(Self::ED25519(ed25519_dalek::VerifyingKey::from_bytes( ed25519bytes.as_ref(), )?)) } else { match rsa::RsaPublicKey::from_public_key_der(der_data) { Ok(rsapk) => Ok(Self::RSA_PKCS1_SHA256(pkcs1v15::VerifyingKey::new(rsapk))), _ => Err(SigstoreError::InvalidKeyFormat { error: "Failed to parse the public key.".to_string(), }), } } } /// Builds a [`CosignVerificationKey`] from PEM-encoded data. The methods takes care /// of decoding the PEM-encoded data and then extracting the SubjectPublicKeyInfo /// from the DER-encoded bytes. pub fn from_pem(pem_data: &[u8], signing_scheme: &SigningScheme) -> Result { let key_pem = pem::parse(pem_data)?; Self::from_der(key_pem.contents(), signing_scheme) } /// Builds a [`CosignVerificationKey`] from PEM-encoded public key data. 
This function will /// set the verification algorithm due to the public key type, s.t. /// * `RSA public key`: `RSA_PKCS1_SHA256` /// * `EC public key with P-256 curve`: `ECDSA_P256_SHA256_ASN1` /// * `EC public key with P-384 curve`: `ECDSA_P384_SHA384_ASN1` /// * `Ed25519 public key`: `Ed25519` pub fn try_from_pem(pem_data: &[u8]) -> Result { let key_pem = pem::parse(pem_data)?; Self::try_from_der(key_pem.contents()) } /// Builds a `CosignVerificationKey` from [`SigStoreSigner`]. The methods will derive /// a `CosignVerificationKey` from the given [`SigStoreSigner`]'s public key. pub fn from_sigstore_signer(signer: &SigStoreSigner) -> Result { signer.to_verification_key() } /// Builds a `CosignVerificationKey` from [`KeyPair`]. The methods will derive /// a `CosignVerificationKey` from the given [`KeyPair`]'s public key. pub fn from_key_pair(signer: &dyn KeyPair, signing_scheme: &SigningScheme) -> Result { signer.to_verification_key(signing_scheme) } /// Verify the signature provided has been actually generated by the given key /// when signing the provided message. 
pub fn verify_signature(&self, signature: Signature, msg: &[u8]) -> Result<()> {
        // Normalize the signature to raw bytes: base64-encoded signatures are
        // decoded first, raw ones are used as-is.
        let sig = match signature {
            Signature::Raw(data) => data.to_owned(),
            Signature::Base64Encoded(data) => BASE64_STD_ENGINE.decode(data)?,
        };

        // Dispatch on the key type: each variant parses the signature bytes into
        // the matching signature format, then verifies against `msg`. Any
        // cryptographic failure is collapsed into PublicKeyVerificationError.
        match self {
            CosignVerificationKey::RSA_PSS_SHA256(inner) => {
                let sig = pss::Signature::try_from(sig.as_slice())?;
                inner
                    .verify(msg, &sig)
                    .map_err(|_| SigstoreError::PublicKeyVerificationError)
            }
            CosignVerificationKey::RSA_PSS_SHA384(inner) => {
                let sig = pss::Signature::try_from(sig.as_slice())?;
                inner
                    .verify(msg, &sig)
                    .map_err(|_| SigstoreError::PublicKeyVerificationError)
            }
            CosignVerificationKey::RSA_PSS_SHA512(inner) => {
                let sig = pss::Signature::try_from(sig.as_slice())?;
                inner
                    .verify(msg, &sig)
                    .map_err(|_| SigstoreError::PublicKeyVerificationError)
            }
            CosignVerificationKey::RSA_PKCS1_SHA256(inner) => {
                let sig = pkcs1v15::Signature::try_from(sig.as_slice())?;
                inner
                    .verify(msg, &sig)
                    .map_err(|_| SigstoreError::PublicKeyVerificationError)
            }
            CosignVerificationKey::RSA_PKCS1_SHA384(inner) => {
                let sig = pkcs1v15::Signature::try_from(sig.as_slice())?;
                inner
                    .verify(msg, &sig)
                    .map_err(|_| SigstoreError::PublicKeyVerificationError)
            }
            CosignVerificationKey::RSA_PKCS1_SHA512(inner) => {
                let sig = pkcs1v15::Signature::try_from(sig.as_slice())?;
                inner
                    .verify(msg, &sig)
                    .map_err(|_| SigstoreError::PublicKeyVerificationError)
            }
            // ECDSA signatures are encoded in der.
            CosignVerificationKey::ECDSA_P256_SHA256_ASN1(inner) => {
                // Hash the message explicitly and verify via DigestVerifier.
                let mut hasher = Sha256::new();
                digest::Digest::update(&mut hasher, msg);
                let sig = ecdsa::Signature::from_der(&sig)?;
                inner
                    .verify_digest(hasher, &sig)
                    .map_err(|_| SigstoreError::PublicKeyVerificationError)
            }
            CosignVerificationKey::ECDSA_P384_SHA384_ASN1(inner) => {
                let mut hasher = Sha384::new();
                digest::Digest::update(&mut hasher, msg);
                let sig = ecdsa::Signature::from_der(&sig)?;
                inner
                    .verify_digest(hasher, &sig)
                    .map_err(|_| SigstoreError::PublicKeyVerificationError)
            }
            CosignVerificationKey::ED25519(inner) => {
                let sig = ed25519::Signature::from_slice(sig.as_slice())
                    .map_err(|_| SigstoreError::PublicKeyVerificationError)?;
                inner
                    .verify(msg, &sig)
                    .map_err(|_| SigstoreError::PublicKeyVerificationError)
            }
        }
    }

    /// Verify the signature provided has been actually generated by the given key
    /// when signing the provided prehashed message.
    pub(crate) fn verify_prehash(&self, signature: Signature, msg: &[u8]) -> Result<()> {
        // Same normalization as verify_signature: decode base64 if needed.
        let sig = match signature {
            Signature::Raw(data) => data.to_owned(),
            Signature::Base64Encoded(data) => BASE64_STD_ENGINE.decode(data)?,
        };

        match self {
            CosignVerificationKey::RSA_PSS_SHA256(inner) => {
                let sig = pss::Signature::try_from(sig.as_slice())?;
                inner
                    .verify_prehash(msg, &sig)
                    .map_err(|_| SigstoreError::PublicKeyVerificationError)
            }
            CosignVerificationKey::RSA_PSS_SHA384(inner) => {
                let sig = pss::Signature::try_from(sig.as_slice())?;
                inner
                    .verify_prehash(msg, &sig)
                    .map_err(|_| SigstoreError::PublicKeyVerificationError)
            }
            CosignVerificationKey::RSA_PSS_SHA512(inner) => {
                let sig = pss::Signature::try_from(sig.as_slice())?;
                inner
                    .verify_prehash(msg, &sig)
                    .map_err(|_| SigstoreError::PublicKeyVerificationError)
            }
            CosignVerificationKey::RSA_PKCS1_SHA256(inner) => {
                let sig = pkcs1v15::Signature::try_from(sig.as_slice())?;
                inner
                    .verify_prehash(msg, &sig)
                    .map_err(|_| SigstoreError::PublicKeyVerificationError)
            }
            CosignVerificationKey::RSA_PKCS1_SHA384(inner) => {
                let sig =
pkcs1v15::Signature::try_from(sig.as_slice())?;
                inner
                    .verify_prehash(msg, &sig)
                    .map_err(|_| SigstoreError::PublicKeyVerificationError)
            }
            CosignVerificationKey::RSA_PKCS1_SHA512(inner) => {
                let sig = pkcs1v15::Signature::try_from(sig.as_slice())?;
                inner
                    .verify_prehash(msg, &sig)
                    .map_err(|_| SigstoreError::PublicKeyVerificationError)
            }
            // ECDSA signatures are encoded in der.
            CosignVerificationKey::ECDSA_P256_SHA256_ASN1(inner) => {
                let sig = ecdsa::Signature::from_der(&sig)?;
                inner
                    .verify_prehash(msg, &sig)
                    .map_err(|_| SigstoreError::PublicKeyVerificationError)
            }
            CosignVerificationKey::ECDSA_P384_SHA384_ASN1(inner) => {
                let sig = ecdsa::Signature::from_der(&sig)?;
                inner
                    .verify_prehash(msg, &sig)
                    .map_err(|_| SigstoreError::PublicKeyVerificationError)
            }
            CosignVerificationKey::ED25519(_) => {
                // Ed25519 signs the full message, not a digest, so prehash
                // verification is not available for this key type.
                unimplemented!("Ed25519 doesn't implement verify_prehash")
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use x509_cert::Certificate;
    use x509_cert::der::Decode;

    use super::*;
    use crate::crypto::tests::*;

    // Happy path: a known-good ECDSA signature over a cosign payload verifies.
    #[test]
    fn verify_signature_success() {
        let signature = Signature::Base64Encoded(b"MEUCIQD6q/COgzOyW0YH1Dk+CCYSt4uAhm3FDHUwvPI55zwnlwIgE0ZK58ZOWpZw8YVmBapJhBqCfdPekIknimuO0xH8Jh8=");
        let verification_key =
            CosignVerificationKey::from_pem(PUBLIC_KEY.as_bytes(), &SigningScheme::default())
                .expect("Cannot create CosignVerificationKey");
        let msg = r#"{"critical":{"identity":{"docker-reference":"registry-testing.svc.lan/busybox"},"image":{"docker-manifest-digest":"sha256:f3cfc9d0dbf931d3db4685ec659b7ac68e2a578219da4aae65427886e649b06b"},"type":"cosign container image signature"},"optional":null}"#;

        let outcome = verification_key.verify_signature(signature, msg.as_bytes());
        assert!(outcome.is_ok());
    }

    // A valid signature over a different message must fail verification.
    #[test]
    fn verify_signature_failure_because_wrong_msg() {
        let signature = Signature::Base64Encoded(b"MEUCIQD6q/COgzOyW0YH1Dk+CCYSt4uAhm3FDHUwvPI55zwnlwIgE0ZK58ZOWpZw8YVmBapJhBqCfdPekIknimuO0xH8Jh8=");
        let verification_key =
            CosignVerificationKey::from_pem(PUBLIC_KEY.as_bytes(), &SigningScheme::default())
                .expect("Cannot create CosignVerificationKey");
        let msg = "hello world";

        let err = verification_key
            .verify_signature(signature, msg.as_bytes())
            .expect_err("Was expecting an error");
        let found = matches!(err, SigstoreError::PublicKeyVerificationError);
        assert!(found, "Didn't get expected error, got {:?} instead", err);
    }

    // A signature that is not valid base64 surfaces a decode error, not a
    // verification error.
    #[test]
    fn verify_signature_failure_because_wrong_signature() {
        let signature = Signature::Base64Encoded(b"this is a signature");
        let verification_key =
            CosignVerificationKey::from_pem(PUBLIC_KEY.as_bytes(), &SigningScheme::default())
                .expect("Cannot create CosignVerificationKey");
        let msg = r#"{"critical":{"identity":{"docker-reference":"registry-testing.svc.lan/busybox"},"image":{"docker-manifest-digest":"sha256:f3cfc9d0dbf931d3db4685ec659b7ac68e2a578219da4aae65427886e649b06b"},"type":"cosign container image signature"},"optional":null}"#;

        let err = verification_key
            .verify_signature(signature, msg.as_bytes())
            .expect_err("Was expecting an error");
        let found = matches!(err, SigstoreError::Base64DecodeError(_));
        assert!(found, "Didn't get expected error, got {:?} instead", err);
    }

    // Verifying with a different (mismatched) public key must fail.
    #[test]
    fn verify_signature_failure_because_wrong_verification_key() {
        let signature = Signature::Base64Encoded(b"MEUCIQD6q/COgzOyW0YH1Dk+CCYSt4uAhm3FDHUwvPI55zwnlwIgE0ZK58ZOWpZw8YVmBapJhBqCfdPekIknimuO0xH8Jh8=");

        let verification_key = CosignVerificationKey::from_pem(
            r#"-----BEGIN PUBLIC KEY-----
MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAETJP9cqpUQsn2ggmJniWGjHdlsHzD
JsB89BPhZYch0U0hKANx5TY+ncrm0s8bfJxxHoenAEFhwhuXeb4PqIrtoQ==
-----END PUBLIC KEY-----"#
                .as_bytes(),
            &SigningScheme::default(),
        )
        .expect("Cannot create CosignVerificationKey");
        let msg = r#"{"critical":{"identity":{"docker-reference":"registry-testing.svc.lan/busybox"},"image":{"docker-manifest-digest":"sha256:f3cfc9d0dbf931d3db4685ec659b7ac68e2a578219da4aae65427886e649b06b"},"type":"cosign container image signature"},"optional":null}"#;

        let err = verification_key
            .verify_signature(signature, msg.as_bytes())
            .expect_err("Was expecting an error");
        let found = matches!(err, SigstoreError::PublicKeyVerificationError);
        assert!(found, "Didn't get expected error, got {:?} instead", err);
    }

    // RSA PKCS#1 v1.5 verification against a known-good signature.
    #[test]
    fn verify_rsa_signature() {
        let signature = Signature::Base64Encoded(b"umasnfYJyLbYPjiq1wIy086Ns+CrgiMoQUSGqPqlUmtWsY0hbngJ73hPfJFrppviPKdBeuUiiwgKagBKIXLEXjwxQp4eE3szwqkKoAnR/lByb7ahLgVQ4MB6xDQaHD53MYtj7aOvd4O7FqJltVVjEn7nM/Du2tL5y3jf6lD7VfHZE8uRocRlyppt8SfTc5L12mVlZ0YlfKYkd334A4y/reCy3Yws0j356Wj7GLScMU5uR11Y2y41rSyYm5uXhTerwNFXsRcPMAmenMarCdCmt4Lf4wpcJBCU172xiK+rIhbMgkLjjA772+auSYf1E8CySVah5CD0Td5YC3y8vIIYaA==");

        let verification_key = CosignVerificationKey::from_pem(
            r#"-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvM/dHoi6nSy7hbKHLYUr
Xy6Bv35JbdoIzny5vSFiRXApr0KS56U8PugdGmh+vd7H8YNlx2YOJxzv02Blsrcm
WDZcXjE3Xpsi/IHFfRZLOdwwR+u8MNFxwRUVzxyIzKGtbREVVfXPfb2Xc6FL5/tE
vQtUKuR6XdzSaav2RnV5IybCB09s0Np0AUbdi5EfSe4INuqgY+VFYLjvM5onbAQL
N3bFLS4Quk66Dhv93Zi6NwopwL1F07UPC5uadkyePStP3PA0OAOemj9vZADOWx5a
dsGCKISs8iphNC5mDVoLy8Ry49Ms3eQXRjVQOMco3YNf8AhsIdxDNBVN8VTDKVkE
DwIDAQAB
-----END PUBLIC KEY-----"#
                .as_bytes(),
            &SigningScheme::RSA_PKCS1_SHA256(0),
        )
        .expect("Cannot create CosignVerificationKey");
        let msg = r#"{"critical":{"identity":{"docker-reference":"registry.suse.com/suse/sle-micro/5.0/toolbox"},"image":{"docker-manifest-digest":"sha256:356631f7603526a0af827741f5fe005acf19b7ef7705a34241a91c2d47a6db5e"},"type":"cosign container image signature"},"optional":{"creator":"OBS"}}"#;

        assert!(
            verification_key
                .verify_signature(signature, msg.as_bytes())
                .is_ok()
        );
    }

    // SPKI extracted from a P-256 certificate converts to the P-256 variant.
    #[test]
    fn convert_ecdsa_p256_subject_public_key_to_cosign_verification_key() -> anyhow::Result<()> {
        let (private_key, public_key) = generate_ecdsa_p256_keypair();
        let issued_cert_generation_options = CertGenerationOptions {
            private_key,
            public_key,
            ..Default::default()
        };

        let ca_data = generate_certificate(None, CertGenerationOptions::default())?;
        let issued_cert = generate_certificate(Some(&ca_data), issued_cert_generation_options)?;
        let issued_cert_pem = issued_cert.cert.to_pem()?;
        let pem = pem::parse(issued_cert_pem)?;
        let cert = Certificate::from_der(pem.contents())?;
        let spki = cert.tbs_certificate.subject_public_key_info;

        let cosign_verification_key =
            CosignVerificationKey::try_from(&spki).expect("conversion failed");

        assert!(matches!(
            cosign_verification_key,
            CosignVerificationKey::ECDSA_P256_SHA256_ASN1(_)
        ));
        Ok(())
    }

    // SPKI extracted from a P-384 certificate converts to the P-384 variant.
    #[test]
    fn convert_ecdsa_p384_subject_public_key_to_cosign_verification_key() -> anyhow::Result<()> {
        let (private_key, public_key) = generate_ecdsa_p384_keypair();
        let issued_cert_generation_options = CertGenerationOptions {
            private_key,
            public_key,
            ..Default::default()
        };

        let ca_data = generate_certificate(None, CertGenerationOptions::default())?;
        let issued_cert = generate_certificate(Some(&ca_data), issued_cert_generation_options)?;
        let issued_cert_pem = issued_cert.cert.to_pem()?;
        let pem = pem::parse(issued_cert_pem)?;
        let cert = Certificate::from_der(pem.contents())?;
        let spki = cert.tbs_certificate.subject_public_key_info;

        let cosign_verification_key =
            CosignVerificationKey::try_from(&spki).expect("conversion failed");

        assert!(matches!(
            cosign_verification_key,
            CosignVerificationKey::ECDSA_P384_SHA384_ASN1(_)
        ));
        Ok(())
    }

    // SPKI extracted from an RSA certificate converts to the PKCS1/SHA-256 variant.
    #[test]
    fn convert_rsa_subject_public_key_to_cosign_verification_key() -> anyhow::Result<()> {
        let (private_key, public_key) = generate_rsa_keypair(2048);
        let issued_cert_generation_options = CertGenerationOptions {
            private_key,
            public_key,
            ..Default::default()
        };

        let ca_data = generate_certificate(None, CertGenerationOptions::default())?;
        let issued_cert = generate_certificate(Some(&ca_data), issued_cert_generation_options)?;
        let issued_cert_pem = issued_cert.cert.to_pem()?;
        let pem = pem::parse(issued_cert_pem)?;
        let cert = Certificate::from_der(pem.contents())?;
        let spki = cert.tbs_certificate.subject_public_key_info;

        let cosign_verification_key =
            CosignVerificationKey::try_from(&spki).expect("conversion failed");

        assert!(matches!(
            cosign_verification_key,
            CosignVerificationKey::RSA_PKCS1_SHA256(_)
        ));
        Ok(())
    }

    // SPKI extracted from an Ed25519 certificate converts to the ED25519 variant.
    #[test]
    fn convert_ed25519_subject_public_key_to_cosign_verification_key() -> anyhow::Result<()> {
        let (private_key, public_key) = generate_ed25519_keypair();
        let issued_cert_generation_options = CertGenerationOptions {
            private_key,
            public_key,
            ..Default::default()
        };

        let ca_data = generate_certificate(None, CertGenerationOptions::default())?;
        let issued_cert = generate_certificate(Some(&ca_data), issued_cert_generation_options)?;
        let issued_cert_pem = issued_cert.cert.to_pem()?;
        let pem = pem::parse(issued_cert_pem)?;
        let cert = Certificate::from_der(pem.contents())?;
        let spki = cert.tbs_certificate.subject_public_key_info;

        let cosign_verification_key =
            CosignVerificationKey::try_from(&spki).expect("conversion failed");

        assert!(matches!(
            cosign_verification_key,
            CosignVerificationKey::ED25519(_)
        ));
        Ok(())
    }

    // DSA keys are not supported: conversion must report an unsupported algorithm.
    #[test]
    fn convert_unsupported_curve_subject_public_key_to_cosign_verification_key()
    -> anyhow::Result<()> {
        let (private_key, public_key) = generate_dsa_keypair(2048);
        let issued_cert_generation_options = CertGenerationOptions {
            private_key,
            public_key,
            ..Default::default()
        };

        let ca_data = generate_certificate(None, CertGenerationOptions::default())?;
        let issued_cert = generate_certificate(Some(&ca_data), issued_cert_generation_options)?;
        let issued_cert_pem = issued_cert.cert.to_pem()?;
        let pem = pem::parse(issued_cert_pem)?;
        let cert = Certificate::from_der(pem.contents())?;
        let spki = cert.tbs_certificate.subject_public_key_info;

        let err = CosignVerificationKey::try_from(&spki);
        assert!(matches!(
            err,
            Err(SigstoreError::PublicKeyUnsupportedAlgorithmError(_))
        ));
        Ok(())
    }
}

================================================
FILE: src/errors.rs
================================================
//
// Copyright 2021 The Sigstore Authors.
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! The errors that can be raised by sigstore-rs use thiserror::Error; #[cfg(feature = "cosign")] use crate::cosign::{ constraint::SignConstraintRefVec, verification_constraint::VerificationConstraintRefVec, }; use crate::crypto::merkle::MerkleProofError; #[cfg(feature = "cosign")] #[cfg_attr(docsrs, doc(cfg(feature = "cosign")))] #[derive(Error, Debug)] #[error("Several Signature Layers failed verification")] pub struct SigstoreVerifyConstraintsError<'a> { pub unsatisfied_constraints: VerificationConstraintRefVec<'a>, } #[cfg(feature = "cosign")] #[cfg_attr(docsrs, doc(cfg(feature = "cosign")))] #[derive(Error, Debug)] #[error("Several Constraints failed to apply on the SignatureLayer")] pub struct SigstoreApplicationConstraintsError<'a> { pub unapplied_constraints: SignConstraintRefVec<'a>, } pub type Result = std::result::Result; #[derive(Error, Debug)] pub enum SigstoreError { #[error("failed to parse URL: {0}")] UrlParseError(#[from] url::ParseError), #[error("failed to construct redirect URL")] RedirectUrlRequestLineError, #[error("failed to construct oauth code pair")] CodePairError, #[error("invalid key format: {error}")] InvalidKeyFormat { error: String }, #[error("Unable to parse identity token: {0}")] IdentityTokenError(String), #[error("unmatched key type {key_typ} and signing scheme {scheme}")] UnmatchedKeyAndSigningScheme { key_typ: String, scheme: String }, #[error("x509 error: {0}")] X509Error(String), 
#[error(transparent)] FromPEMError(#[from] pem::PemError), #[error(transparent)] Base64DecodeError(#[from] base64::DecodeError), #[cfg(any( feature = "sign", feature = "sigstore-trust-root", feature = "rekor", feature = "verify" ))] #[error(transparent)] HexDecodeError(#[from] hex::FromHexError), #[error("Unable to parse checkpoint: {0}")] ParseCheckpointError(String), #[error("Public key with unsupported algorithm: {0}")] PublicKeyUnsupportedAlgorithmError(String), #[error("Public key verification error")] PublicKeyVerificationError, #[error("No valid signature found for checkpoint")] CheckpointSignatureVerificationError, #[error("X.509 certificate version is not V3")] CertificateUnsupportedVersionError, #[error("Certificate validity check failed: cannot be used before {0}")] CertificateValidityError(String), #[error("Certificate has not been issued for {0}")] CertificateInvalidEmail(String), #[error( "Certificate expired before signatures were entered in log: {integrated_time} is before {not_before}" )] CertificateExpiredBeforeSignaturesSubmittedToRekor { integrated_time: String, not_before: String, }, #[error( "Certificate was issued after signatures were entered in log: {integrated_time} is after {not_after}" )] CertificateIssuedAfterSignaturesSubmittedToRekor { integrated_time: String, not_after: String, }, #[error("Bundled certificate does not have digital signature key usage")] CertificateWithoutDigitalSignatureKeyUsage, #[error("Bundled certificate does not have code signing extended key usage")] CertificateWithoutCodeSigningKeyUsage, #[error("Certificate without Subject Alternative Name")] CertificateWithoutSubjectAlternativeName, #[error("Certificate with incomplete Subject Alternative Name")] CertificateWithIncompleteSubjectAlternativeName, #[error("Certificate pool error: {0}")] CertificatePoolError(String), #[error("Consistency proof error: {0:?}")] ConsistencyProofError(MerkleProofError), #[error("Inclusion Proof error: {0:?}")] 
InclusionProofError(MerkleProofError), #[error("Signing session expired")] ExpiredSigningSession(), #[error("Fulcio request unsuccessful: {0}")] FulcioClientError(String), #[error("Cannot fetch manifest of {image}: {error}")] RegistryFetchManifestError { image: String, error: String }, #[error("Cannot pull manifest of {image}: {error}")] RegistryPullManifestError { image: String, error: String }, #[error("Cannot pull {image}: {error}")] RegistryPullError { image: String, error: String }, #[error("Cannot push {image}: {error}")] RegistryPushError { image: String, error: String }, #[error("Rekor request unsuccessful: {0}")] RekorClientError(String), #[error("Rekor public key not found for key id {0}")] RekorPublicKeyNotFoundError(String), #[error(transparent)] JoinError(#[from] tokio::task::JoinError), #[cfg(feature = "cert")] #[error(transparent)] KeyringError(#[from] crate::crypto::keyring::KeyringError), #[cfg(any(feature = "sign", feature = "verify"))] #[error(transparent)] SCTError(#[from] crate::crypto::transparency::SCTError), // HACK(tnytown): Remove when we rework the Fulcio V2 endpoint. 
#[cfg(any(feature = "fulcio", feature = "oauth"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "fulcio", feature = "oauth"))))] #[error(transparent)] ReqwestError(#[from] reqwest::Error), #[error("OCI reference not valid: {reference}")] OciReferenceNotValidError { reference: String }, #[error("Sigstore bundle malformed: {0}")] SigstoreBundleMalformedError(String), #[error("Sigstore PKI file is malformed: {0}")] SigstorePKIFileMalformedError(String), #[error("Layer doesn't have Sigstore media type")] SigstoreMediaTypeNotFoundError, #[error("Layer digest mismatch")] SigstoreLayerDigestMismatchError, #[error("Missing signature annotation")] SigstoreAnnotationNotFoundError, #[error("Rekor bundle missing")] SigstoreRekorBundleNotFoundError, #[error("Fulcio certificates not provided")] SigstoreFulcioCertificatesNotProvidedError, #[error("No Signature Layer passed verification")] SigstoreNoVerifiedLayer, #[cfg(feature = "sigstore-trust-root")] #[cfg_attr(docsrs, doc(cfg(feature = "sigstore-trust-root")))] #[error(transparent)] TufError(#[from] Box), #[error("TUF target {0} not found inside of repository")] TufTargetNotFoundError(String), #[error("{0}")] TufMetadataError(String), #[error(transparent)] IOError(#[from] std::io::Error), #[error("IOError: {context}: {source}")] IOErrorWithContext { context: String, #[source] source: std::io::Error, }, #[error("{0}")] UnexpectedError(String), #[error("{0}")] VerificationConstraintError(String), #[error("{0}")] VerificationMaterialError(String), #[error("{0}")] ApplyConstraintError(String), #[error("Verification of OIDC claims received from OpenIdProvider failed")] ClaimsVerificationError, #[cfg(feature = "oauth")] #[error("Claims configuration error: {0}")] ClaimsConfigurationError(#[from] openidconnect::ConfigurationError), #[error("Failed to access token endpoint")] ClaimsAccessPointError, #[error("Failed to get id_token")] NoIDToken, #[error("Pkcs8 error : {0}")] PKCS8Error(String), #[error("Pkcs8 spki error : {0}")] 
PKCS8SpkiError(String),

    #[error("Pkcs8 der encoding/decoding error : {0}")]
    PKCS8DerError(String),

    #[error(transparent)]
    ECDSAError(#[from] ecdsa::Error),

    #[error(transparent)]
    ECError(#[from] elliptic_curve::Error),

    #[error(transparent)]
    ScryptKDFInvalidParamsError(#[from] scrypt::errors::InvalidParams),

    #[error(transparent)]
    ScryptKDFInvalidOutputLenError(#[from] scrypt::errors::InvalidOutputLen),

    #[error("Failed to encrypt the private key: {0}")]
    PrivateKeyEncryptError(String),

    #[error("Failed to decrypt the private key: {0}")]
    PrivateKeyDecryptError(String),

    #[error(transparent)]
    SerdeJsonError(#[from] serde_json::error::Error),

    #[error(transparent)]
    Utf8Error(#[from] std::str::Utf8Error),

    #[error(transparent)]
    WebPKIError(#[from] webpki::Error),

    #[error("Failed to parse the key: {0}")]
    KeyParseError(String),

    #[error(transparent)]
    RSAError(#[from] rsa::errors::Error),

    #[error(transparent)]
    PKCS1Error(#[from] pkcs1::Error),

    #[error(transparent)]
    Ed25519PKCS8Error(#[from] ed25519_dalek::pkcs8::spki::Error),

    #[error(transparent)]
    X509ParseError(#[from] x509_cert::der::Error),

    #[error(transparent)]
    X509BuilderError(#[from] x509_cert::builder::Error),
}

================================================
FILE: src/fulcio/mod.rs
================================================
pub(crate) mod models;
pub mod oauth;

use crate::crypto::SigningScheme;
use crate::crypto::signing_key::SigStoreSigner;
use crate::errors::{Result, SigstoreError};
use crate::fulcio::models::{CreateSigningCertificateRequest, SigningCertificate};
use crate::fulcio::oauth::OauthTokenProvider;
use crate::oauth::IdentityToken;
use base64::{Engine as _, engine::general_purpose::STANDARD as BASE64_STD_ENGINE};
use openidconnect::core::CoreIdToken;
use reqwest::{Body, header};
use serde::ser::SerializeStruct;
use serde::{Serialize, Serializer};
use std::fmt::{Debug, Display, Formatter};
use url::Url;
use x509_cert::{Certificate, der::Decode};

pub use models::{CertificateResponse,
SigningCertificateDetachedSCT};

/// Default public Fulcio server root.
pub const FULCIO_ROOT: &str = "https://fulcio.sigstore.dev/";

/// Path within Fulcio to obtain a signing certificate.
pub const SIGNING_CERT_PATH: &str = "api/v1/signingCert";
pub const SIGNING_CERT_V2_PATH: &str = "api/v2/signingCert";

const CONTENT_TYPE_HEADER_NAME: &str = "content-type";

/// Fulcio certificate signing request
///
/// Used to present a public key and signed challenge/proof-of-key in exchange
/// for a signed X509 certificate in return.
#[derive(Serialize, Debug)]
#[serde(rename_all = "camelCase")]
struct Csr {
    // NOTE(review): the `Option` payload types were stripped in the extracted
    // text; restored from the construction site in `request_cert`, which fills
    // them with `Some(PublicKey(..))` and `Some(signature)`.
    public_key: Option<PublicKey>,
    signed_email_address: Option<String>,
}

impl TryFrom<Csr> for Body {
    type Error = serde_json::Error;

    // Serialize the CSR to JSON and wrap it in a reqwest request body.
    fn try_from(csr: Csr) -> std::result::Result<Self, Self::Error> {
        Ok(Body::from(serde_json::to_string(&csr)?))
    }
}

/// Internal newtype to control serde jsonification.
#[derive(Debug)]
struct PublicKey(String, SigningScheme);

impl Serialize for PublicKey {
    // NOTE(review): the `<S>` generic and the `S::Ok`/`S::Error` result
    // parameters were stripped in the extracted text; restored to the standard
    // serde `Serialize::serialize` signature.
    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // Emit `{ "content": <base64 DER>, "algorithm": <family name> }`,
        // collapsing the concrete signing scheme into its key family.
        let mut pk = serializer.serialize_struct("PublicKey", 2)?;
        pk.serialize_field("content", &self.0)?;
        pk.serialize_field(
            "algorithm",
            match self.1 {
                SigningScheme::ECDSA_P256_SHA256_ASN1 | SigningScheme::ECDSA_P384_SHA384_ASN1 => {
                    "ecdsa"
                }
                SigningScheme::ED25519 => "ed25519",
                SigningScheme::RSA_PSS_SHA256(_)
                | SigningScheme::RSA_PSS_SHA384(_)
                | SigningScheme::RSA_PSS_SHA512(_)
                | SigningScheme::RSA_PKCS1_SHA256(_)
                | SigningScheme::RSA_PKCS1_SHA384(_)
                | SigningScheme::RSA_PKCS1_SHA512(_) => "rsa",
            },
        )?;
        pk.end()
    }
}

/// The PEM-encoded certificate chain returned by Fulcio.
pub struct FulcioCert(String);

impl AsRef<[u8]> for FulcioCert {
    fn as_ref(&self) -> &[u8] {
        self.0.as_ref()
    }
}

impl Display for FulcioCert {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        std::fmt::Display::fmt(&self.0, f)
    }
}

/// Provider for Fulcio token.
#[allow(clippy::large_enum_variant)]
pub enum TokenProvider {
    /// A Static provider consists of a tuple where the first value is a
    /// OIDC token. The second is the value of the challenge.
    ///
    /// To figure out the correct value for the challenge one can list the
    /// issuers available:
    /// ```console
    /// $ curl -Ls https://fulcio.sigstore.dev/api/v2/configuration | jq
    /// ```
    /// Find the issuer of the token, and then find the value of the
    /// `challengeClaim` which will specify which value of the OIDC token's
    /// claims to use.
    ///
    /// For example, if the token was issued from
    /// `https://token.actions.githubusercontent.com`:
    /// ```json
    /// {
    ///     "issuerUrl": "https://token.actions.githubusercontent.com",
    ///     "audience": "sigstore",
    ///     "challengeClaim": "sub",
    ///     "spiffeTrustDomain": ""
    /// }
    /// ```
    /// In this case the value of the challenge should be the value of the
    /// `sub` (`subject`) claim of the token.
    ///
    Static((CoreIdToken, String)),
    Oauth(OauthTokenProvider),
}

impl TokenProvider {
    /// Retrieve a token and the challenge-to-sign from the provider.
    pub async fn get_token(&self) -> Result<(CoreIdToken, String)> {
        match self {
            // Static tokens are returned as-is (cloned).
            TokenProvider::Static(inner) => Ok(inner.clone()),
            // Oauth providers perform an interactive token exchange.
            TokenProvider::Oauth(auth) => auth.get_token().await,
        }
    }
}

/// Client for creating and holding ephemeral key pairs, and easily
/// getting a Fulcio-signed certificate chain.
pub struct FulcioClient {
    root_url: Url,
    token_provider: TokenProvider,
}

impl FulcioClient {
    /// Create a new Fulcio client.
    ///
    /// * root_url: The root Fulcio server URL.
    /// * token_provider: Provider capable of providing a CoreIdToken and the challenge to sign.
    ///
    /// Returns a configured Fulcio client.
    pub fn new(root_url: Url, token_provider: TokenProvider) -> Self {
        Self {
            root_url,
            token_provider,
        }
    }

    /// Request a certificate from Fulcio
    ///
    /// * signing_scheme: The signing scheme to use.
/// /// Returns a tuple of the appropriately-configured sigstore signer and the Fulcio-issued certificate chain. pub async fn request_cert( self, signing_scheme: SigningScheme, ) -> Result<(SigStoreSigner, FulcioCert)> { let (token, challenge) = self.token_provider.get_token().await?; let signer = signing_scheme.create_signer()?; let signature = signer.sign(challenge.as_bytes())?; let signature = BASE64_STD_ENGINE.encode(signature); let key_pair = signer.to_sigstore_keypair()?; let public_key = key_pair.public_key_to_der()?; let public_key = BASE64_STD_ENGINE.encode(public_key); let csr = Csr { public_key: Some(PublicKey(public_key, signing_scheme)), signed_email_address: Some(signature), }; let csr = TryInto::::try_into(csr)?; let client = reqwest::Client::new(); let response = client .post(self.root_url.join(SIGNING_CERT_PATH)?) .header(CONTENT_TYPE_HEADER_NAME, "application/json") .bearer_auth(token.to_string()) .body(csr) .send() .await .map_err(|_| SigstoreError::SigstoreFulcioCertificatesNotProvidedError)?; let cert = response .text() .await .map_err(|_| SigstoreError::SigstoreFulcioCertificatesNotProvidedError)?; Ok((signer, FulcioCert(cert))) } /// Request a certificate from Fulcio with the V2 endpoint. /// /// TODO(tnytown): This (and other API clients) should be autogenerated. See sigstore-rs#209. /// /// /// /// Additionally, it might not be reasonable to expect callers to correctly construct and pass /// in an X509 CSR. pub async fn request_cert_v2( &self, request: x509_cert::request::CertReq, identity: &IdentityToken, ) -> Result { let client = reqwest::Client::new(); macro_rules! 
headers { ($($key:expr => $val:expr),+) => { { let mut map = reqwest::header::HeaderMap::new(); $( map.insert($key, $val.parse().unwrap()); )+ map } } } let headers = headers!( header::AUTHORIZATION => format!("Bearer {}", identity.to_string()), header::CONTENT_TYPE => "application/json", header::ACCEPT => "application/pem-certificate-chain" ); let response = client .post(self.root_url.join(SIGNING_CERT_V2_PATH)?) .headers(headers) .json(&CreateSigningCertificateRequest { certificate_signing_request: request, }) .send() .await?; let response = response.json().await?; let (certs, detached_sct) = match response { SigningCertificate::SignedCertificateDetachedSct(ref sc) => { (&sc.chain.certificates, Some(sc.clone())) } SigningCertificate::SignedCertificateEmbeddedSct(ref sc) => { (&sc.chain.certificates, None) } }; if certs.len() < 2 { return Err(SigstoreError::FulcioClientError( "Certificate chain too short: certs.len() < 2".into(), )); } let cert = Certificate::from_der(certs[0].contents())?; let chain = certs[1..] .iter() .map(|pem| Certificate::from_der(pem.contents())) .collect::, _>>()?; Ok(CertificateResponse { cert, chain, detached_sct, }) } } ================================================ FILE: src/fulcio/models.rs ================================================ // Copyright 2023 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Models for interfacing with Fulcio. //! //! 
use pem::Pem; use pkcs8::der::EncodePem; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_repr::Deserialize_repr; use serde_with::{ DeserializeAs, SerializeAs, base64::{Base64, Standard}, formats::Padded, serde_as, }; use x509_cert::Certificate; fn serialize_x509_csr( input: &x509_cert::request::CertReq, ser: S, ) -> std::result::Result where S: Serializer, { let encoded = input .to_pem(pkcs8::LineEnding::LF) .map_err(serde::ser::Error::custom)?; Base64::::serialize_as(&encoded, ser) } fn deserialize_inner_detached_sct<'de, D>(de: D) -> std::result::Result where D: Deserializer<'de>, { let buf: Vec = Base64::::deserialize_as(de)?; serde_json::from_slice(&buf).map_err(serde::de::Error::custom) } fn deserialize_inner_detached_sct_signature<'de, D>(de: D) -> Result, D::Error> where D: Deserializer<'de>, { let buf: Vec = Base64::::deserialize_as(de)?; // The first two bytes indicate the signature and hash algorithms so let's skip those. // The next two bytes indicate the size of the signature. let signature_size = u16::from_be_bytes(buf[2..4].try_into().expect("unexpected length")); // This should be equal to the length of the remainder of the signature buffer. 
let signature = buf[4..].to_vec(); if signature_size as usize != signature.len() { return Err(serde::de::Error::custom("signature size mismatch")); } Ok(signature) } #[derive(Serialize)] #[serde(rename_all = "camelCase")] pub struct CreateSigningCertificateRequest { #[serde(serialize_with = "serialize_x509_csr")] pub certificate_signing_request: x509_cert::request::CertReq, } #[derive(Deserialize)] #[serde(rename_all = "camelCase")] pub enum SigningCertificate { SignedCertificateDetachedSct(SigningCertificateDetachedSCT), SignedCertificateEmbeddedSct(SigningCertificateEmbeddedSCT), } #[derive(Deserialize, Debug, Clone)] #[serde(rename_all = "camelCase")] pub struct SigningCertificateDetachedSCT { pub chain: CertificateChain, #[serde(deserialize_with = "deserialize_inner_detached_sct")] pub signed_certificate_timestamp: InnerDetachedSCT, } #[derive(Deserialize)] #[serde(rename_all = "camelCase")] pub struct SigningCertificateEmbeddedSCT { pub chain: CertificateChain, } #[derive(Deserialize, Debug, Clone)] pub struct CertificateChain { pub certificates: Vec, } #[serde_as] #[derive(Deserialize, Debug, Clone)] pub struct InnerDetachedSCT { pub sct_version: SCTVersion, #[serde_as(as = "Base64")] pub id: [u8; 32], pub timestamp: u64, #[serde(deserialize_with = "deserialize_inner_detached_sct_signature")] pub signature: Vec, #[serde_as(as = "Base64")] pub extensions: Vec, } #[derive(Deserialize_repr, PartialEq, Debug, Clone)] #[repr(u8)] pub enum SCTVersion { V1 = 0, } pub struct CertificateResponse { pub cert: Certificate, pub chain: Vec, pub detached_sct: Option, } ================================================ FILE: src/fulcio/oauth.rs ================================================ use crate::errors::Result; use crate::errors::SigstoreError; use crate::oauth::openidflow::{OpenIDAuthorize, RedirectListener}; use openidconnect::core::CoreIdToken; /// Default client id ("sigstore"). 
pub const DEFAULT_CLIENT_ID: &str = "sigstore"; /// Default client secret (the empty string) pub const DEFAULT_CLIENT_SECRET: &str = ""; /// Default issuer (Oauth provider at sigstore.dev) pub const DEFAULT_ISSUER: &str = "https://oauth2.sigstore.dev/auth"; /// Default local redirect port (8080) pub const DEFAULT_REDIRECT_PORT: u32 = 8080; /// Token provider that performs a human-involved OIDC flow to acquire a token id. #[derive(Default)] pub struct OauthTokenProvider { client_id: Option, client_secret: Option, issuer: Option, redirect_port: Option, } impl OauthTokenProvider { /// Set a non-default client-id. pub fn with_client_id(self, client_id: &str) -> Self { Self { client_id: Some(client_id.to_string()), client_secret: self.client_secret, issuer: self.issuer, redirect_port: self.redirect_port, } } /// Set a non-default client secret. pub fn with_client_secret(self, client_secret: &str) -> Self { Self { client_id: self.client_id, client_secret: Some(client_secret.to_string()), issuer: self.issuer, redirect_port: self.redirect_port, } } /// Set a non-default issuer. pub fn with_issuer(self, issuer: &str) -> Self { Self { client_id: self.client_id, client_secret: self.client_secret, issuer: Some(issuer.to_string()), redirect_port: self.redirect_port, } } /// Set a non-default redirect port. pub fn with_redirect_port(self, port: u32) -> Self { Self { client_id: self.client_id, client_secret: self.client_secret, issuer: self.issuer, redirect_port: Some(port), } } fn redirect_url(&self) -> String { format!( "http://localhost:{}", self.redirect_port.unwrap_or(DEFAULT_REDIRECT_PORT) ) } /// Perform human-involved OIDC flow to acquire an id token, along with /// the extracted email claim value for use in signed challenge with Fulcio. 
pub async fn get_token(&self) -> Result<(CoreIdToken, String)> { let oidc_url = OpenIDAuthorize::new( self.client_id .as_ref() .unwrap_or(&DEFAULT_CLIENT_ID.to_string()), self.client_secret .as_ref() .unwrap_or(&DEFAULT_CLIENT_SECRET.to_string()), self.issuer.as_ref().unwrap_or(&DEFAULT_ISSUER.to_string()), &self.redirect_url(), ) .auth_url_async() .await; match oidc_url.as_ref() { Ok(url) => { webbrowser::open(url.0.as_ref())?; println!( "Open this URL in a browser if it does not automatically open for you:\n{}\n", url.0, ); } Err(e) => println!("{e}"), } let oidc_url = oidc_url?; let result = RedirectListener::new( &format!( "127.0.0.1:{}", self.redirect_port.unwrap_or(DEFAULT_REDIRECT_PORT) ), oidc_url.1.clone(), // client oidc_url.2.clone(), // nonce oidc_url.3, // pkce_verifier ) .redirect_listener_async() .await; if let Ok((_, id_token)) = result { let verifier = oidc_url.1.id_token_verifier(); let nonce = &oidc_url.2; let claims = id_token.claims(&verifier, nonce); if let Ok(claims) = claims && let Some(email) = claims.email() { let email = &**email; return Ok((id_token.clone(), email.clone())); } } Err(SigstoreError::NoIDToken) } } ================================================ FILE: src/lib.rs ================================================ // // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! This crate aims to provide [Sigstore](https://www.sigstore.dev/) capabilities to Rust developers. //! //! 
Currently, the main focus of the crate is to provide the verification //! capabilities offered by the official `cosign` tool. //! //! **Warning:** this library is still experimental. Its API can change at any time. //! //! # Security //! //! Should you discover any security issues, please refer to //! Sigstore's [security process](https://github.com/sigstore/community/blob/main/SECURITY.md). //! //! # Verification //! //! Sigstore verification is done using the [`cosign::Client`] //! struct. //! //! ## Triangulation of Sigstore signature //! //! Given a container image/oci artifact, calculate the location of //! its cosign signature inside of a registry: //! //! ```rust,no_run //! use crate::sigstore::cosign::CosignCapabilities; //! use std::fs; //! //! #[tokio::main] //! pub async fn main() { //! let auth = &sigstore::registry::Auth::Anonymous; //! //! let mut client = sigstore::cosign::ClientBuilder::default() //! .build() //! .expect("Unexpected failure while building Client"); //! let image = "registry-testing.svc.lan/kubewarden/disallow-service-nodeport:v0.1.0".parse().unwrap(); //! let (cosign_signature_image, source_image_digest) = client.triangulate( //! &image, //! auth //! ).await.expect("Unexpected failure while using triangulate"); //! } //! ``` //! //! ## Signature verification //! //! Verify the signature of a container image/oci artifact: //! //!```rust,no_run //! use std::{boxed::Box, collections::BTreeMap, fs}; //! //! use sigstore::{ //! cosign::{ //! CosignCapabilities, //! verification_constraint::{ //! AnnotationVerifier, PublicKeyVerifier, VerificationConstraintVec, //! }, //! verify_constraints, //! }, //! crypto::SigningScheme, //! errors::SigstoreVerifyConstraintsError, //! trust::sigstore::SigstoreTrustRoot, //! }; //! //! #[tokio::main] //! pub async fn main() { //! let auth = &sigstore::registry::Auth::Anonymous; //! //! // Initialize a Sigstore trust root. This will fetch the latest trust //! 
// materials from the Sigstore TUF repository. //! let repo = SigstoreTrustRoot::new(None) //! .await //! .expect("Could not initialize Sigstore trust root"); //! //! let mut client = sigstore::cosign::ClientBuilder::default() //! .with_trust_repository(&repo) //! .expect("Cannot construct cosign client from given materials") //! .build() //! .expect("Unexpected failure while building Client"); //! //! // Obtained via `triangulate` //! let cosign_image = "registry-testing.svc.lan/kubewarden/disallow-service-nodeport:sha256-5f481572d088dc4023afb35fced9530ced3d9b03bf7299c6f492163cb9f0452e.sig" //! .parse().unwrap(); //! // Obtained via `triangulate` //! let source_image_digest = //! "sha256-5f481572d088dc4023afb35fced9530ced3d9b03bf7299c6f492163cb9f0452e"; //! //! // Obtain the list of associated signature layers that can be trusted //! let signature_layers = client //! .trusted_signature_layers(auth, source_image_digest, &cosign_image) //! .await //! .expect("Could not obtain signature layers"); //! //! // Define verification constraints //! let mut annotations: BTreeMap = BTreeMap::new(); //! annotations.insert("env".to_string(), "prod".to_string()); //! let annotation_verifier = AnnotationVerifier { annotations }; //! //! let verification_key = //! fs::read("~/cosign.pub").expect("Cannot read contents of cosign public key"); //! let pub_key_verifier = PublicKeyVerifier::new(&verification_key, &SigningScheme::default()) //! .expect("Could not create verifier"); //! //! let verification_constraints: VerificationConstraintVec = //! vec![Box::new(annotation_verifier), Box::new(pub_key_verifier)]; //! //! // Use the given list of constraints to verify the trusted //! // `signature_layers`. This will raise an error if one or more verification //! // constraints are not satisfied. //! let result = verify_constraints(&signature_layers, verification_constraints.iter()); //! //! match result { //! Ok(()) => { //! println!("Image successfully verified"); //! } //! 
Err(SigstoreVerifyConstraintsError { //! unsatisfied_constraints, //! }) => { //! println!("{:?}", unsatisfied_constraints); //! panic!("Image verification failed") //! } //! } //! } //! ``` //! # Rekor integration //! The examples folder contains code that shows users how to make Rekor API calls. //! It also provides a clean interface with step-by-step instructions that other developers can copy and paste. //! //! ```rust,no_run //! use clap::{Arg, Command}; //! use sigstore::rekor::apis::{configuration::Configuration, entries_api}; //! use sigstore::rekor::models::log_entry::LogEntry; //! use std::str::FromStr; //! #[tokio::main] //! async fn main() { //! /* //! Retrieves an entry and inclusion proof from the transparency log (if it exists) by index //! Example command : //! cargo run --example get_log_entry_by_index -- --log_index 99 //! */ //! let matches = Command::new("cmd").arg( //! Arg::new("log_index") //! .long("log_index") //! .value_name("LOG_INDEX") //! .help("log_index of the artifact"), //! ); //! //! let flags = matches.get_matches(); //! let index = ::from_str( //! flags.get_one::("log_index") //! .unwrap_or(&"1".to_string()) //! ).unwrap(); //! //! let configuration = Configuration::default(); //! //! let message: LogEntry = entries_api::get_log_entry_by_index(&configuration, index) //! .await //! .unwrap(); //! println!("{:#?}", message); //! } //! ``` //! //! The following comment in the code tells the user how to provide the required values to the API calls using cli flags. //! //! In the example below, the user can retrieve different entries by inputting a different value for the log_index flag. //! //! //!/* //!Retrieves an entry and inclusion proof from the transparency log (if it exists) by index //!Example command : //!cargo run --example get_log_entry_by_index -- --log_index 99 //!*/ //! //! # The example code is provided for the following API calls: //! 
//!- create_log_entry //!- get_log_entry_by_index //!- get_log_entry_by_uuid //!- get_log_info //!- get_log_proof //!- get_public_key //!- search_index //!- search_log_query //! //! //! # Examples //! //! Additional examples can be found inside of the [`examples`](https://github.com/sigstore/sigstore-rs/tree/main/examples/) //! directory. //! //! ## Fulcio and Rekor integration //! //! [`cosign::Client`] integration with Fulcio and Rekor //! requires the following data to work: Fulcio's certificate and Rekor's public key. //! //! These files are safely distributed by the Sigstore project via a TUF repository. //! The [`sigstore::trust::sigstore`](crate::trust::sigstore) module provides the helper structures to deal //! with it. //! //! # Feature Flags //! //! Sigstore-rs uses a set of [feature flags] to reduce the amount of compiled code. //! By default, all features are enabled, and `native-tls` is used for TLS. //! It is recommended to use `default-features = false` in your `Cargo.toml` //! and only enable the features you need. //! //! ## TLS (Required) //! //! One of these features must be enabled: //! //! - `native-tls`: Enables support for `native-tls` as the underlying tls for all the features. //! - `rustls-tls`: Enables support for `rustls-tls` as the underlying tls for all the features. //! //! ## Features //! //! - `full`: Enables all features documented below. //! - `cosign`: Enables support for `cosign`. //! - `cached-client`: Enables an in-memory cache for the cosign registry client. //! - `fulcio`: Enables support for `fulcio`. //! - `oauth`: Enables support for `oauth`. //! - `registry`: Enables support for `registry`. //! - `rekor`: Enables support for `rekor`. //! - `sign`: Enables support for signing. //! - `verify`: Enables support for verification. //! - `bundle`: Includes both `sign` and `verify`. //! - `sigstore-trust-root`: Enables support for Sigstore trust root. 
#![forbid(unsafe_code)]
#![warn(clippy::unwrap_used, clippy::panic)]
#![cfg_attr(docsrs, feature(doc_cfg))]

// Modules that are always available, independent of feature flags.
pub mod crypto;
pub mod trust;

// Feature-gated modules. The `doc(cfg(...))` attribute makes rustdoc render
// which feature enables each module when built with the `docsrs` cfg.
#[cfg_attr(docsrs, doc(cfg(feature = "mock-client")))]
#[cfg(feature = "mock-client")]
mod mock_client;

#[cfg_attr(docsrs, doc(cfg(feature = "cosign")))]
#[cfg(feature = "cosign")]
pub mod cosign;

pub mod errors;

#[cfg_attr(docsrs, doc(cfg(feature = "fulcio")))]
#[cfg(feature = "fulcio")]
pub mod fulcio;

#[cfg_attr(docsrs, doc(cfg(feature = "oauth")))]
#[cfg(feature = "oauth")]
pub mod oauth;

#[cfg_attr(docsrs, doc(cfg(feature = "registry")))]
#[cfg(feature = "registry")]
pub mod registry;

#[cfg_attr(docsrs, doc(cfg(feature = "rekor")))]
#[cfg(feature = "rekor")]
pub mod rekor;

// `bundle` is available when either signing or verification is enabled.
#[cfg_attr(docsrs, doc(cfg(any(feature = "sign", feature = "verify"))))]
#[cfg(any(feature = "sign", feature = "verify"))]
pub mod bundle;

================================================
FILE: src/mock_client.rs
================================================
//
// Copyright 2021 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[cfg(test)] pub(crate) mod test { use crate::errors::{Result, SigstoreError}; use async_trait::async_trait; use oci_client::{ Reference, client::{ImageData, PushResponse}, manifest::OciManifest, secrets::RegistryAuth, }; #[derive(Default)] pub struct MockOciClient { pub fetch_manifest_digest_response: Option>, pub pull_response: Option>, pub pull_manifest_response: Option>, pub push_response: Option>, } impl crate::registry::ClientCapabilitiesDeps for MockOciClient {} #[cfg_attr(not(target_arch = "wasm32"), async_trait)] #[cfg_attr(target_arch = "wasm32", async_trait(?Send))] impl crate::registry::ClientCapabilities for MockOciClient { async fn fetch_manifest_digest( &mut self, image: &Reference, _auth: &RegistryAuth, ) -> Result { let mock_response = self .fetch_manifest_digest_response .as_ref() .ok_or_else(|| SigstoreError::RegistryFetchManifestError { image: image.whole(), error: String::from("No fetch_manifest_digest_response provided!"), })?; match mock_response { Ok(r) => Ok(r.clone()), Err(e) => Err(SigstoreError::RegistryFetchManifestError { image: image.whole(), error: e.to_string(), }), } } async fn pull( &mut self, image: &Reference, _auth: &RegistryAuth, _accepted_media_types: Vec<&str>, ) -> Result { let mock_response = self.pull_response .as_ref() .ok_or_else(|| SigstoreError::RegistryPullError { image: image.whole(), error: String::from("No pull_response provided!"), })?; match mock_response { Ok(r) => Ok(r.clone()), Err(e) => Err(SigstoreError::RegistryPullError { image: image.whole(), error: e.to_string(), }), } } async fn pull_manifest( &mut self, image: &Reference, _auth: &RegistryAuth, ) -> Result<(OciManifest, String)> { let mock_response = self.pull_manifest_response.as_ref().ok_or_else(|| { SigstoreError::RegistryPullError { image: image.whole(), error: String::from("No pull_manifest_response provided!"), } })?; match mock_response { Ok(r) => Ok(r.clone()), Err(e) => Err(SigstoreError::RegistryPullError { image: image.whole(), error: 
e.to_string(), }), } } async fn push( &mut self, image_ref: &oci_client::Reference, _layers: &[oci_client::client::ImageLayer], _config: oci_client::client::Config, _auth: &oci_client::secrets::RegistryAuth, _manifest: Option, ) -> Result { let mock_response = self.push_response .as_ref() .ok_or_else(|| SigstoreError::RegistryPushError { image: image_ref.whole(), error: String::from("No push_response provided!"), })?; match mock_response { Ok(r) => Ok(PushResponse { config_url: r.config_url.clone(), manifest_url: r.manifest_url.clone(), }), Err(e) => Err(SigstoreError::RegistryPushError { image: image_ref.whole(), error: e.to_string(), }), } } } } ================================================ FILE: src/oauth/http_client.rs ================================================ // // Copyright 2026 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /// Adapter to use reqwest as the HTTP client for openidconnect/oauth2. /// /// The `openidconnect` crate defines HTTP client traits (`AsyncHttpClient`, /// `SyncHttpClient`) that are HTTP-client-agnostic. This module provides /// implementations backed by the `reqwest` version used by this crate, /// avoiding the need to pull in a second copy of reqwest via openidconnect's /// built-in support. 
use std::{future::Future, pin::Pin}; use openidconnect::{ AsyncHttpClient, HttpClientError, HttpRequest, HttpResponse, SyncHttpClient, http, }; /// A wrapper around [`reqwest::Client`] that implements [`AsyncHttpClient`] /// for use with the `openidconnect` crate. pub(crate) struct AsyncReqwestClient(pub reqwest::Client); impl<'c> AsyncHttpClient<'c> for AsyncReqwestClient { type Error = HttpClientError; #[cfg(target_arch = "wasm32")] type Future = Pin> + 'c>>; #[cfg(not(target_arch = "wasm32"))] type Future = Pin> + Send + Sync + 'c>>; fn call(&'c self, request: HttpRequest) -> Self::Future { Box::pin(async move { let response = self .0 .execute(request.try_into().map_err(Box::new)?) .await .map_err(Box::new)?; let mut builder = http::Response::builder().status(response.status()); #[cfg(not(target_arch = "wasm32"))] { builder = builder.version(response.version()); } for (name, value) in response.headers().iter() { builder = builder.header(name, value); } builder .body(response.bytes().await.map_err(Box::new)?.to_vec()) .map_err(HttpClientError::Http) }) } } /// A wrapper around [`reqwest::blocking::Client`] that implements [`SyncHttpClient`] /// for use with the `openidconnect` crate. /// /// Not available on `wasm32` targets. #[cfg(not(target_arch = "wasm32"))] pub(crate) struct SyncReqwestClient(pub reqwest::blocking::Client); #[cfg(not(target_arch = "wasm32"))] impl SyncHttpClient for SyncReqwestClient { type Error = HttpClientError; fn call(&self, request: HttpRequest) -> Result { let response = self .0 .execute(request.try_into().map_err(Box::new)?) 
.map_err(Box::new)?; let mut builder = http::Response::builder() .status(response.status()) .version(response.version()); for (name, value) in response.headers().iter() { builder = builder.header(name, value); } builder .body(response.bytes().map_err(Box::new)?.to_vec()) .map_err(HttpClientError::Http) } } ================================================ FILE: src/oauth/mod.rs ================================================ // // Copyright 2022 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. pub mod openidflow; pub(crate) mod http_client; pub mod token; pub use token::IdentityToken; ================================================ FILE: src/oauth/openidflow.rs ================================================ // // Copyright 2022 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! This provides a method for retreiving a OpenID Connect ID Token and scope from the sigstore project. //! //! The main entry points are: //! 
- [`OpenIDAuthorize::auth_url`](OpenIDAuthorize::auth_url) (synchronous, non-wasm only) //! - [`OpenIDAuthorize::auth_url_async`](OpenIDAuthorize::auth_url_async) (async) //! //! Both require four parameters: //! - `client_id`: the client ID of the application //! - `client_secret`: the client secret of the application //! - `issuer`: the URL of the OpenID Connect server //! - `redirect_uri`: the URL of the callback endpoint //! //! They return the following: //! //! - `authorize_url` is a URL that can be opened in a browser. The user will be //! prompted to login and authorize the application. The user will be redirected to //! the `redirect_uri` URL with a code parameter. //! //! - `client` is a client object that can be used to make requests to the OpenID //! Connect server. //! //! - `nonce` is a random value that is used to prevent replay attacks. //! //! - `pkce_verifier` is a PKCE verifier that can be used to generate the code_verifier //! value. //! //! Once you have recieved the above tuple, you can use: //! - [`RedirectListener::redirect_listener`](RedirectListener::redirect_listener) (synchronous, non-wasm only) //! - [`RedirectListener::redirect_listener_async`](RedirectListener::redirect_listener_async) (async) //! //! to get the ID Token and scope. //! //! The `IdTokenClaims` this contains params such as `email` and the `access_token`. //! //! It maybe prefered to instead develop your own listener. If so bypass using the //! [`RedirectListener::redirect_listener`](RedirectListener::redirect_listener) / //! [`RedirectListener::redirect_listener_async`](RedirectListener::redirect_listener_async) function and //! simply send the values retrieved from the [`OpenIDAuthorize::auth_url`](OpenIDAuthorize::auth_url) / //! [`OpenIDAuthorize::auth_url_async`](OpenIDAuthorize::auth_url_async) //! to your own listener. //! //! //! **Warning:** [`OpenIDAuthorize::auth_url`](OpenIDAuthorize::auth_url) performs //! blocking operations. 
Because of that it can cause panics at runtime if invoked inside of `async` code. //! If you need to use this function inside of an async code you must wrap it inside of a `spawn_blocking` instruction: //! //! ```rust,ignore //! use tokio::task::spawn_blocking; //! //! async fn my_async_function() { //! // ... your code //! //! let oidc_url = spawn_blocking(|| //! oauth::openidflow::OpenIDAuthorize::new( //! "sigstore", //! "", //! "https://oauth2.sigstore.dev/auth", //! "http://localhost:8080", //! ) //! .auth_url() //! ) //! .await //! .expect("Error spawning blocking task"); //! //! // ... your code //! } //! ``` //! This of course has a performance hit when used inside of an async function. use std::{ io::{BufRead, BufReader, Write}, net::TcpListener, }; use openidconnect::{ AuthorizationCode, ClientId, ClientSecret, CsrfToken, IssuerUrl, Nonce, PkceCodeChallenge, PkceCodeVerifier, RedirectUrl, Scope, core::{ CoreAuthenticationFlow, CoreClient, CoreIdToken, CoreIdTokenClaims, CoreIdTokenVerifier, CoreProviderMetadata, CoreTokenResponse, }, }; use tracing::error; use url::Url; use crate::errors::{Result, SigstoreError}; use crate::oauth::http_client::AsyncReqwestClient; #[cfg(not(target_arch = "wasm32"))] use crate::oauth::http_client::SyncReqwestClient; pub(crate) type OpenIdClient = openidconnect::core::CoreClient< openidconnect::EndpointSet, // HasAuthUrl openidconnect::EndpointNotSet, // HasDeviceAuthUrl openidconnect::EndpointNotSet, // HasIntrospectionUrl openidconnect::EndpointNotSet, // HasRevocationUrl openidconnect::EndpointMaybeSet, // HasTokenUrl openidconnect::EndpointMaybeSet, // HasUserInfoUrl >; #[derive(Debug)] pub struct OpenIDAuthorize { oidc_cliend_id: String, oidc_client_secret: String, oidc_issuer: String, redirect_url: String, } impl OpenIDAuthorize { //! Create a new OpenIDAuthorize struct //! //! # Arguments //! //! * `client_id` - the client ID of the application //! * `client_secret` - the client secret of the application //! 
* `issuer` - the URL of the OpenID Connect server //! * `redirect_url` - client redirect URL //! # Example //! //! ```rust,ignore //! use sigstore::oauth::openidflow::OpenIDAuthorize; //! //! let oidc = OpenIDAuthorize::new("client_id", "client_secret", "https://example.com", "http://localhost:8080").auth_url(); //! ``` pub fn new(client_id: &str, client_secret: &str, issuer: &str, redirect_url: &str) -> Self { Self { oidc_cliend_id: client_id.to_string(), oidc_client_secret: client_secret.to_string(), oidc_issuer: issuer.to_string(), redirect_url: redirect_url.to_string(), } } fn auth_url_internal( &self, provider_metadata: CoreProviderMetadata, ) -> Result<(Url, OpenIdClient, Nonce, PkceCodeVerifier)> { let client_id = ClientId::new(self.oidc_cliend_id.to_owned()); let client_secret = ClientSecret::new(self.oidc_client_secret.to_owned()); let (pkce_challenge, pkce_verifier) = PkceCodeChallenge::new_random_sha256(); let client = CoreClient::from_provider_metadata(provider_metadata, client_id, Some(client_secret)) .set_redirect_uri( RedirectUrl::new(self.redirect_url.to_owned()).expect("Invalid redirect URL"), ); let (authorize_url, _, nonce) = client .authorize_url( CoreAuthenticationFlow::AuthorizationCode, CsrfToken::new_random, Nonce::new_random, ) .add_scope(Scope::new("email".to_string())) .set_pkce_challenge(pkce_challenge) .url(); Ok((authorize_url, client, nonce, pkce_verifier)) } #[cfg(not(target_arch = "wasm32"))] pub fn auth_url(&self) -> Result<(Url, OpenIdClient, Nonce, PkceCodeVerifier)> { let http_client = SyncReqwestClient( reqwest::blocking::ClientBuilder::new() // Following redirects opens the client up to SSRF vulnerabilities. 
.redirect(reqwest::redirect::Policy::none()) .build()?, ); let issuer = IssuerUrl::new(self.oidc_issuer.to_owned()).expect("Missing the OIDC_ISSUER."); let provider_metadata = CoreProviderMetadata::discover(&issuer, &http_client).map_err(|err| { error!("Error is: {:?}", err); SigstoreError::ClaimsVerificationError })?; self.auth_url_internal(provider_metadata) } pub async fn auth_url_async(&self) -> Result<(Url, OpenIdClient, Nonce, PkceCodeVerifier)> { let async_http_client = AsyncReqwestClient( reqwest::ClientBuilder::new() // Following redirects opens the client up to SSRF vulnerabilities. .redirect(reqwest::redirect::Policy::none()) .build()?, ); let issuer = IssuerUrl::new(self.oidc_issuer.to_owned()).expect("Missing the OIDC_ISSUER."); let provider_metadata = CoreProviderMetadata::discover_async(issuer, &async_http_client) .await .map_err(|err| { error!("Error is: {:?}", err); SigstoreError::ClaimsVerificationError })?; self.auth_url_internal(provider_metadata) } } pub struct RedirectListener { client_redirect_host: String, client: OpenIdClient, nonce: Nonce, pkce_verifier: PkceCodeVerifier, } impl RedirectListener { //! Create a new RedirectListener struct //! //! # Arguments //! //! * `client_redirect_host` - The client callback host IP:PORT //! * `client` - CoreClient instance (returned from OpenIDAuthorize) //! * `nonce` - Nonce (returned from OpenIDAuthorize) //! * `pkce_verifier` - client redirect URL //! # Example //! //! ```rust,ignore //! use sigstore::oauth::openidflow::RedirectListener; //! //! let oidc = RedirectListener::new("127.0.0.1:8080", client, nonce, pkce_verifier).redirect_listener_async().await; //! 
``` pub fn new( client_redirect_host: &str, client: OpenIdClient, nonce: Nonce, pkce_verifier: PkceCodeVerifier, ) -> Self { Self { client_redirect_host: client_redirect_host.to_string(), client, nonce, pkce_verifier, } } fn redirect_listener_internal(&self) -> Result { let listener = TcpListener::bind(self.client_redirect_host.clone())?; #[allow(clippy::manual_flatten)] for stream in listener.incoming() { if let Ok(mut stream) = stream { let code; { let mut reader = BufReader::new(&stream); let mut request_line = String::new(); reader.read_line(&mut request_line)?; let client_redirect_host = request_line .split_whitespace() .nth(1) .ok_or(SigstoreError::RedirectUrlRequestLineError)?; let url = Url::parse(format!("http://localhost{client_redirect_host}").as_str())?; let code_pair = url .query_pairs() .find(|pair| { let (key, _) = pair; key == "code" }) .ok_or(SigstoreError::CodePairError)?; let (_, value) = code_pair; code = AuthorizationCode::new(value.into_owned()); } let html_page = r#" Sigstore Auth

Sigstore Auth Successful

You may now close this page.

"#; let response = format!( "HTTP/1.1 200 OK\r\ncontent-length: {}\r\n\r\n{}", html_page.len(), html_page ); stream.write_all(response.as_bytes())?; return Ok(code); } } Err(SigstoreError::CodePairError) } #[cfg(not(target_arch = "wasm32"))] pub fn redirect_listener(self) -> Result<(CoreIdTokenClaims, CoreIdToken)> { let http_client = SyncReqwestClient( reqwest::blocking::ClientBuilder::new() // Following redirects opens the client up to SSRF vulnerabilities. .redirect(reqwest::redirect::Policy::none()) .build()?, ); let code = self.redirect_listener_internal()?; let token_response = self .client .exchange_code(code)? .set_pkce_verifier(self.pkce_verifier) .request(&http_client) .map_err(|_| SigstoreError::ClaimsAccessPointError)?; Self::extract_token_and_claims( &token_response, &self.client.id_token_verifier(), self.nonce, ) } pub async fn redirect_listener_async(self) -> Result<(CoreIdTokenClaims, CoreIdToken)> { let async_http_client = AsyncReqwestClient( reqwest::ClientBuilder::new() // Following redirects opens the client up to SSRF vulnerabilities. .redirect(reqwest::redirect::Policy::none()) .build()?, ); let code = self.redirect_listener_internal()?; let token_response = self .client .exchange_code(code)? 
.set_pkce_verifier(self.pkce_verifier) .request_async(&async_http_client) .await .map_err(|_| SigstoreError::ClaimsAccessPointError)?; Self::extract_token_and_claims( &token_response, &self.client.id_token_verifier(), self.nonce, ) } fn extract_token_and_claims( token_response: &CoreTokenResponse, id_token_verifier: &CoreIdTokenVerifier, nonce: Nonce, ) -> Result<(CoreIdTokenClaims, CoreIdToken)> { let id_token = token_response .extra_fields() .id_token() .ok_or(SigstoreError::NoIDToken)?; let id_token_claims: &CoreIdTokenClaims = token_response .extra_fields() .id_token() .expect("Server did not return an ID token") .claims(id_token_verifier, &nonce) .map_err(|_| SigstoreError::ClaimsVerificationError)?; Ok((id_token_claims.clone(), id_token.clone())) } } #[cfg(test)] mod tests { use super::*; fn assert_auth_url(url: &str) { assert!(url.contains("https://oauth2.sigstore.dev/auth")); assert!(url.contains("response_type=code")); assert!(url.contains("client_id=sigstore")); assert!(url.contains("scope=openid+email")); } #[cfg(not(target_arch = "wasm32"))] #[test] fn test_auth_url() { let oidc_url = OpenIDAuthorize::new( "sigstore", "some_secret", "https://oauth2.sigstore.dev/auth", "http://localhost:8080", ) .auth_url() .unwrap(); assert_auth_url(oidc_url.0.to_string().as_str()); } #[tokio::test] async fn test_auth_url_async() { let oidc_url = OpenIDAuthorize::new( "sigstore", "some_secret", "https://oauth2.sigstore.dev/auth", "http://localhost:8080", ) .auth_url_async() .await .unwrap(); assert_auth_url(oidc_url.0.to_string().as_str()); } } ================================================ FILE: src/oauth/token.rs ================================================ // Copyright 2023 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use chrono::{DateTime, Utc}; use openidconnect::core::CoreIdToken; use serde::Deserialize; use base64::{Engine as _, engine::general_purpose::STANDARD_NO_PAD as base64}; use crate::errors::SigstoreError; #[derive(Deserialize)] pub struct Claims { pub aud: String, #[serde(with = "chrono::serde::ts_seconds")] pub exp: DateTime, #[serde(with = "chrono::serde::ts_seconds_option")] #[serde(default)] pub nbf: Option>, pub email: String, } pub type UnverifiedClaims = Claims; /// A Sigstore token. pub struct IdentityToken { original_token: String, claims: UnverifiedClaims, } impl IdentityToken { /// Returns the **unverified** claim set for the token. /// /// The [UnverifiedClaims] returned from this method should not be used to enforce security /// invariants. pub fn unverified_claims(&self) -> &UnverifiedClaims { &self.claims } /// Returns whether or not this token is within its self-stated validity period. 
pub fn in_validity_period(&self) -> bool { let now = Utc::now(); if let Some(nbf) = self.claims.nbf { nbf <= now && now < self.claims.exp } else { now < self.claims.exp } } } impl TryFrom<&str> for IdentityToken { type Error = SigstoreError; fn try_from(value: &str) -> Result { let parts: [&str; 3] = value.split('.').collect::>().try_into().or(Err( SigstoreError::IdentityTokenError("Malformed JWT".into()), ))?; let claims = base64 .decode(parts[1]) .or(Err(SigstoreError::IdentityTokenError( "Malformed JWT: Unable to decode claims".into(), )))?; let claims: Claims = serde_json::from_slice(&claims).or(Err( SigstoreError::IdentityTokenError("Malformed JWT: claims JSON malformed".into()), ))?; if claims.aud != "sigstore" { return Err(SigstoreError::IdentityTokenError( "Not a Sigstore JWT".into(), )); } Ok(IdentityToken { original_token: value.to_owned(), claims, }) } } impl From for IdentityToken { fn from(value: CoreIdToken) -> Self { value .to_string() .as_str() .try_into() .expect("Token conversion failed") } } impl std::fmt::Display for IdentityToken { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", self.original_token.clone()) } } ================================================ FILE: src/registry/config.rs ================================================ // // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! 
Set of structs and enums used to define how to interact with OCI registries use pki_types::CertificateDer; use serde::Serialize; use std::cmp::Ordering; use crate::errors; /// A method for authenticating to a registry #[derive(Serialize, Debug)] pub enum Auth { /// Access the registry anonymously Anonymous, /// Access the registry using HTTP Basic authentication Basic(String, String), /// Access the registry using a bearer token Bearer(String), } impl From<&Auth> for oci_client::secrets::RegistryAuth { fn from(auth: &Auth) -> Self { match auth { Auth::Anonymous => oci_client::secrets::RegistryAuth::Anonymous, Auth::Basic(username, pass) => { oci_client::secrets::RegistryAuth::Basic(username.clone(), pass.clone()) } Auth::Bearer(token) => oci_client::secrets::RegistryAuth::Bearer(token.clone()), } } } impl From<&oci_client::secrets::RegistryAuth> for Auth { fn from(auth: &oci_client::secrets::RegistryAuth) -> Self { match auth { oci_client::secrets::RegistryAuth::Anonymous => Auth::Anonymous, oci_client::secrets::RegistryAuth::Basic(username, pass) => { Auth::Basic(username.clone(), pass.clone()) } oci_client::secrets::RegistryAuth::Bearer(token) => Auth::Bearer(token.clone()), } } } /// The protocol that the client should use to connect #[derive(Debug, Clone, Default, PartialEq, Eq)] pub enum ClientProtocol { #[allow(missing_docs)] Http, #[allow(missing_docs)] #[default] Https, #[allow(missing_docs)] HttpsExcept(Vec), } impl From for oci_client::client::ClientProtocol { fn from(cp: ClientProtocol) -> Self { match cp { ClientProtocol::Http => oci_client::client::ClientProtocol::Http, ClientProtocol::Https => oci_client::client::ClientProtocol::Https, ClientProtocol::HttpsExcept(exceptions) => { oci_client::client::ClientProtocol::HttpsExcept(exceptions) } } } } /// The encoding of the certificate #[derive(Debug, Clone, PartialEq, Eq)] pub enum CertificateEncoding { #[allow(missing_docs)] Der, #[allow(missing_docs)] Pem, } impl From for 
oci_client::client::CertificateEncoding { fn from(ce: CertificateEncoding) -> Self { match ce { CertificateEncoding::Der => oci_client::client::CertificateEncoding::Der, CertificateEncoding::Pem => oci_client::client::CertificateEncoding::Pem, } } } /// A x509 certificate #[derive(Debug, Clone, Eq, PartialEq)] pub struct Certificate { /// Which encoding is used by the certificate pub encoding: CertificateEncoding, /// Actual certificate pub data: Vec, } impl Ord for Certificate { fn cmp(&self, other: &Self) -> Ordering { self.data.cmp(&other.data) } } impl PartialOrd for Certificate { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } } impl From<&Certificate> for oci_client::client::Certificate { fn from(cert: &Certificate) -> Self { oci_client::client::Certificate { encoding: cert.encoding.clone().into(), data: cert.data.clone(), } } } impl<'a> TryFrom for CertificateDer<'a> { type Error = errors::SigstoreError; fn try_from(value: Certificate) -> errors::Result> { #[inline] fn to_der(pem: &[u8]) -> errors::Result> { Ok(pem::parse(pem)?.into_contents()) } match &value.encoding { CertificateEncoding::Der => Ok(CertificateDer::from(value.data)), CertificateEncoding::Pem => Ok(CertificateDer::from(to_der(&value.data)?)), } } } /// A client configuration #[derive(Debug, Clone)] pub struct ClientConfig { /// Which protocol the client should use pub protocol: ClientProtocol, /// Accept invalid hostname. Defaults to false #[cfg_attr(docsrs, doc(cfg(feature = "native-tls")))] #[cfg(feature = "native-tls")] pub accept_invalid_hostnames: bool, /// Accept invalid certificates. Defaults to false pub accept_invalid_certificates: bool, /// A list of extra root certificate to trust. This can be used to connect /// to servers using self-signed certificates pub extra_root_certificates: Vec, /// Set the `HTTPS PROXY` used by the client. /// /// This defaults to `None`. pub https_proxy: Option, /// Set the `HTTP PROXY` used by the client. 
/// /// This defaults to `None`. pub http_proxy: Option, /// Set the `NO PROXY` used by the client. /// /// This defaults to `None`. pub no_proxy: Option, } impl Default for ClientConfig { fn default() -> Self { ClientConfig { protocol: ClientProtocol::Https, #[cfg(feature = "native-tls")] accept_invalid_hostnames: false, accept_invalid_certificates: false, extra_root_certificates: Vec::new(), https_proxy: None, http_proxy: None, no_proxy: None, } } } impl From for oci_client::client::ClientConfig { fn from(config: ClientConfig) -> Self { oci_client::client::ClientConfig { protocol: config.protocol.into(), accept_invalid_certificates: config.accept_invalid_certificates, #[cfg(feature = "native-tls")] accept_invalid_hostnames: config.accept_invalid_hostnames, extra_root_certificates: config .extra_root_certificates .iter() .map(|c| c.into()) .collect(), https_proxy: config.https_proxy, http_proxy: config.http_proxy, no_proxy: config.no_proxy, ..Default::default() } } } /// A client configuration #[derive(Debug, Clone)] pub struct PushResponse { /// Pullable url for the config. pub config_url: String, /// Pullable url for the manifest. pub manifest_url: String, } impl From for oci_client::client::PushResponse { fn from(pr: PushResponse) -> Self { oci_client::client::PushResponse { config_url: pr.config_url, manifest_url: pr.manifest_url, } } } impl From for PushResponse { fn from(pr: oci_client::client::PushResponse) -> Self { PushResponse { config_url: pr.config_url, manifest_url: pr.manifest_url, } } } ================================================ FILE: src/registry/mod.rs ================================================ // // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. pub mod config; pub use config::*; #[cfg(feature = "cosign")] pub(crate) mod oci_client; #[cfg(feature = "cosign")] pub(crate) use oci_client::*; #[cfg(feature = "cosign")] pub mod oci_reference; #[cfg(feature = "cosign")] pub use oci_reference::OciReference; #[cfg(all(feature = "cosign", feature = "cached-client"))] pub(crate) mod oci_caching_client; #[cfg(all(feature = "cosign", feature = "cached-client"))] pub(crate) use oci_caching_client::*; use crate::errors::Result; use async_trait::async_trait; use ::oci_client as oci_client_dep; /// Workaround to ensure the `Send + Sync` supertraits are /// required by ClientCapabilities only when the target /// architecture is NOT wasm32. /// /// This intermediate trait has been created to avoid /// to define ClientCapabilities twice (one with `#[cfg(target_arch = "wasm32")]`, /// the other with `#[cfg(not(target_arch = "wasm32"))]` #[cfg(not(target_arch = "wasm32"))] pub(crate) trait ClientCapabilitiesDeps: Send + Sync {} /// Workaround to ensure the `Send + Sync` supertraits are /// required by ClientCapabilities only when the target /// architecture is NOT wasm32. 
/// /// This intermediate trait has been created to avoid /// to define ClientCapabilities twice (one with `#[cfg(target_arch = "wasm32")]`, /// the other with `#[cfg(not(target_arch = "wasm32"))]` #[cfg(target_arch = "wasm32")] pub(crate) trait ClientCapabilitiesDeps {} #[cfg_attr(not(target_arch = "wasm32"), async_trait)] #[cfg_attr(target_arch = "wasm32", async_trait(?Send))] /// Capabilities that are expected to be provided by a registry client pub(crate) trait ClientCapabilities: ClientCapabilitiesDeps { async fn fetch_manifest_digest( &mut self, image: &oci_client_dep::Reference, auth: &oci_client_dep::secrets::RegistryAuth, ) -> Result; async fn pull( &mut self, image: &oci_client_dep::Reference, auth: &oci_client_dep::secrets::RegistryAuth, accepted_media_types: Vec<&str>, ) -> Result; async fn pull_manifest( &mut self, image: &oci_client_dep::Reference, auth: &oci_client_dep::secrets::RegistryAuth, ) -> Result<(oci_client_dep::manifest::OciManifest, String)>; async fn push( &mut self, image_ref: &oci_client_dep::Reference, layers: &[oci_client_dep::client::ImageLayer], config: oci_client_dep::client::Config, auth: &oci_client_dep::secrets::RegistryAuth, manifest: Option, ) -> Result; } ================================================ FILE: src/registry/oci_caching_client.rs ================================================ // // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
use super::{ClientCapabilities, ClientCapabilitiesDeps}; use crate::errors::{Result, SigstoreError}; use async_trait::async_trait; use cached::proc_macro::cached; use serde::Serialize; use sha2::{Digest, Sha256}; use tracing::{debug, error}; /// Internal client for an OCI Registry. This performs actual /// calls against the remote registry and caches the results /// for 60 seconds. /// /// For testing purposes, use instead the client inside of the /// `mock_client` module. pub(crate) struct OciCachingClient { pub registry_client: oci_client::Client, } #[cached( time = 60, result = true, sync_writes = "default", key = "String", convert = r#"{ format!("{}", image) }"#, with_cached_flag = true )] async fn fetch_manifest_digest_cached( client: &mut oci_client::Client, image: &oci_client::Reference, auth: &oci_client::secrets::RegistryAuth, ) -> Result> { client .fetch_manifest_digest(image, auth) .await .map_err(|e| SigstoreError::RegistryFetchManifestError { image: image.whole(), error: e.to_string(), }) .map(cached::Return::new) } /// Internal struct, used to calculate a unique hash of the pull /// settings. This is required to cache pull results. 
#[derive(Serialize, Debug)] struct PullSettings<'a> { image: String, auth: super::config::Auth, pub accepted_media_types: Vec<&'a str>, } impl<'a> PullSettings<'a> { fn new( image: &oci_client::Reference, auth: &oci_client::secrets::RegistryAuth, accepted_media_types: Vec<&'a str>, ) -> PullSettings<'a> { let image_str = image.whole(); let auth_sigstore: super::config::Auth = From::from(auth); PullSettings { image: image_str, auth: auth_sigstore, accepted_media_types, } } #[allow(clippy::unwrap_used)] pub fn image(&self) -> oci_client::Reference { // we can use `unwrap` here, because this will never fail let reference: oci_client::Reference = self.image.parse().unwrap(); reference } pub fn auth(&self) -> oci_client::secrets::RegistryAuth { let internal_auth: &super::config::Auth = &self.auth; let a: oci_client::secrets::RegistryAuth = internal_auth.into(); a } // This function returns a hash of the PullSettings struct. // The has is computed by doing a canonical JSON representation of // the struct. // // This method cannot error, because its value is used by the `cached` // macro, which doesn't allow error handling. // Because of that the method will return the '0' value when something goes // wrong during the serialization operation. This is very unlikely to happen pub fn hash(&self) -> String { let buf = match serde_json_canonicalizer::to_vec(self) { Ok(vec) => vec, Err(e) => { error!(err=?e, settings=?self, "Cannot perform canonical serialization"); return "0".to_string(); } }; let mut hasher = Sha256::new(); hasher.update(&buf); let result = hasher.finalize(); result .iter() .map(|v| format!("{v:x}")) .collect::>() .join("") } } // Pulls an OCI artifact. 
// Details about this cache: // * the cache is time bound: cached values are purged after 60 seconds // * only successful results are cached #[cached( time = 60, result = true, sync_writes = "default", key = "String", convert = r#"{ settings.hash() }"#, with_cached_flag = true )] async fn pull_cached( client: &mut oci_client::Client, settings: PullSettings<'_>, ) -> Result> { let auth = settings.auth(); let image = settings.image(); client .pull(&image, &auth, settings.accepted_media_types) .await .map_err(|e| SigstoreError::RegistryPullError { image: image.whole(), error: e.to_string(), }) .map(cached::Return::new) } /// Internal struct, used to calculate a unique hash of the pull manifest /// settings. This is required to cache pull manifest results. #[derive(Serialize, Debug)] struct PullManifestSettings { image: String, auth: super::config::Auth, } impl PullManifestSettings { fn new( image: &oci_client::Reference, auth: &oci_client::secrets::RegistryAuth, ) -> PullManifestSettings { let image_str = image.whole(); let auth_sigstore: super::config::Auth = From::from(auth); PullManifestSettings { image: image_str, auth: auth_sigstore, } } #[allow(clippy::unwrap_used)] pub fn image(&self) -> oci_client::Reference { // we can use `unwrap` here, because this will never fail let reference: oci_client::Reference = self.image.parse().unwrap(); reference } pub fn auth(&self) -> oci_client::secrets::RegistryAuth { let internal_auth: &super::config::Auth = &self.auth; let a: oci_client::secrets::RegistryAuth = internal_auth.into(); a } // This function returns a hash of the PullManifestSettings struct. // The has is computed by doing a canonical JSON representation of // the struct. // // This method cannot error, because its value is used by the `cached` // macro, which doesn't allow error handling. // Because of that the method will return the '0' value when something goes // wrong during the serialization operation. 
This is very unlikely to happen pub fn hash(&self) -> String { let buf = match serde_json_canonicalizer::to_vec(self) { Ok(vec) => vec, Err(e) => { error!(err=?e, settings=?self, "Cannot perform canonical serialization"); return "0".to_string(); } }; let mut hasher = Sha256::new(); hasher.update(&buf); let result = hasher.finalize(); result .iter() .map(|v| format!("{v:x}")) .collect::>() .join("") } } // Pulls an OCI manifest. // Details about this cache: // * the cache is time bound: cached values are purged after 60 seconds // * only successful results are cached #[cached( time = 60, result = true, sync_writes = "default", key = "String", convert = r#"{ settings.hash() }"#, with_cached_flag = true )] async fn pull_manifest_cached( client: &mut oci_client::Client, settings: PullManifestSettings, ) -> Result> { let image = settings.image(); let auth = settings.auth(); client .pull_manifest(&image, &auth) .await .map_err(|e| SigstoreError::RegistryPullManifestError { image: image.whole(), error: e.to_string(), }) .map(cached::Return::new) } impl ClientCapabilitiesDeps for OciCachingClient {} #[cfg_attr(not(target_arch = "wasm32"), async_trait)] #[cfg_attr(target_arch = "wasm32", async_trait(? 
Send))] impl ClientCapabilities for OciCachingClient { async fn fetch_manifest_digest( &mut self, image: &oci_client::Reference, auth: &oci_client::secrets::RegistryAuth, ) -> Result { fetch_manifest_digest_cached(&mut self.registry_client, image, auth) .await .map(|digest| { if digest.was_cached { debug!(?image, "Got image digest from cache"); } else { debug!(?image, "Got image digest by querying remote registry"); } digest.value }) } async fn pull( &mut self, image: &oci_client::Reference, auth: &oci_client::secrets::RegistryAuth, accepted_media_types: Vec<&str>, ) -> Result { let pull_settings = PullSettings::new(image, auth, accepted_media_types); pull_cached(&mut self.registry_client, pull_settings) .await .map(|data| { if data.was_cached { debug!(?image, "Got image data from cache"); } else { debug!(?image, "Got image data by querying remote registry"); } data.value }) } async fn pull_manifest( &mut self, image: &oci_client::Reference, auth: &oci_client::secrets::RegistryAuth, ) -> Result<(oci_client::manifest::OciManifest, String)> { let pull_manifest_settings = PullManifestSettings::new(image, auth); pull_manifest_cached(&mut self.registry_client, pull_manifest_settings) .await .map(|data| { if data.was_cached { debug!(?image, "Got image manifest from cache"); } else { debug!(?image, "Got image manifest by querying remote registry"); } data.value }) } async fn push( &mut self, image_ref: &oci_client::Reference, layers: &[oci_client::client::ImageLayer], config: oci_client::client::Config, auth: &oci_client::secrets::RegistryAuth, manifest: Option, ) -> Result { self.registry_client .push(image_ref, layers, config, auth, manifest) .await .map_err(|e| SigstoreError::RegistryPushError { image: image_ref.whole(), error: e.to_string(), }) } } ================================================ FILE: src/registry/oci_client.rs ================================================ // // Copyright 2021 The Sigstore Authors. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use super::{ClientCapabilities, ClientCapabilitiesDeps}; use crate::errors::{Result, SigstoreError}; use async_trait::async_trait; /// Internal client for an OCI Registry. This performs actual /// calls against the remote registry.OciClient /// /// For testing purposes, use instead the client inside of the /// `mock_client` module. pub(crate) struct OciClient { pub registry_client: oci_client::Client, } impl ClientCapabilitiesDeps for OciClient {} #[cfg_attr(not(target_arch = "wasm32"), async_trait)] #[cfg_attr(target_arch = "wasm32", async_trait(?Send))] impl ClientCapabilities for OciClient { async fn fetch_manifest_digest( &mut self, image: &oci_client::Reference, auth: &oci_client::secrets::RegistryAuth, ) -> Result { self.registry_client .fetch_manifest_digest(image, auth) .await .map_err(|e| SigstoreError::RegistryFetchManifestError { image: image.whole(), error: e.to_string(), }) } async fn pull( &mut self, image: &oci_client::Reference, auth: &oci_client::secrets::RegistryAuth, accepted_media_types: Vec<&str>, ) -> Result { self.registry_client .pull(image, auth, accepted_media_types) .await .map_err(|e| SigstoreError::RegistryPullError { image: image.whole(), error: e.to_string(), }) } async fn pull_manifest( &mut self, image: &oci_client::Reference, auth: &oci_client::secrets::RegistryAuth, ) -> Result<(oci_client::manifest::OciManifest, String)> { self.registry_client .pull_manifest(image, auth) .await 
.map_err(|e| SigstoreError::RegistryPullManifestError { image: image.whole(), error: e.to_string(), }) } async fn push( &mut self, image_ref: &oci_client::Reference, layers: &[oci_client::client::ImageLayer], config: oci_client::client::Config, auth: &oci_client::secrets::RegistryAuth, manifest: Option, ) -> Result { self.registry_client .push(image_ref, layers, config, auth, manifest) .await .map_err(|e| SigstoreError::RegistryPushError { image: image_ref.whole(), error: e.to_string(), }) } } ================================================ FILE: src/registry/oci_reference.rs ================================================ // // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::errors::SigstoreError; use std::fmt::{Display, Formatter}; use std::str::FromStr; /// `OciReference` provides a general type to represent any way of referencing images within an OCI registry. #[derive(Debug, Clone, PartialEq)] pub struct OciReference { pub(crate) oci_reference: oci_client::Reference, } impl FromStr for OciReference { type Err = SigstoreError; fn from_str(s: &str) -> Result { s.parse::() .map_err(|_| SigstoreError::OciReferenceNotValidError { reference: s.to_string(), }) .map(|oci_reference| OciReference { oci_reference }) } } impl OciReference { /// Create a Reference with a registry, repository and tag. 
pub fn with_tag(registry: String, repository: String, tag: String) -> Self { OciReference { oci_reference: oci_client::Reference::with_tag(registry, repository, tag), } } /// Create a Reference with a registry, repository and digest. pub fn with_digest(registry: String, repository: String, digest: String) -> Self { OciReference { oci_reference: oci_client::Reference::with_digest(registry, repository, digest), } } /// Resolve the registry address of a given Reference. /// /// Some registries, such as docker.io, uses a different address for the actual /// registry. This function implements such redirection. pub fn resolve_registry(&self) -> &str { self.oci_reference.resolve_registry() } /// registry returns the name of the registry. pub fn registry(&self) -> &str { self.oci_reference.registry() } /// repository returns the name of the repository pub fn repository(&self) -> &str { self.oci_reference.repository() } /// digest returns the object's digest, if present. pub fn digest(&self) -> Option<&str> { self.oci_reference.digest() } /// tag returns the object's tag, if present. pub fn tag(&self) -> Option<&str> { self.oci_reference.tag() } /// whole returns the whole reference. pub fn whole(&self) -> String { self.oci_reference.whole() } } impl Display for OciReference { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { self.oci_reference.fmt(f) } } ================================================ FILE: src/rekor/apis/configuration.rs ================================================ /* * Rekor * * Rekor is a cryptographically secure, immutable transparency log for signed software releases. 
* * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ const VERSION: Option<&str> = option_env!("CARGO_PKG_VERSION"); #[derive(Debug, Clone)] pub struct Configuration { pub base_path: String, pub user_agent: Option, pub client: reqwest::Client, pub basic_auth: Option, pub oauth_access_token: Option, pub bearer_access_token: Option, pub api_key: Option, // TODO: take an oauth2 token source, similar to the go one } pub type BasicAuth = (String, Option); #[derive(Debug, Clone)] pub struct ApiKey { pub prefix: Option, pub key: String, } impl Configuration { pub fn new() -> Configuration { Configuration::default() } } impl Default for Configuration { fn default() -> Self { Configuration { base_path: "https://rekor.sigstore.dev".to_owned(), user_agent: Some(format!("sigstore-rs/{}", VERSION.unwrap_or("unknown"))), client: reqwest::Client::new(), basic_auth: None, oauth_access_token: None, bearer_access_token: None, api_key: None, } } } ================================================ FILE: src/rekor/apis/entries_api.rs ================================================ /* * Rekor * * Rekor is a cryptographically secure, immutable transparency log for signed software releases. 
* * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use super::{Error, configuration}; use crate::rekor::apis::ResponseContent; use crate::rekor::models::log_entry::LogEntry; use serde::{Deserialize, Serialize}; use std::str::FromStr; /// struct for typed errors of method [`create_log_entry`] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum CreateLogEntryError { Status400(crate::rekor::models::Error), Status409(crate::rekor::models::Error), DefaultResponse(crate::rekor::models::Error), UnknownValue(serde_json::Value), } /// struct for typed errors of method [`get_log_entry_by_index`] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum GetLogEntryByIndexError { Status404(), DefaultResponse(crate::rekor::models::Error), UnknownValue(serde_json::Value), } /// struct for typed errors of method [`get_log_entry_by_uuid`] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum GetLogEntryByUuidError { Status404(), DefaultResponse(crate::rekor::models::Error), UnknownValue(serde_json::Value), } /// struct for typed errors of method [`search_log_query`] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum SearchLogQueryError { Status400(crate::rekor::models::Error), DefaultResponse(crate::rekor::models::Error), UnknownValue(serde_json::Value), } #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct LogEntries { entries: Vec, } // TEMPORARY: Formats the returned response such that it can be read into a struct // TODO: Remove once upstream issue around dynamic top level key is resolved: // https://github.com/sigstore/rekor/issues/808 pub fn parse_response(local_var_content: String) -> String { let uuid: &str = &local_var_content[1..82]; let rest: &str = &local_var_content[85..local_var_content.len() - 2]; "{\"uuid\":".to_string() + uuid + "\"," + rest } /// Creates an entry in the 
transparency log for a detached signature, public key, and content. Items can be included in the request or fetched by the server when URLs are specified. // Change the return value of the function to LogEntry from ::std::collections::HashMap pub async fn create_log_entry( configuration: &configuration::Configuration, proposed_entry: crate::rekor::models::ProposedEntry, ) -> Result> { let local_var_client = &configuration.client; let local_var_uri_str = format!("{}/api/v1/log/entries", configuration.base_path); let mut local_var_req_builder = local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str()); if let Some(ref local_var_user_agent) = configuration.user_agent { local_var_req_builder = local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); } local_var_req_builder = local_var_req_builder.json(&proposed_entry); let local_var_req = local_var_req_builder.build()?; let local_var_resp = local_var_client.execute(local_var_req).await?; let local_var_status = local_var_resp.status(); let local_var_content = local_var_resp.text().await?; if !local_var_status.is_client_error() && !local_var_status.is_server_error() { LogEntry::from_str(&(parse_response(local_var_content))).map_err(Error::from) } else { let local_var_entity: Option = serde_json::from_str(&local_var_content).ok(); let local_var_error = ResponseContent { status: local_var_status, content: local_var_content, entity: local_var_entity, }; Err(Error::ResponseError(local_var_error)) } } /// Fetches the specified entry from the transparency log using the log index /// /// It queries the Rekor API for the specified log_index, and returns a [`LogEntry`]. /// /// Returns an error if the HTTP request fails, the response cannot be parsed, /// or the entry does not exist. 
pub async fn get_log_entry_by_index( configuration: &configuration::Configuration, log_index: i32, ) -> Result> { let local_var_client = &configuration.client; let local_var_uri_str = format!("{}/api/v1/log/entries", configuration.base_path); let mut local_var_req_builder = local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); local_var_req_builder = local_var_req_builder.query(&[("logIndex", &log_index.to_string())]); if let Some(ref local_var_user_agent) = configuration.user_agent { local_var_req_builder = local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); } let local_var_req = local_var_req_builder.build()?; let local_var_resp = local_var_client.execute(local_var_req).await?; let local_var_status = local_var_resp.status(); let local_var_content = local_var_resp.text().await?; if !local_var_status.is_client_error() && !local_var_status.is_server_error() { LogEntry::from_str(&(parse_response(local_var_content))).map_err(Error::from) } else { let local_var_entity: Option = serde_json::from_str(&local_var_content).ok(); let local_var_error = ResponseContent { status: local_var_status, content: local_var_content, entity: local_var_entity, }; Err(Error::ResponseError(local_var_error)) } } /// Fetches the specified entry from the transparency log using the log entry UUID. /// /// It queries the Rekor API for the specified log entry UUID (a hex-encoded hash), and returns a /// [`LogEntry`]. /// /// Returns an error if the HTTP request fails, the response cannot be parsed, /// or the entry does not exist. 
pub async fn get_log_entry_by_uuid( configuration: &configuration::Configuration, entry_uuid: &str, ) -> Result> { let local_var_client = &configuration.client; let local_var_uri_str = format!( "{}/api/v1/log/entries/{entryUUID}", configuration.base_path, entryUUID = crate::rekor::apis::urlencode(entry_uuid) ); let mut local_var_req_builder = local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); if let Some(ref local_var_user_agent) = configuration.user_agent { local_var_req_builder = local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); } let local_var_req = local_var_req_builder.build()?; let local_var_resp = local_var_client.execute(local_var_req).await?; let local_var_status = local_var_resp.status(); let local_var_content = local_var_resp.text().await?; if !local_var_status.is_client_error() && !local_var_status.is_server_error() { LogEntry::from_str(&(parse_response(local_var_content))).map_err(Error::from) } else { let local_var_entity: Option = serde_json::from_str(&local_var_content).ok(); let local_var_error = ResponseContent { status: local_var_status, content: local_var_content, entity: local_var_entity, }; Err(Error::ResponseError(local_var_error)) } } // Returns the vector of Log Entries as a String pub async fn search_log_query( configuration: &configuration::Configuration, entry: crate::rekor::models::SearchLogQuery, ) -> Result> { let local_var_client = &configuration.client; let local_var_uri_str = format!("{}/api/v1/log/entries/retrieve", configuration.base_path); let mut local_var_req_builder = local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str()); if let Some(ref local_var_user_agent) = configuration.user_agent { local_var_req_builder = local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); } local_var_req_builder = local_var_req_builder.json(&entry); let local_var_req = local_var_req_builder.build()?; let local_var_resp = 
local_var_client.execute(local_var_req).await?; let local_var_status = local_var_resp.status(); let local_var_content = local_var_resp.text().await?; if !local_var_status.is_client_error() && !local_var_status.is_server_error() { Ok(local_var_content) } else { let local_var_entity: Option = serde_json::from_str(&local_var_content).ok(); let local_var_error = ResponseContent { status: local_var_status, content: local_var_content, entity: local_var_entity, }; Err(Error::ResponseError(local_var_error)) } } ================================================ FILE: src/rekor/apis/index_api.rs ================================================ /* * Rekor * * Rekor is a cryptographically secure, immutable transparency log for signed software releases. * * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use super::{Error, configuration}; use crate::rekor::apis::ResponseContent; use serde::{Deserialize, Serialize}; /// struct for typed errors of method [`search_index`] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum SearchIndexError { Status400(crate::rekor::models::Error), DefaultResponse(crate::rekor::models::Error), UnknownValue(serde_json::Value), } pub async fn search_index( configuration: &configuration::Configuration, query: crate::rekor::models::SearchIndex, ) -> Result, Error> { let local_var_configuration = configuration; let local_var_client = &local_var_configuration.client; let local_var_uri_str = format!( "{}/api/v1/index/retrieve", local_var_configuration.base_path ); let mut local_var_req_builder = local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str()); if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { local_var_req_builder = local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); } local_var_req_builder = local_var_req_builder.json(&query); let local_var_req = local_var_req_builder.build()?; let 
local_var_resp = local_var_client.execute(local_var_req).await?; let local_var_status = local_var_resp.status(); let local_var_content = local_var_resp.text().await?; if !local_var_status.is_client_error() && !local_var_status.is_server_error() { serde_json::from_str(&local_var_content).map_err(Error::from) } else { let local_var_entity: Option = serde_json::from_str(&local_var_content).ok(); let local_var_error = ResponseContent { status: local_var_status, content: local_var_content, entity: local_var_entity, }; Err(Error::ResponseError(local_var_error)) } } ================================================ FILE: src/rekor/apis/mod.rs ================================================ use thiserror::Error; #[derive(Debug, Clone)] pub struct ResponseContent { pub status: reqwest::StatusCode, pub content: String, pub entity: Option, } #[derive(Error, Debug)] pub enum Error { #[error("error in reqwest: {source:?}")] Reqwest { #[from] source: reqwest::Error, }, #[error("error in serde: {source:?}")] Serde { #[from] source: serde_json::Error, }, #[error("error in IO: {source:?}")] Io { #[from] source: std::io::Error, }, #[error("error in response: status code {:?}", error_status(.0))] ResponseError(ResponseContent), } #[inline] fn error_status(response: &ResponseContent) -> reqwest::StatusCode { response.status } pub fn urlencode>(s: T) -> String { ::url::form_urlencoded::byte_serialize(s.as_ref().as_bytes()).collect() } pub mod configuration; pub mod entries_api; pub mod index_api; pub mod pubkey_api; pub mod tlog_api; ================================================ FILE: src/rekor/apis/pubkey_api.rs ================================================ /* * Rekor * * Rekor is a cryptographically secure, immutable transparency log for signed software releases. 
* * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use super::{Error, configuration}; use crate::rekor::apis::ResponseContent; use serde::{Deserialize, Serialize}; /// struct for typed errors of method [`get_public_key`] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum GetPublicKeyError { DefaultResponse(crate::rekor::models::Error), UnknownValue(serde_json::Value), } /// Returns the public key that can be used to validate the signed tree head pub async fn get_public_key( configuration: &configuration::Configuration, tree_id: Option<&str>, ) -> Result> { let local_var_configuration = configuration; let local_var_client = &local_var_configuration.client; let local_var_uri_str = format!("{}/api/v1/log/publicKey", local_var_configuration.base_path); let mut local_var_req_builder = local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); if let Some(ref local_var_str) = tree_id { local_var_req_builder = local_var_req_builder.query(&[("treeID", &local_var_str.to_string())]); } if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { local_var_req_builder = local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); } let local_var_req = local_var_req_builder.build()?; let local_var_resp = local_var_client.execute(local_var_req).await?; let local_var_status = local_var_resp.status(); let local_var_content = local_var_resp.text().await?; if !local_var_status.is_client_error() && !local_var_status.is_server_error() { Ok(local_var_content) } else { let local_var_entity: Option = serde_json::from_str(&local_var_content).ok(); let local_var_error = ResponseContent { status: local_var_status, content: local_var_content, entity: local_var_entity, }; Err(Error::ResponseError(local_var_error)) } } ================================================ FILE: src/rekor/apis/tlog_api.rs ================================================ /* * Rekor * 
* Rekor is a cryptographically secure, immutable transparency log for signed software releases. * * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use super::{Error, configuration}; use crate::rekor::apis::ResponseContent; use serde::{Deserialize, Serialize}; /// struct for typed errors of method [`get_log_info`] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum GetLogInfoError { DefaultResponse(crate::rekor::models::Error), UnknownValue(serde_json::Value), ConversionError(String), } /// struct for typed errors of method [`get_log_proof`] #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(untagged)] pub enum GetLogProofError { Status400(crate::rekor::models::Error), DefaultResponse(crate::rekor::models::Error), UnknownValue(serde_json::Value), ConversionError(String), } /// Fetches the current state of the Rekor transparency log. /// /// It queries the Rekor API for the latest log information, and returns a /// [`LogInfo`](crate::rekor::models::LogInfo). /// /// Returns an error if the HTTP request fails, the response cannot be parsed, or the log info /// cannot be converted from the raw API format. 
pub async fn get_log_info( configuration: &configuration::Configuration, ) -> Result> { let local_var_configuration = configuration; let local_var_client = &local_var_configuration.client; let local_var_uri_str = format!("{}/api/v1/log", local_var_configuration.base_path); let mut local_var_req_builder = local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { local_var_req_builder = local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); } let local_var_req = local_var_req_builder.build()?; let local_var_resp = local_var_client.execute(local_var_req).await?; let local_var_status = local_var_resp.status(); let local_var_content = local_var_resp.text().await?; if !local_var_status.is_client_error() && !local_var_status.is_server_error() { let raw: crate::rekor::models::RekorLogInfo = serde_json::from_str(&local_var_content).map_err(Error::from)?; let proof = crate::rekor::models::LogInfo::try_from(raw).map_err(|e| { Error::ResponseError(ResponseContent { status: local_var_status, content: local_var_content.clone(), entity: Some(GetLogInfoError::ConversionError(e.to_string())), }) })?; Ok(proof) } else { let local_var_entity: Option = serde_json::from_str(&local_var_content).ok(); let local_var_error = ResponseContent { status: local_var_status, content: local_var_content, entity: local_var_entity, }; Err(Error::ResponseError(local_var_error)) } } /// Fetches a Merkle consistency proof between two tree sizes from the Rekor transparency log. /// /// It queries the Rekor API for a consistency proof, returned in a /// [`ConsistencyProof`](crate::rekor::models::ConsistencyProof). /// /// # Arguments /// * `configuration` - Rekor API client configuration. /// * `last_size` - The size of the newer tree (as an integer). /// * `first_size` - The size of the older tree (as a string, optional). /// * `tree_id` - The tree ID to query (optional). 
/// /// Returns an error if the HTTP request fails, the response cannot be parsed, or the proof /// cannot be converted from the raw API format. pub async fn get_log_proof( configuration: &configuration::Configuration, last_size: i32, first_size: Option<&str>, tree_id: Option<&str>, ) -> Result> { let local_var_configuration = configuration; let local_var_client = &local_var_configuration.client; let local_var_uri_str = format!("{}/api/v1/log/proof", local_var_configuration.base_path); let mut local_var_req_builder = local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); if let Some(ref local_var_str) = first_size { local_var_req_builder = local_var_req_builder.query(&[("firstSize", &local_var_str.to_string())]); } local_var_req_builder = local_var_req_builder.query(&[("lastSize", &last_size.to_string())]); if let Some(ref local_var_str) = tree_id { local_var_req_builder = local_var_req_builder.query(&[("treeID", &local_var_str.to_string())]); } if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { local_var_req_builder = local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); } let local_var_req = local_var_req_builder.build()?; let local_var_resp = local_var_client.execute(local_var_req).await?; let local_var_status = local_var_resp.status(); let local_var_content = local_var_resp.text().await?; if !local_var_status.is_client_error() && !local_var_status.is_server_error() { let raw: crate::rekor::models::RekorConsistencyProof = serde_json::from_str(&local_var_content).map_err(Error::from)?; let proof = crate::rekor::models::ConsistencyProof::try_from(raw).map_err(|e| { Error::ResponseError(ResponseContent { status: local_var_status, content: local_var_content.clone(), entity: Some(GetLogProofError::ConversionError(e.to_string())), }) })?; Ok(proof) } else { let local_var_entity: Option = serde_json::from_str(&local_var_content).ok(); let local_var_error = ResponseContent { status: 
local_var_status, content: local_var_content, entity: local_var_entity, }; Err(Error::ResponseError(local_var_error)) } } ================================================ FILE: src/rekor/mod.rs ================================================ // // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! This crate aims to provide Rust API client for Rekor to Rust developers. //! //! Rekor is a cryptographically secure, immutable transparency log for signed software releases. //! //! **Warning:** this crate is still experimental. Its API can change at any time. //! //! # Security //! //! Should you discover any security issues, please refer to //! Sigstore's [security process](https://github.com/sigstore/community/blob/main/SECURITY.md). //! //! # How to use this crate //! The examples folder contains code that shows users how to make API calls. //! It also provides a clean interface with step-by-step instructions that other developers can copy and paste. //! //! ``` //! use clap::{Arg, Command}; //! use sigstore::rekor::apis::{configuration::Configuration, entries_api}; //! use sigstore::rekor::models::log_entry::LogEntry; //! use std::str::FromStr; //! #[tokio::main] //! async fn main() { //! /* //! Retrieves an entry and inclusion proof from the transparency log (if it exists) by index //! Example command : //! cargo run --example get_log_entry_by_index -- --log_index 99 //! */ //! let matches = Command::new("cmd").arg( //! 
Arg::new("log_index") //! .long("log_index") //! .num_args(1) //! .value_parser(clap::value_parser!(i32)) //! .default_value("1") //! .help("log_index of the artifact"), //! ); //! //! let flags = matches.get_matches(); //! let index: &i32 = flags.get_one("log_index").unwrap(); //! //! let configuration = Configuration::default(); //! //! let message: LogEntry = entries_api::get_log_entry_by_index(&configuration, *index) //! .await //! .unwrap(); //! println!("{:#?}", message); //! } //! ``` //! //! The following comment in the code tells the user how to provide the required values to the API calls using cli flags. //! //! In the example below, the user can retrieve different entries by inputting a different value for the log_index flag. //! //! //!/* //!Retrieves an entry and inclusion proof from the transparency log (if it exists) by index //!Example command : //!cargo run --example get_log_entry_by_index -- --log_index 99 //!*/ //! //! # The example code is provided for the following API calls: //! //!- create_log_entry //!- get_log_entry_by_index //!- get_log_entry_by_uuid //!- get_log_info //!- get_log_proof //!- get_public_key //!- get_timestamp_cert_chain //!- get_timestamp_response //!- search_index //!- search_log_query //! pub mod apis; pub mod models; type TreeSize = u64; ================================================ FILE: src/rekor/models/alpine.rs ================================================ /* * Rekor * * Rekor is a cryptographically secure, immutable transparency log for signed software releases. 
* * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use serde::{Deserialize, Serialize}; /// Alpine : Alpine package #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct Alpine { #[serde(rename = "kind")] pub kind: String, #[serde(rename = "apiVersion")] pub api_version: String, #[serde(rename = "spec")] pub spec: serde_json::Value, } impl Alpine { /// Alpine package pub fn new(kind: String, api_version: String, spec: serde_json::Value) -> Alpine { Alpine { kind, api_version, spec, } } } ================================================ FILE: src/rekor/models/alpine_all_of.rs ================================================ /* * Rekor * * Rekor is a cryptographically secure, immutable transparency log for signed software releases. * * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct AlpineAllOf { #[serde(rename = "apiVersion")] pub api_version: String, #[serde(rename = "spec")] pub spec: serde_json::Value, } impl AlpineAllOf { pub fn new(api_version: String, spec: serde_json::Value) -> AlpineAllOf { AlpineAllOf { api_version, spec } } } ================================================ FILE: src/rekor/models/checkpoint.rs ================================================ use crate::crypto::merkle::{MerkleProofVerifier, Rfc6269Default}; use crate::crypto::{CosignVerificationKey, Signature}; use crate::errors::SigstoreError; use crate::errors::SigstoreError::ConsistencyProofError; use crate::rekor::models::checkpoint::ParseCheckpointError::*; use base64::Engine; use base64::prelude::BASE64_STANDARD; use digest::Output; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use std::fmt::Write; use std::fmt::{Display, Formatter}; /// A checkpoint (also known as a signed tree head) that is served by the log. 
/// It represents the log state at a point in time.
/// The `note` field stores this data,
/// and its authenticity can be verified with the data in `signature`.
#[derive(Debug, PartialEq, Clone, Eq)]
pub struct SignedCheckpoint {
    pub note: Checkpoint,
    pub signatures: Vec<CheckpointSignature>,
}

/// The metadata that is contained in a checkpoint.
#[derive(Debug, PartialEq, Clone, Eq)]
pub struct Checkpoint {
    /// origin is the unique identifier/version string
    pub origin: String,
    /// merkle tree size
    pub size: u64,
    /// merkle tree root hash
    pub hash: [u8; 32],
    /// catches the rest of the content
    pub other_content: Vec<OtherContent>,
}

/// The signature that is contained in a checkpoint.
/// The `key_fingerprint` are the first four bytes of the key hash of the corresponding log public key.
/// This can be used to identity the key which should be used to verify the checkpoint.
/// The actual signature is stored in `raw`.
#[derive(Debug, PartialEq, Clone, Eq)]
pub struct CheckpointSignature {
    pub key_fingerprint: [u8; 4],
    pub raw: Vec<u8>,
    pub name: String,
}

/// Checkpoints can contain additional data.
/// The `KeyValue` variant is for lines that are in the format `<key>: <value>`.
/// Everything else is stored in the `Value` variant.
#[derive(Debug, PartialEq, Clone, Eq)] pub enum OtherContent { KeyValue(String, String), Value(String), } impl Display for OtherContent { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { match self { OtherContent::KeyValue(k, v) => write!(f, "{k}: {v}"), OtherContent::Value(v) => write!(f, "{v}"), } } } #[derive(Debug, Eq, PartialEq)] pub enum ParseCheckpointError { DecodeError(String), } impl SignedCheckpoint { // decode from format used by Rekor for envelopes (signed notes) // See https://github.com/transparency-dev/formats/blob/2de64aa755f08489bda36125786ced79688af872/log/README.md#signed-envelope pub(crate) fn decode(s: &str) -> Result { // refer to: https://github.com/sigstore/rekor/blob/d702f84e6b8b127662c5e717ee550de1242a6aec/pkg/util/signed_note.go let checkpoint = s.trim_start_matches('"').trim_end_matches('"'); let Some((note, sigs)) = checkpoint.split_once("\n\n") else { return Err(DecodeError("unexpected checkpoint format".to_string())); }; let signatures: Vec = sigs .split("\n\n") .filter(|s| !s.trim().is_empty()) .map(CheckpointSignature::decode) .collect::>()?; let note = Checkpoint::unmarshal(note)?; Ok(SignedCheckpoint { note, signatures }) } // encode into format used by Rekor for envelopes (signed notes) // See https://github.com/transparency-dev/formats/blob/2de64aa755f08489bda36125786ced79688af872/log/README.md#signed-envelope pub(crate) fn encode(&self) -> String { let note = self.note.marshal() + "\n"; let empty_line = "\n"; let signatures = self .signatures .iter() .map(|s| s.encode()) .collect::>() .join("\n"); format!("{note}{empty_line}{signatures}") } /// verify_signature checks that at least one of the signatures can be verified by the log /// with the public key `rekor_key` pub fn verify_signature(&self, rekor_key: &CosignVerificationKey) -> Result<(), SigstoreError> { for sig in &self.signatures { if rekor_key .verify_signature(Signature::Raw(&sig.raw), self.note.marshal().as_bytes()) .is_ok() { return Ok(()); } } 
Err(SigstoreError::CheckpointSignatureVerificationError) } /// Checks if the checkpoint (root hash) matches the Merkle root and tree size claimed by an /// inclusion or consistency proof. This prevents accepting proofs that claim to be for a /// different tree than the one actually signed by the log, even if they are correctly signed. /// This ensures a cryptographic linkage between the log's signed state and the proof being /// verified. pub(crate) fn is_valid_for_proof( &self, proof_root_hash: &Output, proof_tree_size: u64, ) -> Result<(), SigstoreError> { // Delegate implementation as trivial consistency proof. // the checkpoint and the proof claim the same tree size. According to RFC-6962, // if two tree sizes are equal, the only valid consistency proof is that their roots are // equal and the proof is empty: one doesn't need any hashes to prove consistency between // two identical trees, just check that the roots match. Rfc6269Default::verify_consistency( self.note.size, // checkpoint's tree size proof_tree_size, // proof's tree size &[], // empty proof_hashes &self.note.hash.into(), // checkpoint's root hash proof_root_hash, // proof's root hash ) .map_err(ConsistencyProofError) } } impl Checkpoint { /// marshals the note /// See https://github.com/transparency-dev/formats/blob/2de64aa755f08489bda36125786ced79688af872/log/README.md#checkpoint-body fn marshal(&self) -> String { let hash_b64 = BASE64_STANDARD.encode(self.hash); let other_content: String = self.other_content.iter().fold(String::new(), |mut acc, c| { writeln!(acc, "{c}").expect("failed to write to string"); acc }); format!( "{}\n{}\n{hash_b64}\n{other_content}", self.origin, self.size ) } /// unmarshal parses the common formatted note data and stores the result in a /// CheckpointNote fn unmarshal(s: &str) -> Result { // See https://github.com/transparency-dev/formats/blob/2de64aa755f08489bda36125786ced79688af872/log/README.md#checkpoint-body // The note is in the form: // // // // [other data] 
let split_note = s.split('\n').collect::>(); let [origin, size, hash_b64, other_content @ ..] = split_note.as_slice() else { return Err(DecodeError("note not in expected format".to_string())); }; if origin.trim().is_empty() { return Err(DecodeError("origin string must not be empty".to_string())); } let size = size .parse::() .map_err(|_| DecodeError("expected decimal string for size".into()))?; let hash = BASE64_STANDARD .decode(hash_b64) .map_err(|_| DecodeError("failed to decode root hash".to_string())) .and_then(|v| { <[u8; 32]>::try_from(v) .map_err(|_| DecodeError("expected 32-byte hash".to_string())) })?; let other_content = other_content .iter() .filter(|s| !s.is_empty()) .map(|s| { s.split_once(": ") .map(|(k, v)| OtherContent::KeyValue(k.to_string(), v.to_string())) .unwrap_or(OtherContent::Value(s.to_string())) }) .collect(); Ok(Checkpoint { origin: origin.to_string(), size, hash, other_content, }) } } impl Serialize for SignedCheckpoint { fn serialize(&self, serializer: S) -> Result where S: Serializer, { self.encode().serialize(serializer) } } impl<'de> Deserialize<'de> for SignedCheckpoint { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { ::deserialize(deserializer).and_then(|s| { SignedCheckpoint::decode(&s).map_err(|DecodeError(err)| serde::de::Error::custom(err)) }) } } impl CheckpointSignature { // encode into format used by Rekor for signed checkpoints (Signed Tree Heads) // in the sumdb note format `– ` // See https://github.com/transparency-dev/formats/blob/2de64aa755f08489bda36125786ced79688af872/log/README.md#signed-envelope fn encode(&self) -> String { let sig_b64 = BASE64_STANDARD.encode([self.key_fingerprint.as_slice(), self.raw.as_slice()].concat()); // line starts with an em dash ( \u{2014}) format!("\u{2014} {} {sig_b64}\n", self.name) } // decode from format used by Rekor for signed checkpoints (Signed Tree Heads) // in the sumdb note format `– ` // See 
https://github.com/transparency-dev/formats/blob/2de64aa755f08489bda36125786ced79688af872/log/README.md#signed-envelope fn decode(s: &str) -> Result { let s = s.trim_start_matches('\n').trim_end_matches('\n'); if !s.starts_with('\u{2014}') { return Err(DecodeError("signature line missing em dash".to_string())); } let [_emdash, name, sig_b64] = s.split(' ').collect::>()[..] else { return Err(DecodeError(format!("unexpected signature format {s:?}"))); }; let sig = BASE64_STANDARD .decode(sig_b64.trim_end()) .map_err(|_| DecodeError("failed to decode signature".to_string()))?; // first four bytes of signature are fingerprint of key let (key_fingerprint, sig) = sig .split_at_checked(4) .ok_or_else(|| DecodeError("unexpected signature length in checkpoint".to_string()))?; let key_fingerprint = key_fingerprint .try_into() .map_err(|_| DecodeError("unexpected signature length in checkpoint".to_string()))?; Ok(CheckpointSignature { key_fingerprint, name: name.to_string(), raw: sig.to_vec(), }) } } #[cfg(test)] mod test { #[cfg(test)] mod test_checkpoint_note { use crate::rekor::models::checkpoint::Checkpoint; use crate::rekor::models::checkpoint::OtherContent::{KeyValue, Value}; #[test] fn test_marshal() { let test_cases = [ ( "Log Checkpoint v0", 123, [0; 32], vec![], "Log Checkpoint v0\n123\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=\n", ), ( "Banana Checkpoint v5", 9944, [1; 32], vec![], "Banana Checkpoint v5\n9944\nAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQE=\n", ), ( "Banana Checkpoint v7", 9943, [2; 32], vec![Value("foo".to_string()), Value("bar".to_string())], "Banana Checkpoint v7\n9943\nAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI=\nfoo\nbar\n", ), ]; for (origin, size, hash, other_content, expected) in test_cases { assert_eq!( Checkpoint { size, origin: origin.to_string(), hash, other_content, } .marshal(), expected ); } } #[test] fn test_unmarshal_valid() { let test_cases = [ ( "valid", "Log Checkpoint v0", 123, [0; 32], vec![], "Log Checkpoint 
v0\n123\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=\n", ), ( "valid", "Banana Checkpoint v5", 9944, [1; 32], vec![], "Banana Checkpoint v5\n9944\nAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQE=\n", ), ( "valid with multiple trailing data lines", "Banana Checkpoint v7", 9943, [2; 32], vec![Value("foo".to_string()), Value("bar".to_string())], "Banana Checkpoint v7\n9943\nAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI=\nfoo\nbar\n", ), ( "valid with key-value data line", "Banana Checkpoint v7", 9943, [2; 32], vec![KeyValue( "Timestamp".to_string(), "1689748607742585419".to_string(), )], "Banana Checkpoint v7\n9943\nAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI=\nTimestamp: 1689748607742585419\n", ), ( "valid with trailing newlines", "Banana Checkpoint v7", 9943, [2; 32], vec![], "Banana Checkpoint v7\n9943\nAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI=\n\n\n\n", ), ]; for (desc, origin, size, hash, other_content, input) in test_cases { let got = Checkpoint::unmarshal(input); let expected = Checkpoint { size, origin: origin.to_string(), hash, other_content, }; assert_eq!(got, Ok(expected), "failed test case: {desc}"); } } #[test] fn test_unmarshal_invalid() { let test_cases = [ ("invalid - insufficient lines", "Head\n9944\n"), ( "invalid - empty header", "\n9944\ndGhlIHZpZXcgZnJvbSB0aGUgdHJlZSB0b3BzIGlzIGdyZWF0IQ==\n", ), ( "invalid - empty origin", "123\ndGhlIHZpZXcgZnJvbSB0aGUgdHJlZSB0b3BzIGlzIGdyZWF0IQ==\nother data\n", ), ( "invalid - missing newline on roothash", "Log Checkpoint v0\n123\nYmFuYW5hcw==", ), ( "invalid size - not a number", "Log Checkpoint v0\nbananas\ndGhlIHZpZXcgZnJvbSB0aGUgdHJlZSB0b3BzIGlzIGdyZWF0IQ==\n", ), ( "invalid size - negative", "Log Checkpoint v0\n-34\ndGhlIHZpZXcgZnJvbSB0aGUgdHJlZSB0b3BzIGlzIGdyZWF0IQ==\n", ), ( "invalid size - too large", "Log Checkpoint v0\n3438945738945739845734895735\ndGhlIHZpZXcgZnJvbSB0aGUgdHJlZSB0b3BzIGlzIGdyZWF0IQ==\n", ), ( "invalid roothash - not base64", "Log Checkpoint v0\n123\nThisIsn'tBase64\n", ), ]; for (desc, 
data) in test_cases { assert!( Checkpoint::unmarshal(data).is_err(), "accepted invalid note: {desc}" ); } } } #[cfg(test)] mod test_checkpoint_signature { use crate::rekor::models::checkpoint::{Checkpoint, CheckpointSignature, SignedCheckpoint}; #[test] fn test_to_string_valid_with_url_name() { let got = CheckpointSignature { name: "log.example.org".to_string(), key_fingerprint: [0; 4], raw: vec![1; 32], } .encode(); let expected = "— log.example.org AAAAAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEB\n"; assert_eq!(got, expected) } #[test] fn test_to_string_valid_with_id_name() { let got = CheckpointSignature { name: "815f6c60aab9".to_string(), key_fingerprint: [0; 4], raw: vec![1; 32], } .encode(); let expected = "— 815f6c60aab9 AAAAAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEB\n"; assert_eq!(got, expected) } #[test] fn test_from_str_valid_with_url_name() { let input = "— log.example.org AAAAAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEB\n"; let expected = CheckpointSignature { name: "log.example.org".to_string(), key_fingerprint: [0; 4], raw: vec![1; 32], }; let got = CheckpointSignature::decode(input); assert_eq!(got, Ok(expected)) } #[test] fn test_from_str_valid_with_id_name() { let input = "— 815f6c60aab9 AAAAAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEB\n"; let expected = CheckpointSignature { name: "815f6c60aab9".to_string(), key_fingerprint: [0; 4], raw: vec![1; 32], }; let got = CheckpointSignature::decode(input); assert_eq!(got, Ok(expected)) } #[test] fn test_from_str_valid_with_whitespace() { let input = "\n— log.example.org AAAAAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEB\n\n"; let expected = CheckpointSignature { name: "log.example.org".to_string(), key_fingerprint: [0; 4], raw: vec![1; 32], }; let got = CheckpointSignature::decode(input); assert_eq!(got, Ok(expected)) } #[test] fn test_from_str_invalid_with_spaces_in_name() { let input = "— Foo Bar AAAAAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEB\n"; let got = CheckpointSignature::decode(input); 
assert!(got.is_err()) } #[test] fn test_checkpoint_encode_decode_multiple_signatures() { let note = Checkpoint { origin: "Test Log".to_string(), size: 42, hash: [7; 32], other_content: vec![], }; let sig1 = CheckpointSignature { name: "log1.example.org".to_string(), key_fingerprint: [1, 2, 3, 4], raw: vec![5; 32], }; let sig2 = CheckpointSignature { name: "log2.example.org".to_string(), key_fingerprint: [9, 8, 7, 6], raw: vec![6; 32], }; let checkpoint = SignedCheckpoint { note: note.clone(), signatures: vec![sig1.clone(), sig2.clone()], }; let encoded = checkpoint.encode(); let decoded = SignedCheckpoint::decode(&encoded).expect("decode should succeed"); assert_eq!(decoded.note, note); assert_eq!(decoded.signatures.len(), 2); assert_eq!(decoded.signatures[0], sig1); assert_eq!(decoded.signatures[1], sig2); } } } ================================================ FILE: src/rekor/models/consistency_proof.rs ================================================ /* * Rekor * * Rekor is a cryptographically secure, immutable transparency log for signed software releases. * * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use crate::crypto::merkle::{MerkleProofVerifier, Rfc6269Default, hex_to_hash_output}; use crate::errors::SigstoreError; use crate::errors::SigstoreError::ConsistencyProofError; use serde::{Deserialize, Serialize}; use sha2::Sha256; use sha2::digest::Output; /// Used to deserialize responses of the Rekor API. #[derive(Clone, Serialize, Deserialize)] pub struct RekorConsistencyProof { /// The hash value in hex form stored at the root of the merkle tree at the time the proof was /// generated #[serde(rename = "rootHash")] pub root_hash: String, // the hashes stored in hex form #[serde(rename = "hashes")] pub hashes: Vec, } /// Represents a Merkle consistency proof for a transparency log. /// /// This struct is typically constructed from the log's API response in [`RekorConsistencyProof`]. 
/// /// It contains the hashes (as bytestring arrays) required to prove that a newer Merkle tree is an /// append-only extension of a previous tree, as well as the root hash of the tree at the time the /// proof was generated. This is used to verify the append-only property of the log between two /// tree sizes. #[derive(Clone, Debug, PartialEq, Eq, Default)] pub struct ConsistencyProof { /// The hash value, in bytestring, stored at the root of the merkle tree at the time the proof /// was generated pub root_hash: [u8; 32], // the hashes stored in bytestring form pub hashes: Vec<[u8; 32]>, } impl TryFrom for ConsistencyProof { type Error = crate::errors::SigstoreError; fn try_from(raw: RekorConsistencyProof) -> Result { // let root_hash = <[u8; 32]>::from_hex(&raw.root_hash)?; let root_hash = hex_to_hash_output(&raw.root_hash)?.into(); let hashes = raw .hashes .into_iter() .map(hex_to_hash_output) .map(|r| r.map(Into::into)) .collect::>()?; Ok(ConsistencyProof { root_hash, hashes }) } } impl ConsistencyProof { pub fn new(root_hash: [u8; 32], hashes: Vec<[u8; 32]>) -> ConsistencyProof { ConsistencyProof { root_hash, hashes } } /// Verify this consistency proof against the given parameters. /// If `new_root` is `Some` then this root will be used in the verification. If it is `None` /// then the root in `self.root_hash` is used. 
pub fn verify( &self, old_size: u64, old_root: &[u8; 32], new_size: u64, new_root: Option<&[u8; 32]>, ) -> Result<(), SigstoreError> { // convert hashes from bytestring and into Sha256 let proof_hashes: Vec> = self .hashes .iter() .map(|h| Output::::from(*h)) .collect(); let new_root = match new_root { Some(s) => s, None => &self.root_hash, }; Rfc6269Default::verify_consistency( old_size, new_size, &proof_hashes, old_root.into(), new_root.into(), ) .map_err(ConsistencyProofError) } } ================================================ FILE: src/rekor/models/error.rs ================================================ /* * Rekor * * Rekor is a cryptographically secure, immutable transparency log for signed software releases. * * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct Error { #[serde(rename = "code", skip_serializing_if = "Option::is_none")] pub code: Option, #[serde(rename = "message", skip_serializing_if = "Option::is_none")] pub message: Option, } impl Error { pub fn new() -> Error { Error { code: None, message: None, } } } ================================================ FILE: src/rekor/models/hashedrekord.rs ================================================ /* * Rekor * * Rekor is a cryptographically secure, immutable transparency log for signed software releases. 
* * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use base64::{Engine as _, engine::general_purpose::STANDARD as BASE64_STD_ENGINE}; use serde::{Deserialize, Serialize}; use crate::errors::SigstoreError; /// Hashedrekord : Hashed Rekord object #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] pub struct Hashedrekord { #[serde(rename = "kind")] pub kind: String, #[serde(rename = "apiVersion")] pub api_version: String, #[serde(rename = "spec")] pub spec: Spec, } impl Hashedrekord { /// Hashed Rekord object pub fn new(kind: String, api_version: String, spec: Spec) -> Hashedrekord { Hashedrekord { kind, api_version, spec, } } } /// Stores the Signature and Data struct #[derive(Default, Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Spec { pub signature: Signature, pub data: Data, } // Design a SPEC struct impl Spec { pub fn new(signature: Signature, data: Data) -> Spec { Spec { signature, data } } } /// Stores the signature format, signature of the artifact and the PublicKey struct #[derive(Default, Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Signature { pub content: String, pub public_key: PublicKey, } impl Signature { pub fn new(content: String, public_key: PublicKey) -> Signature { Signature { content, public_key, } } } /// Stores the public key used to sign the artifact #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct PublicKey { content: String, } impl PublicKey { pub fn new(content: String) -> PublicKey { PublicKey { content } } pub fn decode(&self) -> Result { let decoded = BASE64_STD_ENGINE.decode(&self.content)?; String::from_utf8(decoded).map_err(|e| SigstoreError::from(e.utf8_error())) } } #[derive(Default, Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Data { pub hash: 
Hash, } impl Data { pub fn new(hash: Hash) -> Data { Data { hash } } } #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[allow(non_camel_case_types)] pub enum AlgorithmKind { #[default] sha256, sha1, } /// Stores the algorithm used to hash the artifact and the value of the hash #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Hash { pub algorithm: AlgorithmKind, pub value: String, } impl Hash { pub fn new(algorithm: AlgorithmKind, value: String) -> Hash { Hash { algorithm, value } } } ================================================ FILE: src/rekor/models/hashedrekord_all_of.rs ================================================ /* * Rekor * * Rekor is a cryptographically secure, immutable transparency log for signed software releases. * * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct HashedrekordAllOf { #[serde(rename = "apiVersion")] pub api_version: String, #[serde(rename = "spec")] pub spec: serde_json::Value, } impl HashedrekordAllOf { pub fn new(api_version: String, spec: serde_json::Value) -> HashedrekordAllOf { HashedrekordAllOf { api_version, spec } } } ================================================ FILE: src/rekor/models/helm.rs ================================================ /* * Rekor * * Rekor is a cryptographically secure, immutable transparency log for signed software releases. 
* * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use serde::{Deserialize, Serialize}; /// Helm : Helm chart #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct Helm { #[serde(rename = "kind")] pub kind: String, #[serde(rename = "apiVersion")] pub api_version: String, #[serde(rename = "spec")] pub spec: serde_json::Value, } impl Helm { /// Helm chart pub fn new(kind: String, api_version: String, spec: serde_json::Value) -> Helm { Helm { kind, api_version, spec, } } } ================================================ FILE: src/rekor/models/helm_all_of.rs ================================================ /* * Rekor * * Rekor is a cryptographically secure, immutable transparency log for signed software releases. * * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct HelmAllOf { #[serde(rename = "apiVersion")] pub api_version: String, #[serde(rename = "spec")] pub spec: serde_json::Value, } impl HelmAllOf { pub fn new(api_version: String, spec: serde_json::Value) -> HelmAllOf { HelmAllOf { api_version, spec } } } ================================================ FILE: src/rekor/models/inactive_shard_log_info.rs ================================================ /* * Rekor * * Rekor is a cryptographically secure, immutable transparency log for signed software releases. 
* * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use crate::rekor::TreeSize; use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct InactiveShardLogInfo { /// The current hash value stored at the root of the merkle tree #[serde(rename = "rootHash")] pub root_hash: String, /// The current number of nodes in the merkle tree #[serde(rename = "treeSize")] pub tree_size: TreeSize, /// The current signed tree head #[serde(rename = "signedTreeHead")] pub signed_tree_head: String, /// The current treeID #[serde(rename = "treeID")] pub tree_id: String, } impl InactiveShardLogInfo { pub fn new( root_hash: String, tree_size: TreeSize, signed_tree_head: String, tree_id: String, ) -> InactiveShardLogInfo { InactiveShardLogInfo { root_hash, tree_size, signed_tree_head, tree_id, } } } ================================================ FILE: src/rekor/models/inclusion_proof.rs ================================================ /* * Rekor * * Rekor is a cryptographically secure, immutable transparency log for signed software releases. * * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use crate::crypto::CosignVerificationKey; use crate::crypto::merkle::{MerkleProofVerifier, Rfc6269Default, Rfc6269HasherTrait}; use crate::errors::SigstoreError; use crate::errors::SigstoreError::{InclusionProofError, UnexpectedError}; use crate::rekor::TreeSize; use crate::rekor::models::checkpoint::SignedCheckpoint; use sha2::Sha256; use sha2::digest::Output; use serde::{Deserialize, Serialize}; /// Stores the signature over the artifact's logID, logIndex, body and integratedTime. 
#[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct InclusionProof { /// The index of the entry in the transparency log #[serde(rename = "logIndex")] pub log_index: i64, /// The hash value stored at the root of the merkle tree at the time the proof was generated #[serde(rename = "rootHash")] pub root_hash: [u8; 32], /// The size of the merkle tree at the time the inclusion proof was generated #[serde(rename = "treeSize")] pub tree_size: TreeSize, /// A list of hashes required to compute the inclusion proof, sorted in order from leaf to root #[serde(rename = "hashes")] pub hashes: Vec<[u8; 32]>, /// A snapshot of the transparency log's state at a specific point in time, /// in [Signed Note format]. /// /// [Signed Note format]: https://github.com/transparency-dev/formats/blob/main/log/README.md pub checkpoint: Option, } impl InclusionProof { pub fn new( log_index: i64, root_hash: [u8; 32], tree_size: TreeSize, hashes: Vec<[u8; 32]>, checkpoint: Option, ) -> InclusionProof { InclusionProof { log_index, root_hash, tree_size, hashes, checkpoint, } } /// Verify that the canonically encoded `entry` is included in the log, /// and the included checkpoint was signed by the log. 
pub fn verify( &self, entry: &[u8], rekor_key: &CosignVerificationKey, ) -> Result<(), SigstoreError> { // enforce that there is a checkpoint let checkpoint = self.checkpoint.as_ref().ok_or(UnexpectedError( "inclusion proof misses checkpoint".to_string(), ))?; // verify the checkpoint signature checkpoint.verify_signature(rekor_key)?; // check if the inclusion and checkpoint match checkpoint.is_valid_for_proof(&self.root_hash.into(), self.tree_size)?; let entry_hash = Rfc6269Default::hash_leaf(entry); // convert hashes from bytestring and into Sha256 let proof_hashes: Vec> = self .hashes .iter() .map(|h| Output::::from(*h)) .collect(); Rfc6269Default::verify_inclusion( self.log_index as u64, &entry_hash, self.tree_size, &proof_hashes, &self.root_hash.into(), ) .map_err(InclusionProofError) } } ================================================ FILE: src/rekor/models/intoto.rs ================================================ /* * Rekor * * Rekor is a cryptographically secure, immutable transparency log for signed software releases. * * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use serde::{Deserialize, Serialize}; /// Intoto : Intoto object #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct Intoto { #[serde(rename = "kind")] pub kind: String, #[serde(rename = "apiVersion")] pub api_version: String, #[serde(rename = "spec")] pub spec: serde_json::Value, } impl Intoto { /// Intoto object pub fn new(kind: String, api_version: String, spec: serde_json::Value) -> Intoto { Intoto { kind, api_version, spec, } } } ================================================ FILE: src/rekor/models/intoto_all_of.rs ================================================ /* * Rekor * * Rekor is a cryptographically secure, immutable transparency log for signed software releases. 
* * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct IntotoAllOf { #[serde(rename = "apiVersion")] pub api_version: String, #[serde(rename = "spec")] pub spec: serde_json::Value, } impl IntotoAllOf { pub fn new(api_version: String, spec: serde_json::Value) -> IntotoAllOf { IntotoAllOf { api_version, spec } } } ================================================ FILE: src/rekor/models/jar.rs ================================================ /* * Rekor * * Rekor is a cryptographically secure, immutable transparency log for signed software releases. * * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ /// Jar : Java Archive (JAR) use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct Jar { #[serde(rename = "kind")] pub kind: String, #[serde(rename = "apiVersion")] pub api_version: String, #[serde(rename = "spec")] pub spec: serde_json::Value, } impl Jar { /// Java Archive (JAR) pub fn new(kind: String, api_version: String, spec: serde_json::Value) -> Jar { Jar { kind, api_version, spec, } } } ================================================ FILE: src/rekor/models/jar_all_of.rs ================================================ /* * Rekor * * Rekor is a cryptographically secure, immutable transparency log for signed software releases. 
* * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct JarAllOf { #[serde(rename = "apiVersion")] pub api_version: String, #[serde(rename = "spec")] pub spec: serde_json::Value, } impl JarAllOf { pub fn new(api_version: String, spec: serde_json::Value) -> JarAllOf { JarAllOf { api_version, spec } } } ================================================ FILE: src/rekor/models/log_entry.rs ================================================ // // Copyright 2023 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
use crate::errors::SigstoreError; use crate::rekor::TreeSize; use base64::{Engine as _, engine::general_purpose::STANDARD as BASE64_STD_ENGINE}; use crate::crypto::CosignVerificationKey; use crate::crypto::merkle::hex_to_hash_output; use crate::errors::SigstoreError::UnexpectedError; use crate::rekor::models::InclusionProof; use crate::rekor::models::checkpoint::SignedCheckpoint; use serde::{Deserialize, Serialize}; use serde_json::{Error, Value, json}; use std::collections::HashMap; use std::str::FromStr; use super::{ AlpineAllOf, HashedrekordAllOf, HelmAllOf, IntotoAllOf, JarAllOf, RekordAllOf, Rfc3161AllOf, RpmAllOf, TufAllOf, }; /// Stores the response returned by Rekor after making a new entry #[derive(Default, Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] #[serde(deny_unknown_fields, rename_all = "camelCase")] pub struct LogEntry { #[serde(skip_serializing_if = "Option::is_none")] pub uuid: Option, #[serde(skip_serializing_if = "Option::is_none")] pub attestation: Option, pub body: Body, pub integrated_time: i64, pub log_i_d: String, pub log_index: i64, pub verification: Verification, } impl FromStr for LogEntry { type Err = Error; fn from_str(s: &str) -> Result { let mut log_entry_map = serde_json::from_str::>(s)?; log_entry_map.entry("body").and_modify(|body| { let decoded_body = serde_json::to_value( decode_body(body.as_str().expect("Failed to parse Body")) .expect("Failed to decode Body"), ) .expect("Serialization failed"); *body = json!(decoded_body); }); let log_entry_str = serde_json::to_string(&log_entry_map)?; Ok(serde_json::from_str::(&log_entry_str).expect("Serialization failed")) } } #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(tag = "kind")] #[allow(non_camel_case_types)] pub enum Body { alpine(AlpineAllOf), helm(HelmAllOf), jar(JarAllOf), rfc3161(Rfc3161AllOf), rpm(RpmAllOf), tuf(TufAllOf), intoto(IntotoAllOf), hashedrekord(HashedrekordAllOf), rekord(RekordAllOf), } impl Default for Body { fn default() -> Self { 
Self::hashedrekord(Default::default())
    }
}

/// Decodes the base64-encoded `body` field of a Rekor log entry.
///
/// NOTE(review): generic type parameters in this chunk were destroyed by text
/// extraction; they are reconstructed here (return type inferred from the
/// surrounding `Body` enum) — verify against upstream sigstore-rs.
fn decode_body(s: &str) -> Result<Body, SigstoreError> {
    let decoded = BASE64_STD_ENGINE.decode(s)?;
    serde_json::from_slice(&decoded).map_err(SigstoreError::from)
}

#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Attestation {
    // This field is just a place holder.
    // Not sure what is stored inside the Attestation struct, it is empty for now.
    #[serde(skip_serializing_if = "Option::is_none")]
    dummy: Option<String>,
}

/// Stores the signature over the artifact's logID, logIndex, body and integratedTime.
#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Verification {
    // Merkle inclusion proof as returned by the Rekor API; absent when the
    // entry was fetched without proof material.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub inclusion_proof: Option<RekorInclusionProof>,
    pub signed_entry_timestamp: String,
}

impl LogEntry {
    /// Verifies that the log entry was included by a log in possession of `rekor_key`.
    ///
    /// Example:
    /// ```rust,no_run
    /// use sigstore::rekor::apis::configuration::Configuration;
    /// use sigstore::rekor::apis::pubkey_api::get_public_key;
    /// use sigstore::rekor::apis::tlog_api::get_log_info;
    /// use sigstore::crypto::{CosignVerificationKey, SigningScheme};
    /// #[tokio::main]
    /// async fn main() {
    ///     use sigstore::rekor::apis::entries_api::get_log_entry_by_index;
    ///     let rekor_config = Configuration::default();
    ///     // Important: in practice obtain the rekor key via TUF repo or another secure channel!
    ///     let rekor_key = get_public_key(&rekor_config, None)
    ///         .await
    ///         .expect("failed to fetch pubkey from remote log");
    ///     let rekor_key = CosignVerificationKey::from_pem(
    ///         rekor_key.as_bytes(),
    ///         &SigningScheme::ECDSA_P256_SHA256_ASN1,
    ///     ).expect("failed to parse rekor key");
    ///
    ///     // fetch log info and then the most recent entry
    ///     let log_info = get_log_info(&rekor_config)
    ///         .await
    ///         .expect("failed to fetch log info");
    ///     let entry = get_log_entry_by_index(&rekor_config, (log_info.tree_size - 1) as i32)
    ///         .await.expect("failed to fetch log entry");
    ///     entry.verify_inclusion(&rekor_key)
    ///         .expect("failed to verify inclusion");
    /// }
    /// ```
    pub fn verify_inclusion(&self, rekor_key: &CosignVerificationKey) -> Result<(), SigstoreError> {
        // An entry without proof material cannot be verified at all.
        let api_proof = self
            .verification
            .inclusion_proof
            .as_ref()
            .ok_or_else(|| UnexpectedError("missing inclusion proof".to_string()))?;

        // Convert the hex/string API representation into the binary proof type.
        let proof = InclusionProof::try_from(api_proof)
            .map_err(|e| UnexpectedError(format!("Failed to convert inclusion proof: {e}")))?;

        // The leaf hash is computed over the canonical JSON form of the body.
        let buf = serde_json_canonicalizer::to_vec(&self.body).map_err(|e| {
            SigstoreError::UnexpectedError(format!(
                "Cannot create canonical JSON representation of body: {e:?}"
            ))
        })?;

        proof.verify(&buf, rekor_key)
    }
}

/// Stores the signature over the artifact's logID, logIndex, body and integratedTime.
///
/// This struct is used for API (de)serialization in queries to Rekor
#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RekorInclusionProof {
    pub hashes: Vec<String>,
    pub log_index: i64,
    pub root_hash: String,
    pub tree_size: TreeSize,
    /// A snapshot of the transparency log's state at a specific point in time,
    /// in [Signed Note format].
    ///
    /// [Signed Note format]: https://github.com/transparency-dev/formats/blob/main/log/README.md
    pub checkpoint: String,
}

impl TryFrom<&RekorInclusionProof> for InclusionProof {
    type Error = crate::errors::SigstoreError;

    fn try_from(api: &RekorInclusionProof) -> Result<Self, Self::Error> {
        // Decode every hex-encoded audit-path hash; any bad digit aborts the
        // conversion.
        let hashes = api
            .hashes
            .iter()
            .map(hex_to_hash_output)
            .map(|r| r.map(Into::into))
            .collect::<Result<Vec<_>, _>>()?;

        let root_hash = hex_to_hash_output(&api.root_hash)?;

        // An empty checkpoint string means "no checkpoint provided".
        let checkpoint = if api.checkpoint.is_empty() {
            None
        } else {
            Some(
                SignedCheckpoint::decode(&api.checkpoint)
                    .map_err(|e| SigstoreError::ParseCheckpointError(format!("{:?}", e)))?,
            )
        };

        Ok(InclusionProof {
            hashes,
            log_index: api.log_index,
            root_hash: root_hash.into(),
            tree_size: api.tree_size,
            checkpoint,
        })
    }
}

#[cfg(test)]
mod tests {
    use std::str::FromStr;

    use crate::crypto::{CosignVerificationKey, SigningScheme};

    use super::LogEntry;

    const LOG_ENTRY: &str = r#"
    {
        "body": "eyJhcGlWZXJzaW9uIjoiMC4wLjEiLCJraW5kIjoiaGFzaGVkcmVrb3JkIiwic3BlYyI6eyJkYXRhIjp7Imhhc2giOnsiYWxnb3JpdGhtIjoic2hhMjU2IiwidmFsdWUiOiI0N2MxZGI5ZmI1ZmU3ZmY2NmUzZDdjMTViMmNhNWQzYTA0NmVlOGY0YWEwNDNkZWRkMzE3ZTQ2YjMyMWM0MzkwIn19LCJzaWduYXR1cmUiOnsiY29udGVudCI6Ik1FWUNJUURVell6d3o4SEdhVXRXNUwvb0VNNGc1MFVvSUtzNXhuV1B0amFyeHRKckxBSWhBTzkwRTl2NGd5MmZUcytJbHM4OFczOXhldEUzS3NqRHN0cXF6NXNQMGVITSIsInB1YmxpY0tleSI6eyJjb250ZW50IjoiTFMwdExTMUNSVWRKVGlCRFJWSlVTVVpKUTBGVVJTMHRMUzB0Q2sxSlNVTkRWRU5EUVdFclowRjNTVUpCWjBsSFFWbEhjMEZMUVhkTlFXOUhRME54UjFOTk5EbENRVTFEVFVOdmVFUlVRVXhDWjA1V1FrRk5UVUpJVW13S1l6TlJlRWRVUVZoQ1owNVdRa0Z2VFVWSVVteGpNMUZuV1RKV2VXUkhiRzFoVjA1b1pFZFZkMGhvWTA1TmFrbDNUbXBKTkUxcVFYbFBSRlY0VjJoalRncE5ha2wzVG1wSk5FMXFRVEJQUkZWNFYycEJjVTFSTUhkRGQxbEVWbEZSUkVSQlVqQmFXRTR3VFZKcmQwWjNXVVJXVVZGTFJFSkNNRnBZVGpCSlIwNXNDbU51VW5CYWJXeHFXVmhTYkUxR2EzZEZkMWxJUzI5YVNYcHFNRU5CVVZsSlMyOWFTWHBxTUVSQlVXTkVVV2RCUlVSQ1VISnBNMEp3VlhZNVRYRndVMlFLWlVoWlJXVjRZM3BqV0RKWmRHRkJXRGxDVjB4VVkyVm9Za2MxUnpkUFVGcHNVekZ2Y0hWRldXMVViVEJhY2pKTmNXcHBiV05xTHpjNFpFSTJNbUpFWWdwSlMwcDZTbUZQUW5kRVEwSjJWRUZrUW1kT1ZraFJORVZHWjFGVlFXcHBSMUJFUWsxSFNXSTFZVEp3YUhkeU1VVTJURXBtVTJGdmQwaDNXVVJXVWpCcUNrSkNaM2RHYjBGVldWTldPV1V5TjFKVmN6TTViRTg1VWsxTVlXaGtZVzV0V1VaM2QwUm5XVVJXVWpCUVFWRklMMEpCVVVSQloyVkJUVUpOUjBFeFZXUUtTbEZSVFUxQmIwZERRM05IUVZGVlJrSjNUVVJOUVhkSFFURlZaRVYzUlVJdmQxRkRUVUZCZDBkM1dVUldVakJTUVZGSUwwSkNSWGRFTkVWT1pFZFdlZ3BrUlVJd1dsaE9NRXh0VG5aaVZFRnlRbWR2Y2tKblJVVkJXVTh2VFVGRlFrSkNNVzlrU0ZKM1kzcHZka3d5V21oaE1sWm9XVEpPZG1SWE5UQmplVFV3Q2xwWVRqQk1iVTUyWWxSQlMwSm5aM0ZvYTJwUFVGRlJSRUZuVGtsQlJFSkdRV2xCVXpWTVZHeHlXak54Vm5aUGIyVjBibGh4V21JdmEzcEVURWRhYXpNS1MySkJTMGhMYmpkemFqQkZabFFKYUVGT05uTldVRTlyWlU1SlVYYzJlVEJNUVhNMVlrbGFXVkExUVVoTWFFUm9SRTlhZG1Od1lWUlhaek5xQ2kwdExTMHRSVTVFSUVORlVsUkpSa2xEUVZSRkxTMHRMUzBLIn19fX0=",
        "integratedTime": 1656448131,
        "logID": "d32f30a3c32d639c2b762205a21c7bb07788e68283a4ae6f42118723a1bea496",
        "logIndex": 1688,
        "verification": {
            "inclusionProof": {
                "hashes": [
                    "810320ec3029914695826d60133c67021f66ee0cfb09a6f79eb267ed9f55de2c",
                    "67e9d9f66f0ad388f7e1a20991e9a2ae3efad5cbf281e8b3d2aaf1ef99a4618c",
                    "16a106400c53465f6e18c2475df6ba889ca30f5667bacf32b1a5661f14a5080c",
                    "b4439e8d71edbc96271723cb7a969dd725e23e73d139361864a62ed76ce8dc11",
                    "49b3e90806c7b63b5a86f5748e3ecb7d264ea0828eb74a45bc1a2cd7962408e8",
                    "5059ad9b48fa50bd9adcbff0dd81c5a0dcb60f37e0716e723a33805a464f72f8",
                    "6c2ce64219799e61d72996884eee9e19fb906e4d7fa04b71625fde4108f21762",
                    "784f79c817abb78db3ae99b6c1ede640470bf4bb678673a05bf3a6b50aaaddd6",
                    "c6d92ebf4e10cdba500ca410166cd0a8d8b312154d2f45bc4292d63dea6112f6",
                    "1768732027401f6718b0df7769e2803127cfc099eb130a8ed7d913218f6a65f6",
                    "0da021f68571b65e49e926e4c69024de3ac248a1319d254bc51a85a657b93c33",
                    "bc8cf0c8497d5c24841de0c9bef598ec99bbd59d9538d58568340646fe289e9a",
                    "be328fa737b8fa9461850b8034250f237ff5b0b590b9468e6223968df294872b",
                    "6f06f4025d0346f04830352b23f65c8cd9e3ce4b8cb899877c35282521ddaf85"
                ],
                "logIndex": 1227,
                "rootHash": "effa4fa4575f72829016a64e584441203de533212f9470d63a56d1992e73465d",
                "treeSize": 14358,
                "checkpoint": "rekor.sigstage.dev - 108574341321668964\n14358\n7/pPpFdfcoKQFqZOWERBID3lMyEvlHDWOlbRmS5zRl0=\n\n— rekor.sigstage.dev 0y8wozBFAiB8OkuzdwlL6/rDEu2CsIfqmesaH/KLfmIMvlH3YTdIYgIhAPFZeXK6+b0vbWy4GSU/YZxiTpFrrzjsVOShN4LlPdZb\n"
            },
            "signedEntryTimestamp": "MEUCIQCO8dFvolJwFZDHkhkSdsW3Ny+07fG8CF7G32feG8NJMgIgd2qfJ5shezuXX8I1S6DsudvIZ8xN/+y95at/V5xHfEQ="
        }
    }
    "#;

    /// Pubkey for `rekor.sigstage.dev`.
    const REKOR_STAGING_KEY_PEM: &str = r#"
-----BEGIN PUBLIC KEY-----
MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEDODRU688UYGuy54mNUlaEBiQdTE9
nYLr0lg6RXowI/QV/RE1azBn4Eg5/2uTOMbhB1/gfcHzijzFi9Tk+g1Prg==
-----END PUBLIC KEY-----
"#;

    #[test]
    fn test_inclusion_proof_valid() {
        let entry = LogEntry::from_str(LOG_ENTRY).expect("failed to parse log entry");
        let rekor_key = CosignVerificationKey::from_pem(
            REKOR_STAGING_KEY_PEM.as_bytes(),
            &SigningScheme::ECDSA_P256_SHA256_ASN1,
        )
        .expect("failed to parse Rekor key");

        entry
            .verify_inclusion(&rekor_key)
            .expect("rejected valid inclusion proof");
    }

    #[test]
    fn test_inclusion_proof_missing_proof() {
        let mut entry = LogEntry::from_str(LOG_ENTRY).expect("failed to parse log entry");
        // Strip the proof: verification must now fail fast.
        entry.verification.inclusion_proof = None;

        let rekor_key = CosignVerificationKey::from_pem(
            REKOR_STAGING_KEY_PEM.as_bytes(),
            &SigningScheme::ECDSA_P256_SHA256_ASN1,
        )
        .expect("failed to parse Rekor key");

        entry
            .verify_inclusion(&rekor_key)
            .expect_err("accepted invalid inclusion proof");
    }

    #[test]
    fn test_inclusion_proof_modified_proof() {
        let entry = LogEntry::from_str(LOG_ENTRY).expect("failed to parse log entry");
        let rekor_key = CosignVerificationKey::from_pem(
            REKOR_STAGING_KEY_PEM.as_bytes(),
            &SigningScheme::ECDSA_P256_SHA256_ASN1,
        )
        .expect("failed to parse Rekor key");

        let mut test_cases = vec![];

        // swap upper and lower halves of a hash.
        let mut entry_modified_hashes = entry.clone();
        entry_modified_hashes
            .verification
            .inclusion_proof
            .as_mut()
            .unwrap()
            .hashes[0] =
            "1f66ee0cfb09a6f79eb267ed9f55de2c810320ec3029914695826d60133c6702".to_string();
        test_cases.push((entry_modified_hashes, "modified hash"));

        // modify checkpoint.
        let mut entry_modified_checkpoint = entry.clone();
        entry_modified_checkpoint
            .verification
            .inclusion_proof
            .as_mut()
            .unwrap()
            .checkpoint = "foo".to_string();
        test_cases.push((entry_modified_checkpoint, "modified checkpoint"));

        // modify log index.
        let mut entry_modified_log_index = entry.clone();
        entry_modified_log_index
            .verification
            .inclusion_proof
            .as_mut()
            .unwrap()
            .log_index += 1;
        test_cases.push((entry_modified_log_index, "modified log index"));

        // modify root hash.
        let mut entry_modified_root_hash = entry.clone();
        entry_modified_root_hash
            .verification
            .inclusion_proof
            .as_mut()
            .unwrap()
            .root_hash =
            "3de533212f9470d63a56d1992e73465deffa4fa4575f72829016a64e58444120".to_string();
        test_cases.push((entry_modified_root_hash, "modified root hash"));

        // modify tree size.
        let mut entry_modified_tree_size = entry.clone();
        entry_modified_tree_size
            .verification
            .inclusion_proof
            .as_mut()
            .unwrap()
            .tree_size += 1;
        test_cases.push((entry_modified_tree_size, "modified tree size"));

        for (case, desc) in test_cases {
            let res = case.verify_inclusion(&rekor_key);
            assert!(res.is_err(), "accepted invalid proof: {desc}");
        }
    }
}

================================================
FILE: src/rekor/models/log_info.rs
================================================
/*
 * Rekor
 *
 * Rekor is a cryptographically secure, immutable transparency log for signed software releases.
* * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use crate::crypto::CosignVerificationKey; use crate::crypto::merkle::hex_to_hash_output; use crate::errors::SigstoreError; use crate::rekor::TreeSize; use crate::rekor::models::ConsistencyProof; use crate::rekor::models::checkpoint::SignedCheckpoint; use serde::{Deserialize, Serialize}; /// Used to deserialize responses of the Rekor API. #[derive(Serialize, Deserialize)] pub struct RekorLogInfo { /// The current hash value stored at the root of the merkle tree #[serde(rename = "rootHash")] pub root_hash: String, /// The current number of nodes in the merkle tree #[serde(rename = "treeSize")] pub tree_size: TreeSize, /// The current signed tree head #[serde(rename = "signedTreeHead")] pub signed_tree_head: SignedCheckpoint, /// The current treeID #[serde(rename = "treeID")] pub tree_id: Option, /// Optional list of inactive shards that may still be valid for auditing purposes #[serde(rename = "inactiveShards", skip_serializing_if = "Option::is_none")] pub inactive_shards: Option>, } /// LogInfo represents the current state of a Rekor transparency log. /// /// This struct is typically constructed from the log's API response in [`RekorLogInfo`]. /// /// Used to verify log consistency, inclusion proofs, and auditing the log's /// append-only property. 
#[derive(Clone, Debug, PartialEq, Eq)] pub struct LogInfo { /// The current hash value, in bytestring, stored at the root of the merkle tree pub root_hash: [u8; 32], /// The current number of nodes in the merkle tree pub tree_size: TreeSize, /// The current signed tree head pub signed_tree_head: SignedCheckpoint, /// The current treeID pub tree_id: Option, /// Optional list of inactive shards that may still be valid for auditing purposes pub inactive_shards: Option>, } impl TryFrom for LogInfo { type Error = crate::errors::SigstoreError; fn try_from(raw: RekorLogInfo) -> Result { Ok(LogInfo { root_hash: hex_to_hash_output(&raw.root_hash)?.into(), tree_size: raw.tree_size, signed_tree_head: raw.signed_tree_head, tree_id: raw.tree_id, inactive_shards: raw.inactive_shards, }) } } impl LogInfo { pub fn new( root_hash: [u8; 32], tree_size: TreeSize, signed_tree_head: SignedCheckpoint, ) -> LogInfo { LogInfo { root_hash, tree_size, signed_tree_head, tree_id: None, inactive_shards: None, } } /// Verify the consistency of the proof provided by the log. /// /// Example: /// ```rust /// use sigstore::crypto::{CosignVerificationKey, SigningScheme}; /// use sigstore::rekor::apis::configuration::Configuration; /// use sigstore::rekor::apis::pubkey_api::get_public_key; /// use sigstore::rekor::apis::tlog_api::{get_log_info, get_log_proof}; /// /// #[tokio::main] /// async fn main() { /// let rekor_config = Configuration::default(); /// /// // Important: in practice obtain the rekor key via TUF repo or another secure channel! 
/// let rekor_key = get_public_key(&rekor_config, None) /// .await /// .expect("failed to fetch pubkey from remote log"); /// let rekor_key = CosignVerificationKey::from_pem( /// rekor_key.as_bytes(), /// &SigningScheme::ECDSA_P256_SHA256_ASN1, /// ).expect("failed to parse rekor key"); /// // fetch log info twice and run consistency proof /// let log_info1 = get_log_info(&rekor_config) /// .await /// .expect("failed to fetch data from remote"); /// let log_info2 = get_log_info(&rekor_config) /// .await /// .expect("failed to fetch data from remote"); /// /// // get a proof using log_info1 as the previous tree state /// let proof = get_log_proof( /// &rekor_config, /// log_info2.tree_size as _, /// Some(&log_info1.tree_size.to_string()), /// None, /// ) /// .await /// .expect("failed to fetch data from remote"); /// /// // verify proof for the new log info /// log_info2 /// .verify_consistency(log_info1.tree_size, &log_info1.root_hash, &proof, &rekor_key) /// .expect("failed to verify log consistency"); /// } /// /// ``` pub fn verify_consistency( &self, old_size: u64, old_root: &[u8; 32], consistency_proof: &ConsistencyProof, rekor_key: &CosignVerificationKey, ) -> Result<(), SigstoreError> { // verify checkpoint is signed by log self.signed_tree_head.verify_signature(rekor_key)?; self.signed_tree_head .is_valid_for_proof(&self.root_hash.into(), self.tree_size)?; consistency_proof.verify( old_size, old_root, self.tree_size as _, Some(&self.root_hash), )?; Ok(()) } } #[cfg(test)] mod tests { use crate::{ crypto::{CosignVerificationKey, SigningScheme}, rekor::models::{ConsistencyProof, RekorConsistencyProof}, }; use super::{LogInfo, RekorLogInfo}; const LOG_INFO_OLD: &str = r#" { "inactiveShards": [ { "rootHash": "ed4cb79f98642c7cd7626f8307d8fee48e04991dc4e827611884f131e53221ba", "signedTreeHead": "rekor.sigstage.dev - 8959784741570461564\n461\n7Uy3n5hkLHzXYm+DB9j+5I4EmR3E6CdhGITxMeUyIbo=\n\n— rekor.sigstage.dev 
0y8wozBFAiBeSutKae/1zsGfMgCstDexSktqVfYgAKYaFNsBqYQ3cAIhAOewsY+B/oXGOILSBv3wduhlyn4wNmV3v1eRg3LOwHDi\n", "treeID": "8959784741570461564", "treeSize": 461 }, { "rootHash": "effa4fa4575f72829016a64e584441203de533212f9470d63a56d1992e73465d", "signedTreeHead": "rekor.sigstage.dev - 108574341321668964\n14358\n7/pPpFdfcoKQFqZOWERBID3lMyEvlHDWOlbRmS5zRl0=\n\n— rekor.sigstage.dev 0y8wozBFAiBJlYY/wJQw6hW3LzziTAp7SXjc7MfghJ31tiydO1MvrAIhAPCX7LQ5jUNOssRDFJPXX3DdQjdan+8UGrKzGgfayV0c\n", "treeID": "108574341321668964", "treeSize": 14358 }, { "rootHash": "ae6af751ddcfffc1b77386692d7eaa9b105c191cb613fad3e718183513b956f1", "signedTreeHead": "rekor.sigstage.dev - 8050909264565447525\n31667593\nrmr3Ud3P/8G3c4ZpLX6qmxBcGRy2E/rT5xgYNRO5VvE=\n\n— rekor.sigstage.dev 0y8wozBFAiEA6yozMl9lFn21m5mQHCJUyEiI3HOOuM5sIeVt/MU2MQMCIBDhFtWjwPKIjFSr/liQ8LY7K6LHQRvtzkoIrsWZ/c9a\n", "treeID": "8050909264565447525", "treeSize": 31667593 } ], "rootHash": "e222aa53db49893334fb5a878ead1bf8b9f8f3c02ccfc0ae687f28256bd74907", "signedTreeHead": "rekor.sigstage.dev - 8202293616175992157\n1352760\n4iKqU9tJiTM0+1qHjq0b+Ln488Asz8CuaH8oJWvXSQc=\n\n— rekor.sigstage.dev 0y8wozBFAiEAnIjdHAH9uhqBrRNBA4bMaKR30H6qdzW4TAsdB0/KP0ICIDjK9VeE+9dWXSAm/B0aPkhO7pJMLmKPjo9btFD9ZvEs\n", "treeID": "8202293616175992157", "treeSize": 1352760 }"#; const LOG_INFO_NEW: &str = r#" { "inactiveShards": [ { "rootHash": "ed4cb79f98642c7cd7626f8307d8fee48e04991dc4e827611884f131e53221ba", "signedTreeHead": "rekor.sigstage.dev - 8959784741570461564\n461\n7Uy3n5hkLHzXYm+DB9j+5I4EmR3E6CdhGITxMeUyIbo=\n\n— rekor.sigstage.dev 0y8wozBFAiEAvtvC/roj8MxqTqvyHaq5pVHQ4eWJwNb/BpMNGLrjPdYCIB5rWm8b1FCsnVUty27Gyvod3PB9MgG6ar24XDYrNSau\n", "treeID": "8959784741570461564", "treeSize": 461 }, { "rootHash": "effa4fa4575f72829016a64e584441203de533212f9470d63a56d1992e73465d", "signedTreeHead": "rekor.sigstage.dev - 108574341321668964\n14358\n7/pPpFdfcoKQFqZOWERBID3lMyEvlHDWOlbRmS5zRl0=\n\n— rekor.sigstage.dev 
0y8wozBFAiEA5zsLKvJeAuSc61IxVqNKnyVA0FIOZFck/cQl1BoYj0kCICMOJUulfDbukn5ApybPKUJ20nsFQ0P/54ku3/bl0Thq\n", "treeID": "108574341321668964", "treeSize": 14358 }, { "rootHash": "ae6af751ddcfffc1b77386692d7eaa9b105c191cb613fad3e718183513b956f1", "signedTreeHead": "rekor.sigstage.dev - 8050909264565447525\n31667593\nrmr3Ud3P/8G3c4ZpLX6qmxBcGRy2E/rT5xgYNRO5VvE=\n\n— rekor.sigstage.dev 0y8wozBEAiBok3nxMEarLtLkNJFCq+4A3r1givc2YZqO48quIGEOrgIgUGJwm2+yr59SH/Vmf7+XxPY/mMIuyXlP6OXDdnHglF0=\n", "treeID": "8050909264565447525", "treeSize": 31667593 } ], "rootHash": "c7d98fcf73e06fb3b7a6c02648dee52567a4b7b6db1dae31ec723283b379c782", "signedTreeHead": "rekor.sigstage.dev - 8202293616175992157\n1352764\nx9mPz3Pgb7O3psAmSN7lJWekt7bbHa4x7HIyg7N5x4I=\n\n— rekor.sigstage.dev 0y8wozBGAiEAiU8vSPj7yujJ2R6ES8t2AXJG+uezCj5Th7Dp6U5kBU0CIQCDObTWELwMeAa0u1VndfB+WvXEXKtYTNm5QXzK7d7xhA==\n", "treeID": "8202293616175992157", "treeSize": 1352764 }"#; /// Consistency proof requested via: https://rekor.sigstage.dev/api/v1/log/proof?lastSize=1352764&firstSize=1352760&treeId=8202293616175992157 const LOG_PROOF: &str = r#" { "hashes": [ "2713ba8ade1872a38adf7d108e5cedf5056fbde30c6d19fcc10f965e9fc1373e", "2197a8f07628339739e65c2cc1d16fd36ccca1ef980d5966de82259a56821145", "bc6015344bdfce14a2d24d4230ae734002220557f7a930c8fbc17e1e3e86b692", "156bdcfc96e73a81f2255c4e05936ef0b50a0862213f4b863af228f4fa4f20ca", "a6c2a8510ab7f123bc4cc7927e1f3156bf324bfceafee6ecae8597739cb4b436", "299a7084ca00c8be9dfbf176291a266599308a014edc9c5ddacc07821d003837", "153a44af92202f031e457d09930fd53c85e519bf3a4b79a11b1d946e65a28da8", "3abe35db1c15b4710d9cf755a11f32d95f4e58907ac54fef389bfcf18c231f38", "e0300bb7400e692bccbf20b17fe7ec177aba23e7bfd36dcb7484935ccd214336" ], "rootHash": "437afb5d68e7f875cd91311f6549f4f12324418b39bdbf96cffe3884cb9e8f26" }"#; /// Pubkey for `rekor.sigstage.dev`. 
const REKOR_STAGING_KEY_PEM: &str = r#" -----BEGIN PUBLIC KEY----- MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEDODRU688UYGuy54mNUlaEBiQdTE9 nYLr0lg6RXowI/QV/RE1azBn4Eg5/2uTOMbhB1/gfcHzijzFi9Tk+g1Prg== -----END PUBLIC KEY----- "#; #[test] fn test_consistency_valid() { let rekor_key = CosignVerificationKey::from_pem( REKOR_STAGING_KEY_PEM.as_bytes(), &SigningScheme::ECDSA_P256_SHA256_ASN1, ) .expect("failed to parse Rekor key"); let log_info_old_raw: RekorLogInfo = serde_json::from_str(LOG_INFO_OLD).expect("failed to deserialize log info test data"); let log_info_old: LogInfo = LogInfo::try_from(log_info_old_raw).expect("failed to convert log info data"); let log_info_new_raw: RekorLogInfo = serde_json::from_str(LOG_INFO_NEW).expect("failed to deserialize log info test data"); let log_info_new: LogInfo = LogInfo::try_from(log_info_new_raw).expect("failed to convert log info data"); let consistency_proof_raw: RekorConsistencyProof = serde_json::from_str(LOG_PROOF).expect("failed to deserialize log proof data"); let consistency_proof = ConsistencyProof::try_from(consistency_proof_raw) .expect("failed to convert log proof data"); log_info_new .verify_consistency( log_info_old.tree_size, &log_info_old.root_hash, &consistency_proof, &rekor_key, ) .expect("failed to accept valid inclusion proof"); } #[test] fn test_consistency_invalid() { let rekor_key = CosignVerificationKey::from_pem( REKOR_STAGING_KEY_PEM.as_bytes(), &SigningScheme::ECDSA_P256_SHA256_ASN1, ) .expect("failed to parse Rekor key"); let log_info_old_raw: RekorLogInfo = serde_json::from_str(LOG_INFO_OLD).expect("failed to deserialize log info test data"); let log_info_old: LogInfo = LogInfo::try_from(log_info_old_raw).expect("failed to convert log info data"); let log_info_new_raw: RekorLogInfo = serde_json::from_str(LOG_INFO_NEW).expect("failed to deserialize log info test data"); let log_info_new: LogInfo = LogInfo::try_from(log_info_new_raw).expect("failed to convert log info data"); let consistency_proof_raw: 
RekorConsistencyProof = serde_json::from_str(LOG_PROOF).expect("failed to deserialize log proof data"); let consistency_proof = ConsistencyProof::try_from(consistency_proof_raw) .expect("failed to convert log proof data"); let mut test_cases = vec![]; let mut consistency_proof_empty = consistency_proof.clone(); consistency_proof_empty.hashes = vec![]; test_cases.push((consistency_proof_empty, "empty proof")); let mut consistency_proof_additional_hash_raw: RekorConsistencyProof = serde_json::from_str(LOG_PROOF).expect("failed to deserialize log proof data"); consistency_proof_additional_hash_raw .hashes .push("e0300bb7400e692bccbf20b17fe7ec177aba23e7bfd36dcb7484935ccd214336".to_string()); let consistency_proof_additional_hash = ConsistencyProof::try_from(consistency_proof_additional_hash_raw) .expect("failed to convert log proof data"); test_cases.push((consistency_proof_additional_hash, "too many hashes")); let mut consistency_proof_removed_hash = consistency_proof.clone(); let _ = consistency_proof_removed_hash.hashes.pop().unwrap(); test_cases.push((consistency_proof_removed_hash, "too few hashes")); // invert all the hashes in the proof let mut consistency_proof_invalid_hash = consistency_proof.clone(); consistency_proof_invalid_hash.hashes = consistency_proof_invalid_hash .hashes .into_iter() .map(|mut h| { h.reverse(); h }) .collect(); test_cases.push((consistency_proof_invalid_hash, "invalid hashes")); for (proof, desc) in test_cases { let res = log_info_new.verify_consistency( log_info_old.tree_size, &log_info_old.root_hash, &proof, &rekor_key, ); assert!(res.is_err(), "accepted invalid proof: {desc}"); } } } ================================================ FILE: src/rekor/models/mod.rs ================================================ pub mod alpine; pub use self::alpine::Alpine; pub mod alpine_all_of; pub use self::alpine_all_of::AlpineAllOf; pub mod consistency_proof; pub use self::consistency_proof::ConsistencyProof; pub use 
self::consistency_proof::RekorConsistencyProof; pub mod error; pub use self::error::Error; pub mod hashedrekord; pub use self::hashedrekord::Hashedrekord; pub mod hashedrekord_all_of; pub use self::hashedrekord_all_of::HashedrekordAllOf; pub mod helm; pub use self::helm::Helm; pub mod helm_all_of; pub use self::helm_all_of::HelmAllOf; pub mod inactive_shard_log_info; pub use self::inactive_shard_log_info::InactiveShardLogInfo; pub mod inclusion_proof; pub use self::inclusion_proof::InclusionProof; pub mod intoto; pub use self::intoto::Intoto; pub mod intoto_all_of; pub use self::intoto_all_of::IntotoAllOf; pub mod jar; pub use self::jar::Jar; pub mod jar_all_of; pub use self::jar_all_of::JarAllOf; pub mod log_info; pub use self::log_info::LogInfo; pub use self::log_info::RekorLogInfo; pub mod proposed_entry; pub use self::proposed_entry::ProposedEntry; pub mod rekord; pub use self::rekord::Rekord; pub mod rekord_all_of; pub use self::rekord_all_of::RekordAllOf; pub mod rfc3161; pub use self::rfc3161::Rfc3161; pub mod rfc3161_all_of; pub use self::rfc3161_all_of::Rfc3161AllOf; pub mod rpm; pub use self::rpm::Rpm; pub mod rpm_all_of; pub use self::rpm_all_of::RpmAllOf; pub mod search_index; pub use self::search_index::SearchIndex; pub mod search_index_public_key; pub use self::search_index_public_key::SearchIndexPublicKey; pub mod search_log_query; pub use self::search_log_query::SearchLogQuery; pub mod tuf; pub use self::tuf::Tuf; pub mod tuf_all_of; pub use self::tuf_all_of::TufAllOf; pub mod checkpoint; pub mod log_entry; pub use self::log_entry::LogEntry; ================================================ FILE: src/rekor/models/proposed_entry.rs ================================================ /* * Rekor * * Rekor is a cryptographically secure, immutable transparency log for signed software releases. 
* * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] #[serde(tag = "kind")] pub enum ProposedEntry { #[serde(rename = "alpine")] Alpine { #[serde(rename = "apiVersion")] api_version: String, #[serde(rename = "spec")] spec: serde_json::Value, }, #[serde(rename = "hashedrekord")] Hashedrekord { #[serde(rename = "apiVersion")] api_version: String, #[serde(rename = "spec")] spec: super::hashedrekord::Spec, }, #[serde(rename = "helm")] Helm { #[serde(rename = "apiVersion")] api_version: String, #[serde(rename = "spec")] spec: serde_json::Value, }, #[serde(rename = "intoto")] Intoto { #[serde(rename = "apiVersion")] api_version: String, #[serde(rename = "spec")] spec: serde_json::Value, }, #[serde(rename = "jar")] Jar { #[serde(rename = "apiVersion")] api_version: String, #[serde(rename = "spec")] spec: serde_json::Value, }, #[serde(rename = "rekord")] Rekord { #[serde(rename = "apiVersion")] api_version: String, #[serde(rename = "spec")] spec: serde_json::Value, }, #[serde(rename = "rfc3161")] Rfc3161 { #[serde(rename = "apiVersion")] api_version: String, #[serde(rename = "spec")] spec: serde_json::Value, }, #[serde(rename = "rpm")] Rpm { #[serde(rename = "apiVersion")] api_version: String, #[serde(rename = "spec")] spec: serde_json::Value, }, #[serde(rename = "tuf")] Tuf { #[serde(rename = "apiVersion")] api_version: String, #[serde(rename = "spec")] spec: serde_json::Value, }, } ================================================ FILE: src/rekor/models/rekord.rs ================================================ /* * Rekor * * Rekor is a cryptographically secure, immutable transparency log for signed software releases. 
* * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ /// Rekord : Rekord object use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct Rekord { #[serde(rename = "kind")] pub kind: String, #[serde(rename = "apiVersion")] pub api_version: String, #[serde(rename = "spec")] pub spec: serde_json::Value, } impl Rekord { /// Rekord object pub fn new(kind: String, api_version: String, spec: serde_json::Value) -> Rekord { Rekord { kind, api_version, spec, } } } ================================================ FILE: src/rekor/models/rekord_all_of.rs ================================================ /* * Rekor * * Rekor is a cryptographically secure, immutable transparency log for signed software releases. * * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct RekordAllOf { #[serde(rename = "apiVersion")] pub api_version: String, #[serde(rename = "spec")] pub spec: serde_json::Value, } impl RekordAllOf { pub fn new(api_version: String, spec: serde_json::Value) -> RekordAllOf { RekordAllOf { api_version, spec } } } ================================================ FILE: src/rekor/models/rfc3161.rs ================================================ /* * Rekor * * Rekor is a cryptographically secure, immutable transparency log for signed software releases. 
* * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ /// Rfc3161 : RFC3161 Timestamp use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct Rfc3161 { #[serde(rename = "kind")] pub kind: String, #[serde(rename = "apiVersion")] pub api_version: String, #[serde(rename = "spec")] pub spec: serde_json::Value, } impl Rfc3161 { /// RFC3161 Timestamp pub fn new(kind: String, api_version: String, spec: serde_json::Value) -> Rfc3161 { Rfc3161 { kind, api_version, spec, } } } ================================================ FILE: src/rekor/models/rfc3161_all_of.rs ================================================ /* * Rekor * * Rekor is a cryptographically secure, immutable transparency log for signed software releases. * * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct Rfc3161AllOf { #[serde(rename = "apiVersion")] pub api_version: String, #[serde(rename = "spec")] pub spec: serde_json::Value, } impl Rfc3161AllOf { pub fn new(api_version: String, spec: serde_json::Value) -> Rfc3161AllOf { Rfc3161AllOf { api_version, spec } } } ================================================ FILE: src/rekor/models/rpm.rs ================================================ /* * Rekor * * Rekor is a cryptographically secure, immutable transparency log for signed software releases. 
* * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use serde::{Deserialize, Serialize}; /// Rpm : RPM package #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct Rpm { #[serde(rename = "kind")] pub kind: String, #[serde(rename = "apiVersion")] pub api_version: String, #[serde(rename = "spec")] pub spec: serde_json::Value, } impl Rpm { /// RPM package pub fn new(kind: String, api_version: String, spec: serde_json::Value) -> Rpm { Rpm { kind, api_version, spec, } } } ================================================ FILE: src/rekor/models/rpm_all_of.rs ================================================ /* * Rekor * * Rekor is a cryptographically secure, immutable transparency log for signed software releases. * * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct RpmAllOf { #[serde(rename = "apiVersion")] pub api_version: String, #[serde(rename = "spec")] pub spec: serde_json::Value, } impl RpmAllOf { pub fn new(api_version: String, spec: serde_json::Value) -> RpmAllOf { RpmAllOf { api_version, spec } } } ================================================ FILE: src/rekor/models/search_index.rs ================================================ /* * Rekor * * Rekor is a cryptographically secure, immutable transparency log for signed software releases. 
* * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct SearchIndex { #[serde(rename = "email", skip_serializing_if = "Option::is_none")] pub email: Option, #[serde(rename = "publicKey", skip_serializing_if = "Option::is_none")] pub public_key: Option, #[serde(rename = "hash", skip_serializing_if = "Option::is_none")] pub hash: Option, } impl SearchIndex { pub fn new() -> SearchIndex { SearchIndex { email: None, public_key: None, hash: None, } } } ================================================ FILE: src/rekor/models/search_index_public_key.rs ================================================ /* * Rekor * * Rekor is a cryptographically secure, immutable transparency log for signed software releases. * * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct SearchIndexPublicKey { #[serde(rename = "format")] pub format: Format, #[serde(rename = "content", skip_serializing_if = "Option::is_none")] pub content: Option, #[serde(rename = "url", skip_serializing_if = "Option::is_none")] pub url: Option, } impl SearchIndexPublicKey { pub fn new(format: Format) -> SearchIndexPublicKey { SearchIndexPublicKey { format, content: None, url: None, } } } /// The supported pluggable types to sign and upload data #[derive( Default, Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize, )] pub enum Format { #[serde(rename = "pgp")] #[default] Pgp, #[serde(rename = "x509")] X509, #[serde(rename = "minisign")] Minisign, #[serde(rename = "ssh")] Ssh, #[serde(rename = "tuf")] Tuf, } ================================================ FILE: src/rekor/models/search_log_query.rs ================================================ /* * Rekor * * Rekor 
is a cryptographically secure, immutable transparency log for signed software releases. * * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, Eq, PartialEq, Default, Serialize, Deserialize)] pub struct SearchLogQuery { #[serde(rename = "entryUUIDs", skip_serializing_if = "Option::is_none")] pub entry_uuids: Option>, #[serde(rename = "logIndexes", skip_serializing_if = "Option::is_none")] pub log_indexes: Option>, #[serde(rename = "entries", skip_serializing_if = "Option::is_none")] pub entries: Option>, } impl SearchLogQuery { pub fn new() -> SearchLogQuery { SearchLogQuery { entry_uuids: None, log_indexes: None, entries: None, } } } ================================================ FILE: src/rekor/models/tuf.rs ================================================ /* * Rekor * * Rekor is a cryptographically secure, immutable transparency log for signed software releases. * * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use serde::{Deserialize, Serialize}; /// Tuf : TUF metadata #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct Tuf { #[serde(rename = "kind")] pub kind: String, #[serde(rename = "apiVersion")] pub api_version: String, #[serde(rename = "spec")] pub spec: serde_json::Value, } impl Tuf { /// TUF metadata pub fn new(kind: String, api_version: String, spec: serde_json::Value) -> Tuf { Tuf { kind, api_version, spec, } } } ================================================ FILE: src/rekor/models/tuf_all_of.rs ================================================ /* * Rekor * * Rekor is a cryptographically secure, immutable transparency log for signed software releases. 
* * The version of the OpenAPI document: 0.0.1 * * Generated by: https://openapi-generator.tech */ use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize)] pub struct TufAllOf { #[serde(rename = "apiVersion")] pub api_version: String, #[serde(rename = "spec")] pub spec: serde_json::Value, } impl TufAllOf { pub fn new(api_version: String, spec: serde_json::Value) -> TufAllOf { TufAllOf { api_version, spec } } } ================================================ FILE: src/trust/mod.rs ================================================ // // Copyright 2024 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::collections::BTreeMap; use pki_types::CertificateDer; #[cfg_attr(docsrs, doc(cfg(feature = "sigstore-trust-root")))] #[cfg(feature = "sigstore-trust-root")] pub mod sigstore; /// A `TrustRoot` owns all key material necessary for establishing a root of trust. pub trait TrustRoot { fn fulcio_certs(&self) -> crate::errors::Result>>; fn rekor_keys(&self) -> crate::errors::Result>; fn ctfe_keys(&self) -> crate::errors::Result>; } /// A `ManualTrustRoot` is a [TrustRoot] with out-of-band trust materials. /// As it does not establish a trust root with TUF, users must initialize its materials themselves. 
#[derive(Debug, Default)] pub struct ManualTrustRoot<'a> { pub fulcio_certs: Vec>, pub rekor_keys: BTreeMap>, pub ctfe_keys: BTreeMap>, } impl<'a> TrustRoot for ManualTrustRoot<'a> { fn fulcio_certs(&self) -> crate::errors::Result>> { Ok(self.fulcio_certs.clone()) } fn rekor_keys(&self) -> crate::errors::Result> { Ok(self .rekor_keys .iter() .map(|(k, v)| (k.clone(), v.as_slice())) .collect()) } fn ctfe_keys(&self) -> crate::errors::Result> { Ok(self .ctfe_keys .iter() .map(|(k, v)| (k.clone(), v.as_slice())) .collect()) } } ================================================ FILE: src/trust/sigstore/constants.rs ================================================ // // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. pub(crate) const SIGSTORE_METADATA_BASE: &str = "https://tuf-repo-cdn.sigstore.dev"; pub(crate) const SIGSTORE_TARGET_BASE: &str = "https://tuf-repo-cdn.sigstore.dev/targets"; macro_rules! impl_static_resource { {$($name:literal,)+} => { #[inline] pub(crate) fn static_resource(name: N) -> Option<&'static [u8]> where N: AsRef { match name.as_ref() { $( $name => Some(include_bytes!(concat!(env!("CARGO_MANIFEST_DIR"), "/trust_root/prod/", $name))) ),+, _ => None, } } }; } impl_static_resource! { "root.json", "trusted_root.json", } ================================================ FILE: src/trust/sigstore/mod.rs ================================================ // // Copyright 2021 The Sigstore Authors. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Helper Structs to interact with the Sigstore TUF repository. //! //! The main interaction point is [`SigstoreTrustRoot`], which fetches Rekor's //! public key and Fulcio's certificate. //! //! These can later be given to [`cosign::ClientBuilder`](crate::cosign::ClientBuilder) //! to enable Fulcio and Rekor integrations. use std::{ collections::BTreeMap, fs, path::{Path, PathBuf}, }; use futures_util::TryStreamExt; use pki_types::CertificateDer; use sha2::{Digest, Sha256}; use sigstore_protobuf_specs::dev::sigstore::{ common::v1::TimeRange, trustroot::v1::{ CertificateAuthority, ClientTrustConfig, TransparencyLogInstance, TrustedRoot, }, }; use tokio_util::bytes::BytesMut; use tough::TargetName; use tracing::debug; mod constants; mod transport; use crate::errors::{Result, SigstoreError}; pub use crate::trust::{ManualTrustRoot, TrustRoot}; /// Securely fetches Rekor public key and Fulcio certificates from Sigstore's TUF repository. #[derive(Debug)] pub struct SigstoreTrustRoot { trusted_root: TrustedRoot, } impl SigstoreTrustRoot { /// Constructs a new trust root from a [`tough::Repository`]. async fn from_tough( repository: &tough::Repository, checkout_dir: Option<&Path>, ) -> Result { let trusted_root = { let data = Self::fetch_target(repository, checkout_dir, "trusted_root.json").await?; serde_json::from_slice(&data[..])? 
}; Ok(Self { trusted_root }) } /// Constructs a new trust root backed by the Sigstore Public Good Instance. pub async fn new(cache_dir: Option<&Path>) -> Result { // These are statically defined and should always parse correctly. let metadata_base = url::Url::parse(constants::SIGSTORE_METADATA_BASE)?; let target_base = url::Url::parse(constants::SIGSTORE_TARGET_BASE)?; let client = reqwest::Client::new(); let transport = transport::ReqwestTransport::new(client); let repository = tough::RepositoryLoader::new( &constants::static_resource("root.json").expect("Failed to fetch embedded TUF root!"), metadata_base, target_base, ) .transport(transport) .expiration_enforcement(tough::ExpirationEnforcement::Safe) .load() .await .map_err(Box::new)?; Self::from_tough(&repository, cache_dir).await } /// Constructs a new trust root from a JSON object containing a /// [`TrustedRoot`](https://github.com/sigstore/protobuf-specs). /// /// # Warning /// /// This constructor does not perform any validation of the provided data. /// The caller must ensure that the data is trustworthy. /// Using untrusted data may lead to security vulnerabilities. pub fn from_trusted_root_json_unchecked(data: &[u8]) -> Result { let trusted_root: TrustedRoot = serde_json::from_slice(data)?; Ok(Self { trusted_root }) } /// Constructs a `SigstoreTrustRoot` instance from a Sigstore client trust configuration file. /// /// Reads the specified PKI file, parses its JSON content into a `ClientTrustConfig`, /// and attempts to convert it into the SigstoreTrustRoot type. Returns an error if the file /// cannot be read, the JSON is malformed, or the conversion fails. /// /// # Arguments /// /// * `pki_file` - Path to the Sigstore PKI trust configuration file. /// /// # Errors /// /// Returns `SigstoreError::SigstoreBundleMalformedError` if the file cannot be read /// or parsed, or if the conversion fails. 
pub fn from_client_trust_config(pki_file: &PathBuf) -> Result { let json_bytes = fs::read(pki_file).map_err(|e| SigstoreError::IOErrorWithContext { context: format!("could not read Sigstore PKI file {}", pki_file.display()), source: e, })?; let client_trust_config: ClientTrustConfig = serde_json::from_slice(json_bytes.as_slice()) .map_err(|e| { SigstoreError::SigstorePKIFileMalformedError(format!( "could not parse trust config file {}: {:?}", pki_file.display(), e )) })?; client_trust_config.try_into() } async fn fetch_target( repository: &tough::Repository, checkout_dir: Option<&Path>, name: N, ) -> Result> where N: TryInto, { let name: TargetName = name.try_into().map_err(Box::new)?; let local_path = checkout_dir.as_ref().map(|d| d.join(name.raw())); let read_remote_target = || async { match repository.read_target(&name).await { Ok(Some(s)) => Ok(s.try_collect::().await.map_err(Box::new)?), _ => Err(SigstoreError::TufTargetNotFoundError(name.raw().to_owned())), } }; // First, try reading the target from disk cache. let data = if let Some(Ok(local_data)) = local_path.as_ref().map(std::fs::read) { debug!("{}: reading from disk cache", name.raw()); local_data.to_vec() // Try reading the target embedded into the binary. } else if let Some(embedded_data) = constants::static_resource(name.raw()) { debug!("{}: reading from embedded resources", name.raw()); embedded_data.to_vec() // If all else fails, read the data from the TUF repo. } else { match read_remote_target().await { Ok(remote_data) => { debug!("{}: reading from remote", name.raw()); remote_data.to_vec() } _ => { return Err(SigstoreError::TufTargetNotFoundError(name.raw().to_owned())); } } }; // Get metadata (hash) of the target and update the disk copy if it doesn't match. let Some(target) = repository.targets().signed.targets.get(&name) else { return Err(SigstoreError::TufMetadataError(format!( "couldn't get metadata for {}", name.raw() ))); }; let data = if Sha256::digest(&data)[..] 
!= target.hashes.sha256[..] { debug!("{}: out of date", name.raw()); read_remote_target().await?.to_vec() } else { data }; // Write our updated data back to the disk. if let Some(local_path) = local_path { std::fs::write(local_path, &data)?; } Ok(data) } #[inline] fn tlog_keys(tlogs: &[TransparencyLogInstance]) -> impl Iterator { tlogs .iter() .filter(|tlog| { if let Some(public_key) = tlog.public_key.as_ref() { is_timerange_valid(public_key.valid_for.as_ref(), false) } else { false } }) .filter_map(|tlog| { let key_id = tlog .log_id .as_ref() .map(|log_id| hex::encode(log_id.key_id.as_slice())); let public_key_raw = tlog .public_key .as_ref() .and_then(|pk| pk.raw_bytes.as_ref()); match (key_id, public_key_raw) { (Some(id), Some(key)) => Some((id, key.as_slice())), _ => None, } }) } #[inline] fn ca_keys( cas: &[CertificateAuthority], allow_expired: bool, ) -> impl Iterator { cas.iter() .filter(move |ca| is_timerange_valid(ca.valid_for.as_ref(), allow_expired)) .flat_map(|ca| ca.cert_chain.as_ref()) .flat_map(|chain| chain.certificates.iter()) .map(|cert| cert.raw_bytes.as_slice()) } } impl TryFrom for SigstoreTrustRoot { type Error = SigstoreError; fn try_from(value: ClientTrustConfig) -> Result { let trusted_root = value .trusted_root .ok_or(SigstoreError::SigstorePKIFileMalformedError( "trusted_root field is missing".to_owned(), ))?; Ok(SigstoreTrustRoot { trusted_root }) } } impl crate::trust::TrustRoot for SigstoreTrustRoot { /// Fetch Fulcio certificates from the given TUF repository or reuse /// the local cache if its contents are not outdated. /// /// The contents of the local cache are updated when they are outdated. fn fulcio_certs(&self) -> Result>> { // Allow expired certificates: they may have been active when the // certificate was used to sign. 
let certs = Self::ca_keys(&self.trusted_root.certificate_authorities, true);
        let certs: Vec<_> = certs
            .map(|c| CertificateDer::from(c).into_owned())
            .collect();

        if certs.is_empty() {
            Err(SigstoreError::TufMetadataError(
                "Fulcio certificates not found".into(),
            ))
        } else {
            Ok(certs)
        }
    }

    /// Returns the currently-valid Rekor public keys from the trusted root,
    /// keyed by hex-encoded log id. An empty map is not an error here.
    fn rekor_keys(&self) -> Result<BTreeMap<String, &[u8]>> {
        let keys: BTreeMap<String, &[u8]> =
            Self::tlog_keys(&self.trusted_root.tlogs).collect();
        Ok(keys)
    }

    /// Returns the currently-valid CTFE public keys from the trusted root,
    /// keyed by hex-encoded log id. Errors if none are present.
    fn ctfe_keys(&self) -> Result<BTreeMap<String, &[u8]>> {
        let keys: BTreeMap<String, &[u8]> =
            Self::tlog_keys(&self.trusted_root.ctlogs).collect();
        if keys.is_empty() {
            Err(SigstoreError::TufMetadataError(
                "CTFE keys not found".into(),
            ))
        } else {
            Ok(keys)
        }
    }
}

/// Given a `range`, checks that the current time is not before `start`. If
/// `allow_expired` is `false`, also checks that the current time is not after
/// `end`.
fn is_timerange_valid(range: Option<&TimeRange>, allow_expired: bool) -> bool {
    let now = chrono::Utc::now().timestamp();

    let start = range.and_then(|r| r.start.as_ref()).map(|t| t.seconds);
    let end = range.and_then(|r| r.end.as_ref()).map(|t| t.seconds);

    match (start, end) {
        // If there was no validity period specified, the key is always valid.
        (None, _) => true,
        // Active: if the current time is before the starting period, we are not yet valid.
        (Some(start), _) if now < start => false,
        // If we want Expired keys, then we don't need to check the end.
        _ if allow_expired => true,
        // If there is no expiry date, the key is valid.
        (_, None) => true,
        // If we have an expiry date, check it.
        (_, Some(end)) => now <= end,
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use rstest::{fixture, rstest};
    use std::fs;
    use std::io::prelude::*;
    use std::path::Path;
    use std::time::SystemTime;
    use tempfile::{NamedTempFile, TempDir};

    // Common sanity checks: cache is populated and all key material resolves.
    fn verify(root: &SigstoreTrustRoot, cache_dir: Option<&Path>) {
        if let Some(cache_dir) = cache_dir {
            assert!(
                cache_dir.join("trusted_root.json").exists(),
                "the trusted root was not cached"
            );
        }

        assert!(
            root.fulcio_certs().is_ok_and(|v| !v.is_empty()),
            "no Fulcio certs established"
        );
        assert!(
            root.rekor_keys().is_ok_and(|v| !v.is_empty()),
            "no Rekor keys established"
        );
        assert!(
            root.ctfe_keys().is_ok_and(|v| !v.is_empty()),
            "no CTFE keys established"
        );
    }

    #[fixture]
    fn cache_dir() -> TempDir {
        TempDir::new().expect("cannot create temp cache dir")
    }

    async fn trust_root(cache: Option<&Path>) -> SigstoreTrustRoot {
        SigstoreTrustRoot::new(cache)
            .await
            .expect("failed to construct SigstoreTrustRoot")
    }

    #[rstest]
    #[tokio::test]
    async fn trust_root_fetch(#[values(None, Some(cache_dir()))] cache: Option<TempDir>) {
        let cache = cache.as_ref().map(|t| t.path());
        let root = trust_root(cache).await;

        verify(&root, cache);
    }

    #[rstest]
    #[tokio::test]
    async fn trust_root_outdated(cache_dir: TempDir) {
        // Seed the cache with garbage; the fetch must detect the hash
        // mismatch and replace it with fresh, verified data.
        let trusted_root_path = cache_dir.path().join("trusted_root.json");
        let outdated_data = b"fake trusted root";
        fs::write(&trusted_root_path, outdated_data)
            .expect("failed to write to trusted root cache");

        let cache = Some(cache_dir.path());
        let root = trust_root(cache).await;
        verify(&root, cache);

        let data = fs::read(&trusted_root_path).expect("failed to read from trusted root cache");
        assert_ne!(data, outdated_data, "TUF cache was not properly updated");
    }

    #[test]
    fn test_is_timerange_valid() {
        // Builds a TimeRange offset (in seconds) from "now".
        fn range_from(start: i64, end: i64) -> TimeRange {
            let base = chrono::Utc::now();
            let start: SystemTime = (base + chrono::TimeDelta::seconds(start)).into();
            let end: SystemTime = (base + chrono::TimeDelta::seconds(end)).into();

            TimeRange {
                start: Some(start.into()),
                end:
Some(end.into()),
            }
        }

        assert!(is_timerange_valid(None, true));
        assert!(is_timerange_valid(None, false));

        // Test lower bound conditions
        // Valid: 1 ago, 1 from now
        assert!(is_timerange_valid(Some(&range_from(-1, 1)), false));
        // Invalid: 1 from now, 1 from now
        assert!(!is_timerange_valid(Some(&range_from(1, 1)), false));

        // Test upper bound conditions
        // Invalid: 1 ago, 1 ago
        assert!(!is_timerange_valid(Some(&range_from(-1, -1)), false));
        // Valid: 1 ago, 1 ago
        assert!(is_timerange_valid(Some(&range_from(-1, -1)), true))
    }

    #[rstest]
    #[case::missing_file(None, Some("could not read Sigstore PKI file"))]
    #[case::missing_trusted_root_field(Some(ClientTrustConfig{
        media_type: "application/vnd.dev.sigstore.clienttrustconfig.v0.1+json".to_owned(),
        trusted_root: None,
        ..Default::default()
    }), Some("trusted_root field is missing"))]
    #[case::trusted_root_present(Some(ClientTrustConfig{
        media_type: "application/vnd.dev.sigstore.clienttrustconfig.v0.1+json".to_owned(),
        trusted_root: Some(TrustedRoot{
            media_type: "application/vnd.dev.sigstore.trustedroot.v0.2+json".to_owned(),
            ..Default::default()
        }),
        ..Default::default()
    }), None)]
    fn test_load_missing_trust_client_config(
        #[case] content: Option<ClientTrustConfig>,
        #[case] error: Option<&str>,
    ) {
        let mut trust_client_config_file =
            NamedTempFile::new().expect("failed to create temp file");
        if let Some(content) = content.as_ref() {
            trust_client_config_file
                .write_all(
                    serde_json::to_string(&content)
                        .expect("failed to serialize content")
                        .as_bytes(),
                )
                .expect("failed to write to temp file");
            trust_client_config_file
                .flush()
                .expect("failed to flush temp file");
        }
        // The missing-file case deliberately points at a path that does not exist.
        let trust_client_config_file_path = if content.is_some() {
            trust_client_config_file.path().to_path_buf()
        } else {
            PathBuf::from("non_existent_file.json")
        };

        let trust_root =
            SigstoreTrustRoot::from_client_trust_config(&trust_client_config_file_path);
        if let Some(expected_msg) = error {
            let error = trust_root.expect_err("should fail");
            assert!(
                error.to_string().contains(expected_msg),
                "error message does not contain expected substring. Got: {}, expected to contain: {}",
                error,
                expected_msg
            );
            return;
        }
        assert_eq!(
            content
                .expect("expected content")
                .trusted_root
                .expect("missing trusted_root"),
            trust_root.expect("failed to parse file").trusted_root
        );
    }
}


================================================
FILE: src/trust/sigstore/transport.rs
================================================
//
// Copyright 2026 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! A [`tough::Transport`] implementation backed by `reqwest` (0.13).
//!
//! This replaces tough's built-in `HttpTransport` (which depends on reqwest 0.12)
//! so that the entire crate uses a single version of reqwest.

use std::{
    pin::Pin,
    task::{Context, Poll},
};

use async_trait::async_trait;
use futures_util::Stream;
use reqwest::Client;
use tough::{
    Bytes, FilesystemTransport, Transport, TransportError, TransportErrorKind, TransportStream,
};
use url::Url;

/// A [`Transport`] that fetches over HTTP(S) using `reqwest` 0.13 and
/// delegates `file://` URLs to [`FilesystemTransport`].
#[derive(Clone, Debug)] pub(crate) struct ReqwestTransport { client: Client, file: FilesystemTransport, } impl ReqwestTransport { pub(crate) fn new(client: Client) -> Self { Self { client, file: FilesystemTransport, } } } #[async_trait] impl Transport for ReqwestTransport { async fn fetch(&self, url: Url) -> Result { match url.scheme() { "file" => self.file.fetch(url).await, "http" | "https" => { let response = self .client .get(url.as_str()) .send() .await .and_then(|r| r.error_for_status()) .map_err(|e| { let kind = match e.status() { Some(s) if s == reqwest::StatusCode::NOT_FOUND || s == reqwest::StatusCode::FORBIDDEN || s == reqwest::StatusCode::GONE => { TransportErrorKind::FileNotFound } _ => TransportErrorKind::Other, }; TransportError::new_with_cause(kind, url.clone(), e) })?; Ok(Box::pin(BytesStreamAdapter { inner: Box::pin(response.bytes_stream()), url, })) } _ => Err(TransportError::new( TransportErrorKind::UnsupportedUrlScheme, url, )), } } } /// Adapts `reqwest::Response::bytes_stream()` into a [`TransportStream`] by /// mapping `reqwest::Error` to `TransportError`. 
struct BytesStreamAdapter {
    inner: Pin<Box<dyn Stream<Item = reqwest::Result<Bytes>> + Send + Sync>>,
    url: Url,
}

impl Stream for BytesStreamAdapter {
    type Item = Result<Bytes, TransportError>;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        // Delegate to the wrapped stream, translating any reqwest error
        // into a TransportError tagged with the originating URL.
        self.inner.as_mut().poll_next(cx).map(|opt| {
            opt.map(|result| {
                result.map_err(|e| {
                    TransportError::new_with_cause(TransportErrorKind::Other, self.url.clone(), e)
                })
            })
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Write;
    use tough::IntoVec;

    fn transport() -> ReqwestTransport {
        ReqwestTransport::new(Client::new())
    }

    #[tokio::test]
    async fn unsupported_scheme() {
        let url = Url::parse("ftp://example.com/file.txt").expect("failed to parse URL");
        let err = transport()
            .fetch(url)
            .await
            .err()
            .expect("expected an error");
        assert_eq!(err.kind(), TransportErrorKind::UnsupportedUrlScheme);
    }

    #[tokio::test]
    async fn file_not_found_on_disk() {
        let url = Url::parse("file:///nonexistent/path/to/file.txt").expect("failed to parse URL");
        let err = transport()
            .fetch(url)
            .await
            .err()
            .expect("expected an error");
        assert_eq!(err.kind(), TransportErrorKind::FileNotFound);
    }

    #[tokio::test]
    async fn file_found_on_disk() {
        let mut tmp = tempfile::NamedTempFile::new().expect("failed to create temp file");
        tmp.write_all(b"hello transport")
            .expect("failed to write to temp file");

        let url = Url::from_file_path(tmp.path()).expect("failed to build file URL");
        let stream = transport().fetch(url).await.expect("fetch failed");
        let body = stream.into_vec().await.expect("failed to read stream");
        assert_eq!(body, b"hello transport");
    }
}


================================================
FILE: tests/conformance/Cargo.toml
================================================
[package]
authors = ["sigstore-rs developers"]
description = "sigstore conformance testing workflow"
edition = "2021"
license = "Apache-2.0"
name = "sigstore-conformance"
version = "0.0.1"

[dependencies]
anyhow = "1.0.75"
clap = { version = "4.0.8", features = ["derive"] }
serde_json = "1.0.107"
sigstore = { path = 
"../../", default-features = false, features = [
    "full",
    "native-tls",
] }
tracing-subscriber = "0.3"

[[bin]]
name = "sigstore"
path = "conformance.rs"


================================================
FILE: tests/conformance/conformance.rs
================================================
//
// Copyright 2023 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// CLI implemented to specification:
// https://github.com/sigstore/sigstore-conformance/blob/main/docs/cli_protocol.md

use std::{fs, process::exit};

use clap::{Parser, Subcommand};
use sigstore::{
    bundle::sign::SigningContext,
    bundle::verify::{blocking::Verifier, policy},
    oauth::IdentityToken,
};

// NOTE: field comments below intentionally use `//`, not `///` —
// doc comments on clap fields would become user-visible help text.
#[derive(Parser, Debug)]
struct Cli {
    #[command(subcommand)]
    command: Commands,
}

#[derive(Subcommand, Debug)]
enum Commands {
    Sign(Sign),
    SignBundle(SignBundle),
    Verify(Verify),
    VerifyBundle(VerifyBundle),
}

#[derive(Parser, Debug)]
struct Sign {
    // The OIDC identity token to use
    #[clap(long)]
    identity_token: String,

    // The path to write the signature to
    #[clap(long)]
    signature: String,

    // The path to write the signing certificate to
    #[clap(long)]
    certificate: String,

    // The artifact to sign
    artifact: String,
}

#[derive(Parser, Debug)]
struct SignBundle {
    // The OIDC identity token to use
    #[clap(long)]
    identity_token: String,

    // The path to write the bundle to
    #[clap(long)]
    bundle: String,

    // The artifact to sign
    artifact: String,
}

#[derive(Parser, Debug)]
struct Verify {
    // The path to the signature to verify
    #[clap(long)]
    signature: String,

    // The path to the signing certificate to verify
    #[clap(long)]
    certificate: String,

    // The expected identity in the signing certificate's SAN extension
    #[clap(long)]
    certificate_identity: String,

    // The expected OIDC issuer for the signing certificate
    #[clap(long)]
    certificate_oidc_issuer: String,

    // The path to the artifact to verify
    artifact: String,
}

#[derive(Parser, Debug)]
struct VerifyBundle {
    // The path to the Sigstore bundle to verify
    #[clap(long)]
    bundle: String,

    // The expected identity in the signing certificate's SAN extension
    #[clap(long)]
    certificate_identity: String,

    // The expected OIDC issuer for the signing certificate
    #[clap(long)]
    certificate_oidc_issuer: String,

    // The path to the artifact to verify
    artifact: String,
}

fn main() {
    tracing_subscriber::fmt::init();

    let cli = Cli::parse();

    // Only the bundle-based flows are implemented; detached sig/cert
    // commands abort via unimplemented!.
    let result = match cli.command {
        Commands::SignBundle(args) => sign_bundle(args),
        Commands::VerifyBundle(args) => verify_bundle(args),
        _ => unimplemented!("sig/cert commands"),
    };

    if let Err(error) = result {
        eprintln!("Operation failed:\n{error:?}");
        exit(-1);
    }

    eprintln!("Operation succeeded!");
}

// Signs `artifact` with an ephemeral identity-bound key and writes the
// resulting Sigstore bundle as JSON to the `bundle` path.
fn sign_bundle(args: SignBundle) -> anyhow::Result<()> {
    let SignBundle {
        identity_token,
        bundle,
        artifact,
    } = args;
    let identity_token = IdentityToken::try_from(identity_token.as_str())?;
    let bundle = fs::File::create(bundle)?;
    let mut artifact = fs::File::open(artifact)?;

    let context = SigningContext::production()?;
    let signer = context.blocking_signer(identity_token);

    let signing_artifact = signer?.sign(&mut artifact)?;
    let bundle_data = signing_artifact.to_bundle();

    serde_json::to_writer(bundle, &bundle_data)?;

    Ok(())
}

// Verifies `artifact` against the bundle at the `bundle` path, enforcing
// the expected certificate identity and OIDC issuer.
fn verify_bundle(args: VerifyBundle) -> anyhow::Result<()> {
    let VerifyBundle {
        bundle,
        certificate_identity,
        certificate_oidc_issuer,
        artifact,
    } = args;
    let bundle = fs::File::open(bundle)?;
    let mut artifact = fs::File::open(artifact)?;

    let bundle: sigstore::bundle::Bundle =
serde_json::from_reader(bundle)?; let verifier = Verifier::production()?; verifier.verify( &mut artifact, bundle, &policy::Identity::new(certificate_identity, certificate_oidc_issuer), true, )?; Ok(()) } ================================================ FILE: tests/data/keys/cosign_generated_encrypted_empty_private.key ================================================ -----BEGIN ENCRYPTED SIGSTORE PRIVATE KEY----- eyJrZGYiOnsibmFtZSI6InNjcnlwdCIsInBhcmFtcyI6eyJOIjo2NTUzNiwiciI6 OCwicCI6MX0sInNhbHQiOiJFY3pJR3Z0bnpFcCsxaEpudERnbGI1UnoxN3gwQVMy YklvWGRNcDQrU1NJPSJ9LCJjaXBoZXIiOnsibmFtZSI6Im5hY2wvc2VjcmV0Ym94 Iiwibm9uY2UiOiJtSVVKbG1OMzNINExvZ0dhaG5MamdmK3R4SmJwci93MCJ9LCJj aXBoZXJ0ZXh0IjoiM3FtS2FidXRuYm1WT1IvTkZGM2NDaUErTXp1WWQ5L0VERDVI MVlUSGhQVzZsSzAvdjVqdDZCQzlsME12NGV5am9qZkl0d3B6a2JJOXQrZGx5VXZ3 VUgvMWZjbkFZT2dEdXRLQzkvSkNiOE02SVY2VHpVaDF5c0ZFWDFzeG9xb1FzeUpL bjM0UVlldFNlaVdDaGtmUHUyZXByQjFEV0pwekdzTGZIakxNVTkzOEdmNk1xM1A0 N0ZSd2syZzY1cFZnSGMwVWV1L1N1OUs0Zmc9PSJ9 -----END ENCRYPTED SIGSTORE PRIVATE KEY----- ================================================ FILE: tests/data/keys/ecdsa_encrypted_private.key ================================================ -----BEGIN ENCRYPTED COSIGN PRIVATE KEY----- eyJrZGYiOnsibmFtZSI6InNjcnlwdCIsInBhcmFtcyI6eyJOIjozMjc2OCwiciI6 OCwicCI6MX0sInNhbHQiOiIrRFJrTXNRZmNUNTNKNCs1Y3VkUnlnWTUyWUMrK0RW TmxaSTN4VnlwWFlvPSJ9LCJjaXBoZXIiOnsibmFtZSI6Im5hY2wvc2VjcmV0Ym94 Iiwibm9uY2UiOiJnNTFORnBqQUxlemhvbGVWTWVjUFJUNUErMVhMcDAxeiJ9LCJj aXBoZXJ0ZXh0Ijoib0YvN09mOGlQRDlkNlc2Mk5SWGVibEszWXZmNFFucTZORm16 Q0VyV2FTVUJOZTNXSFJiRVNYK2dJOCszejg4TDBDVXlWdm9LTHhuR0xhVnhnczVD YUdyekcrMWxFTEI1cGRMVmZXd1VlQzlLelNab0hxZHZLaUsvUmNodVhpS0VqYmdv ajNGd2pMcmhEYkY5a2lVWk93REpuT01kUkoxNnI3emxjRHdwT3htOXFrbWFDWDlB VjQ4b1RXVG0rQXExOHlDZE50MDYybGdudVE9PSJ9 -----END ENCRYPTED COSIGN PRIVATE KEY----- ================================================ FILE: tests/data/keys/ecdsa_private.key ================================================ -----BEGIN PRIVATE KEY----- 
MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgAbUSDapDt/yShCq1 rzJwhGj9fMQd21E5SXmln12o8J+hRANCAAQfFCADQhM36xItBLLsGZmMDe5hqtPc gRx8+8Zf40O4VAyyv3KO5HePY23r/kVZ+YkXwS55sYSpF5F++AQml0PP -----END PRIVATE KEY----- ================================================ FILE: tests/data/keys/ed25519_encrypted_private.key ================================================ -----BEGIN ENCRYPTED SIGSTORE PRIVATE KEY----- eyJrZGYiOnsibmFtZSI6InNjcnlwdCIsInBhcmFtcyI6eyJOIjozMjc2OCwiciI6 OCwicCI6MX0sInNhbHQiOiJ1elF5Snc4b1FjWCt1T3RlWDljSHpVOEhycWlJVzJV ck9YbnpJb1ljaUtJPSJ9LCJjaXBoZXIiOnsibmFtZSI6Im5hY2wvc2VjcmV0Ym94 Iiwibm9uY2UiOiJKeUJDMS81K2E3emtxdnE5dVpTaHIvZDRwNXVjQW1WQSJ9LCJj aXBoZXJ0ZXh0IjoiZC9TMndRaVJ0YjcwazFmZEd0ZzRFa0RXZ0ZJcU9NRWd3TDNI bUtzWjBpdDlJcTFRSVplUGJoYWRTKzJORUZzWVA4TlhVOGpoT1dtb0QreWZwbkMz dTBYQTRvcFBib2JwbEpleWJEUUtQdW1sa0tiRUc5MnRCa3pUMFJwNnA1dDdFeVVk In0= -----END ENCRYPTED SIGSTORE PRIVATE KEY----- ================================================ FILE: tests/data/keys/ed25519_private.key ================================================ -----BEGIN PRIVATE KEY----- MFECAQEwBQYDK2VwBCIEILZ7IMwQHwlrU+a5l0WnujhvsmQXrWsStjtbAj8n/EiJ gSEAXtg9yVbH8JADtC6sn4NBh1CSDWeCv0ABIj4TtoJj9ak= -----END PRIVATE KEY----- ================================================ FILE: tests/data/keys/rsa_encrypted_private.key ================================================ -----BEGIN ENCRYPTED SIGSTORE PRIVATE KEY----- eyJrZGYiOnsibmFtZSI6InNjcnlwdCIsInBhcmFtcyI6eyJOIjozMjc2OCwiciI6 OCwicCI6MX0sInNhbHQiOiJMelBIYTM2VkllY01YZ1NkeXMyelIreHdpRWxTcGF0 OXBUZFlLVXVOK3VVPSJ9LCJjaXBoZXIiOnsibmFtZSI6Im5hY2wvc2VjcmV0Ym94 Iiwibm9uY2UiOiJkUncvUXMvOGFXUzdhZzJjS2JUM1pVMDhBdVR1QmswQiJ9LCJj aXBoZXJ0ZXh0IjoiZDBRZUlIWmlpYVlCenlodXB5R0FFRmZtVzJCTFFxemJTRm9Y VnRDek5JK2VJTm8xOU9RTW1VNHo0Z2NQY1FHbkE3ay9yc3h0NWRuWjFHOHpsZ0Fl UzlUVFVMbWdSZjU5Y1oyRGRrOHRYVSs3R3R3YkNCTWFPY1hCSTBCUDc1VGJJK0F6 MEUvT1drZjNhQlhOSE5veU5Ecklyc05obEluUTZ3ODIwZ3pPK0xUYmxVbnJ0UDVm 
OFV6V0hDc09hazN1M0hIOXE4bytOZkJ2ZXV1cUp5MUNtckhYamJJWjRtaHR6b2tv VnROeGQwTFpQN3c3aXJFbXBQaExLMHBWRHpnTlg1OTdGVXlNUzFGcVh1YnBDbFR6 QVBQNUZmNG9kaXJMZVdGL3djUkxFNlhkcDkxeVRVZHoxZ2tQRnN3UmZPVmJYbWUz d3VwNGhrMXQxeWpMZkdYbUYzWjdWWEFyRDhyenV5aDhsZUpaYlZ0ekRXSzlvVmw3 STNIYmx6Qmx5eGplRjZZeVAvZ1JqUjBCOGZxN1F2a1RjdHlvcHgxdnNsM1E2S3ly YitiWE5mMGdMSkxRQ01Id3h2TE0yaHFoek5jcDYxendoT1hoNjdpK2grbUtweXBk a1FMOGZ2NEI1RTd0Y0prTVg2cTJoeHpMQVdWRXFwTUVqY1k0dmszd0dkLzhPckZQ aFd3NEpMY1c5VTdSRUx3ZTM2RFpzbk9DNW0zK3VRZDJEYUNQSUhyUEw3eVFYOUhv Qnp1UnFXQ3R3TXNXNWZmMFgycEhpY3l4MVdlY3JMc2crc2Uwck1QRUJNT3hXL3VZ VjRKTnhBa0FuMTRsbGZqRmlDZlorUGNMcVhLOXVON3k4dU9VeGVYN0hCSmRCdmVT emRiYXZoRVExbm4rU0NUK21YQjF0WXJBNFl5a2VMa2RzRWtXdGtHZFhnVjNFQmtM TDByVzlWUENWdkEwTS9mU1ZScGpmQ3g3TkZiZ21GSkl0T1dWMmczWUxxN3h0WU1i VFU5YWdpbzFBTktMeUtlYkFNdDJOUmw1T0sxSXdya29rUXo0WjZwZWlqeWJlMDBT ekZ0akQ2cTVQK3VxTHZwT0NwMEphdTdScGRsOXJqN2lmMTRRRWozNFhvaml4Sm9a ZXprY1k5SEp6QllHSWMyVnpQaEF0TjdIcnZoUDVlVHFaa2ozek5aZnBiT3Nja3BB eFk5TTRNYWhjd3g4a0VGdy8vZFZKZWREVGx2WDc2ODAvL2t3RmROYmNTcmlZYnVD UEkyR3Y2YXVnZ2RweCtWWTY2SmtZbzNYUVdmM255Z0xOUk0xMUVTTWVkVHFuL3NN WnMrNGRDaG84b0tqbGhzZU9URkdDOEpWeENaV2IraHh2RUNNZ0RLTExZbW1PS2J1 cC8vWjg3cVZtUGZjUjc5RVhmN2k1bUZ6QnpzRUFOSXYvaVNpTStSM1R4NXVIUEpr eXRScTVJNkhvOWEzKzZPV3Q2V2dUbGd5aDdmZmZDWGJXamFGNVJHNE1TcDdQV2FD QlBvZ3lnZ0ZFazRIWjUyRWt1S1B4TGZsc200QnpkT0FBdmZhaTloSmt0N2t4WUpS YnVMb3cvMWZxaXNmNHJwcTU3WlZCelJBR09UYW1mT0Y3RnE1a1JNL0FEWStYZWlU UVJ3ZVRFVk4wOWh1cHdhVm9Uc2lubmNJQlIrcUZMU2pERlJWOUI4M1BBdlNSU1hM ZnpwbWgrdU4zY1dWL2k4N2xOb25wTlJUSXR3bUFvZjNRU2NPR0lsOUhiR2tWRDM1 bUgvTDhQWVJMMXZlME0yUjdYbXBsWUY5NkhkU3NML3BiYlVLeGFpa0FwTmJCM2VW U0FTYXdUUEdnYmhBWEtkNEE2YXRkSU5IRHRVbXVqWGxhYTlBL0JhN2h3SU1Ic1kr R3RpL1RzQzhacmN2ZGUzQkh3cHZJdkpHdnRRUS9IZzVhSTMvRE5yazlUYjhQWUpH RlBPTGU4ajU1YUtWYVdmeFZNZnBZYW9FL3pwN09ibEVFWnpLY3A4aFZ4S3p3eFJx Vk1ueGJwYm9TbXBwVzd0Zjc1RXZiUVFyUEUwd2Mzc3EzSm9kMkRYUGlFQmFNOVhB eGZvSTM5YmZ2L2tNUU9ES3p6U2x6SnV1cTFQZUFMVEh0SmVUQld0Wk1jdytFV3p4 
dTNkMndSMWJBQWtpdFVHNkxhRjdZN1BDR2dQWDdSelNVL3Y5bE9Qa0VFMUZhbUxM TFJCZVU3NDU1WU9HSlFJb09TOVRsbGJyMWRVRi91cmRJbitDS2JhUHRWQ0k1TjNK SWNFYzdiYTZsd3dBMkszQVNwY2lXM1MzUmxkcVV0NFdUbXBpcU9BVytETm1qcmts eElFZ0YxOEZ0MzQ5MVBJMUlBWU05UVYrcDVEWnBzV28ycThTSWs3ZC9QL3hpVCtN QkVxMDNNRjA0cWtPeDczeEplN0NqbWJ5UCtBbzluUWpocWpwWmxxWmJoc1RkS3FR M252NXpMTmFDdUMwMFhiYUIvTmRjTGl3VEt4WnV3WHp6MFhmcE9ORnVheG9SWU9R Z01WZlhSSTZ0RXNGaWdUVWgvQm5LaWNjWm5nYWVwNHNIL0Y5MjdES1FIdCtXalc5 OHpHT24rZGFRVWxOV1hOOXlRNERoWFk1WmcremJHZ3hEZi90d1hMMjIzeS9zT0Rt WXlJb2ZUWDNYRWN4YWdOOHBhSStkODNOWEJHdE4vRkNlNmpKejh2R2Fzci8wdnJy c2w4RWozV0FaMXFLWlZsd1hYMTA3V2hzZXV5bEFVaVFZZWVKZ2NmSXRPVHpSYWs4 WHVSekw1eVloR2gxU1hMTURPemNBTE5FWEY5cDg5TmFLSGFNcHUwU1hKWUZidlFS UFNFUk42U2dnSkFLZXMxeDQxMk42STh2MXczdzJPUldDNEUya1RMUk5oNUhReWxp MjhNMnUzNDNtVzlUU2NrN3ZPTWkza2ZuN0VxbUQ3bWl2TElTaENhR3I3TFpJY2R3 bVFoNWtYdDN4NXVzdVZYMGhnMEtIZHo1TWpMenR3bnhxK1M4M3h6eVlHQlFmbnVa Yk0vZ21OVlNiQVd3cm5pYmZuUi9aR2dZOGZFOG8vcFBiU295dWo0RE1GU0pQWmN6 WHNSY004N21ZTWExd0RZdHl5ckYwSnFEQ3FZUmhQYmlSL25XSXhCckFZZW1wZXpu V2RucjBJNjBaYnZ5VDhIc3M0dVQwNU0xN1VwZVdNTkxReUFtZHVqVE1FQUF4NWVx N3llYmFGRWFvbVVFdWVFK3FmaWU0d3FjSEdJTlpVbmNFWlNqTXdBcXIwOFNGM0w4 Z1M3ejk4bUtoYVIwM1p3QmVpLzJGa204SnZEbExRY1o4K2JPaEtwUGtXRTU1MjUr Y082ZjRkUXd4cXpsL0YwYzRDWDZvSmJaVWNoM3BXN1JicE1oaU1vOTFuSFlEMnp2 SFhnclhWMHBIRFlsaHFZVkJlWVRaZkQ5dTE1djluMDRwWHREbXltZXdlS2JIeWtH Q0V1aG81WlI4T0JRRUl2czdaZGVtSFJrR2J1SENyMHpvVjFPam9MUk1xMmE2UzJI ZWt0ZlJ0MlkyRHlIRzUzNDQrTmZzcGJYYWkxdVBhd1pkL1Y2eXRFMUJtLzdMYklQ WVZRaHdINzY3aFFpUHY3bGZJaTQ4WGJoQ3RNakxDTmtaK1RHblBrQWw1Rmwxd0RC eUNFMk13dlBUUlN5aEJHZlMzREJiM3JaSEFTL1Z1VFRmRHRnZFJJY2lyUzJBeWI4 REE0S1lNdDlVQW5qOUxSc0NRdUNUdVVvTXhPRXdIVThRZm5MM3YvY1MrZy9vUG5L U0crZ1hqVXozWWkyWFd6OHpWaHF1OHZJQWJoTVV2REtsWGtwTTJQVm9XTE5zQUtW SVpKTTEvNlMxeDhnNkp2Y3ZNK1poQjd2eGdQVE1iZFVOZ24wRFd0em1wUHZtVWc2 NHREOW00WFdlczJFODZXQ1JBY0lOY2FFYk1QaVFNeUlaUUQ5U2V0dXVQVDdUV3lq SkRUQjNYWU0zc2lLN3ZiZ242N1cxRlZLamhZWGRCQm5tY2wyNnkzbWsyU0VPcmd4 
UVU5RDZCakRJOHhDQklTNWxnd212dXA0WWc3dCtSaUgwbG9XUE93RHEyRHBvSnJO MEN3K0loMk4rRVNTNmgyaEpOTjFxTmN5c0pPK0NqR3U4d3ZIeG90OVlRaWptVXNE YXJmQ3A0ZXRrZjF4Y0VkcVpnb1pqZ2M0K3k4M3Jhb1FEdz09In0= -----END ENCRYPTED SIGSTORE PRIVATE KEY----- ================================================ FILE: tests/data/keys/rsa_private.key ================================================ -----BEGIN RSA PRIVATE KEY----- MIIJKAIBAAKCAgEAzk4HyifDz3BhYUj4YDk6SjwpZgCh1dkEaL/E1/k8VUFvDH53 iJ/dwbIotd920y86nqA1P8JoKbTLuDXMubyCDC7Cx/zBxwVnH2F1qeB50mun2lR0 BEWP2cp8xSIl7Ns0rDFjAX6/vfJcYrzlD/xmGNtkdeterVxHb272R63kVXOfteiQ Xw4pvGUNvtjk4eeCp7n8U34zPLSzVa9C7gAMtDAeuWL4sQvTZCkb5dJ5HePbpdQ1 uEHakxiCQ25x+AqvGkvcbDxad9dUm6JAwKAcfSIWsy0Ie7Pp9KeXbGxQv0Xb1kYa 6hsKtvGlKOrvu33zgwM4dfJjUXT8pkDxHdZAhT+MYfKVl1ANVSgzypezvGQY7fAc j+LXcsVtv6q0SS7e6XyddrI/mQSZdeHD5pT5cO0uLCTJwukDD1Q800xcCz98EW4w VPZGU33vB3MEKflt/FFsxmlMEjw62dqh7JOn4Mcmk684npOjffx13Dacrxn76k7H dLXu0EO56qkg00gx2s7+AVAyf0MD3YvbH3rCtD0aMctfK3CSRuQZYtqXUVjsgzm3 nHPCq8iFCGKfBdk3DADp1NlTMqCMSMuUjAk2/2BL5iCfGYEJ9+TycaLDNpBQdJVJ vzrBsKngdy50X4kXTLIx2vss54Lk4CWs0+TX0RN7wiVJBjmZgHc5u5yJIJcCAwEA AQKCAgB+R08HU43MxLompVa692yRkf+5Gvv0fNDxGSjxFfLzMIk7uZGLRGelr1qx 8KW4ILmd7OyLKYE+vhbQm8XDjvp/YIQDi9hE7S6xC6PNJsUKorDsuDMHhljF8+ap d/yE3ayBFf3HJYFSUC5ylbMUNOd9oZT9hOO/87MaJ26Cc5NHJu4El+T++hlb4vMl 9XcsO9xCtFoZ9S6Bow3+jbfHHKqqBKZZzZXyMQ3kyjD0XP+b5yREff+f2FdlIGRj yA/kxw1laDf03IB3yItWdFt0TM0DX0FLzW3a4kZ7ZbYPPMG0QpuMrf69e230izcQ M7YoKrFKaUc/Eu3uJ1CapzevjryQePLUJdWldwVUipZeCQNKVmeu7G/a59rFTwsw gwghk/eUh+gssSbgUVk1J7qybx1njkiAdSOSjRxLNUGkdQFpxwUiOx0WyXysIffv qVYvYJBCD2gf9hYb0qQcdd4nud1PyECjXVS95SorvEJDCisuoBzAvMj5iHduJimm VnUqEI5qv6ZXE9mUKZW6IISlsf7EuM6QBUKuF4VXF+ynvMGTctHqXW/eoGxnugmj NrXMzvqOGRn5kXs5e3NM9UdyzOZqAh/L8wB5BQKnIoJm2sAZeLQaxOYdo+ntKIsh NcFI0+KagSKGUMF2rLxOf1BOw/dE0Ir5u0avE+YgKAk7XXG0wQKCAQEA+nT0Ql2/ PT3GqfoHyWpvcmttDPY6yTpy9TFltFABZplPpsHaWfn4wWQ3FT4aBmYF962MXgaf LlbQMHed2F3thhTyeEJUrj/L8dK7tB9OjqGQJFlstEKXQZh+eaWlxkikY2khjao8 
B9TFcPcMmCz0EFWVIrOIDEvgbwgp5nzopAsfqz9TRadnOOgL7x8PjVWzRA/SHuEv FcTZuTAQyXI8oRpKYLtg2q7rptKjH/tkxf7IAmCow7vk4g3Cj/ZXfXzCD6CMPuTf X4WFqJUnBYuwlhfHLcT6vOiNLhIq7sV3wle5UGkh4GQ6qWP3/r1TKVtyW7oduIJs m/8duw6b9Ct99wKCAQEA0t7rFwjFY98wJbGMB5dsKkA/WnTGBNSOkNf257DPyZeE 3ljqwq6yarnpB6fu989yG8KUhX81d0BtNJL+T71CwRL6LNxlDoYdKTuWZYHUFHNh u0BRz80d7gSiPai5ZBzgFpIFaDTwMaJjX2RLGEEgcLYFD1LtsZNSyY9FC38Vr+9k zUQ5W5rzxYGw4rRaEL0ovvjYLFz75ns2zPZapCt0c/z/Qd9kyibgGDE3bnL6gkvo aDCkQr4Xc3qy+jJzVsv0n+lcdHBfP/qlcpEWyapXM3/a9OK/KAYsaJa/RBpA+l+H g4YQVwwYODugSmzYKme1JU4h804EN6lHtSvzYkNKYQKCAQAjhpNfFo0Z0rlrQtv3 5fEI+dPuEr8j6/aCcQ9MFE0ekICL1tNyD9MJG330tWpbnf0atLNEYwwRNp8xQMZS +n/GlRIPnNkGHmZ/VrTpR8eM073uagDRUODDnS3Tc3ugNI2czDzGK294bOXUsDZJ H5c++eS9l1mk5N5g4XeQCge1vR4w3Dqjlqs9lyyaLn22PoG/Fb9oQei73cBEVF0N NfcDowcJ0YpbepRShW4+CxqwOwOD0tIdcXl11x3R7c9bLWcZcFx0T2Kf2gCrePyf /MB/ib/m7hni0dm0vz73v2rNVkQi88aqXY00mcmDiLdTFnWSLUQp99YQCo/dCKV2 bPThAoIBAQCuRH3CpoQCmoN+0zEnYPOKI1h4GANCIKvFdkVdipjeQDMVUiSJSbi3 TPcRVa6+65ig6ni1rsBv0jWt+kDjg0S0rUtFYcq+awWUeuM69kVftU8yYeB6vEgc 2YV/MX4tB1QGMxz21rEeQ9aeEhOhcsktfK/Hz0ASve7wFk/4RUmWAWCr5tMEKpWF Rz34zRWVuc3/rUVxvFKNUoyibIHSJPtzk8UcGlOAYQpX0+y8gZcXsUXbPT+yzMgy rldVP/Zj5+A9e6zlqax+AlVSzicn+HdiXyqDsRRLLnbq5JIi5ROIFwS2JEhCuAMY DebVOwiWWuiwcNbL7VC881AIoM7eCUBhAoIBAC4ZPQ0qHhKz4DUHNevoIje9aor1 8711SwQuUpjJ3iVmhLwLPplZZaMq6dNgjPTBosZmtnjKsrR8Kwn/ZPq8siR9Ii41 TRdvT5fiChuaJCT+cUKgvb/vri+hihxT6Sd7YCGvrN/e76Vwz1Bes1DZW7bJN6hU 3Ha8N23Juv0Sb/2zTi2snVClX76l2ftKUUHcuUPCHvhpf/T8XYSIbVxJYBSWVtTv oxQ0q2S/Rle1u/WE0qFfZhfJscdm07Oq+OeXFa6ZiAJ1xjumi4YxpYOdBZPguVlh E5vzvuF5lXGTDVZWBnnI1PZTc96d3NKY86K6w9dQfPhEqTM8QfR7YNqzhw8= -----END RSA PRIVATE KEY----- ================================================ FILE: trust_root/prod/root.json ================================================ { "signatures": [ { "keyid": "6f260089d5923daf20166ca657c543af618346ab971884a99962b01988bbe0c3", "sig": "" }, { "keyid": "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2", "sig": 
"3045022100b0bcf189ce1b93e7db9649d5be512a1880c0e358870e3933e426c5afb8a4061002206d214bd79b09f458ccc521a290aa960c417014fc16e606f82091b5e31814886a" }, { "keyid": "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06", "sig": "" }, { "keyid": "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222", "sig": "3045022100a9b9e294ec21b62dfca6a16a19d084182c12572e33d9c4dcab5317fa1e8a459d022069f68e55ea1f95c5a367aac7a61a65757f93da5a006a5f4d1cf995be812d7602" }, { "keyid": "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70", "sig": "30440220781178ec3915cb16aca757d40e28435ac5378d6b487acb111d1eeb339397f79a0220781cce48ae46f9e47b97a8414fcf466a986726a5896c72a0e4aba3162cb826dd" } ], "signed": { "_type": "root", "consistent_snapshot": true, "expires": "2025-08-19T14:33:09Z", "keys": { "0c87432c3bf09fd99189fdc32fa5eaedf4e4a5fac7bab73fa04a2e0fc64af6f5": { "keyid_hash_algorithms": [ "sha256", "sha512" ], "keytype": "ecdsa", "keyval": { "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEWRiGr5+j+3J5SsH+Ztr5nE2H2wO7\nBV+nO3s93gLca18qTOzHY1oWyAGDykMSsGTUBSt9D+An0KfKsD2mfSM42Q==\n-----END PUBLIC KEY-----\n" }, "scheme": "ecdsa-sha2-nistp256", "x-tuf-on-ci-online-uri": "gcpkms:projects/sigstore-root-signing/locations/global/keyRings/root/cryptoKeys/timestamp/cryptoKeyVersions/1" }, "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06": { "keyid_hash_algorithms": [ "sha256", "sha512" ], "keytype": "ecdsa", "keyval": { "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEzBzVOmHCPojMVLSI364WiiV8NPrD\n6IgRxVliskz/v+y3JER5mcVGcONliDcWMC5J2lfHmjPNPhb4H7xm8LzfSA==\n-----END PUBLIC KEY-----\n" }, "scheme": "ecdsa-sha2-nistp256", "x-tuf-on-ci-keyowner": "@santiagotorres" }, "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222": { "keyid_hash_algorithms": [ "sha256", "sha512" ], "keytype": "ecdsa", "keyval": { "public": "-----BEGIN PUBLIC 
KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEinikSsAQmYkNeH5eYq/CnIzLaacO\nxlSaawQDOwqKy/tCqxq5xxPSJc21K4WIhs9GyOkKfzueY3GILzcMJZ4cWw==\n-----END PUBLIC KEY-----\n" }, "scheme": "ecdsa-sha2-nistp256", "x-tuf-on-ci-keyowner": "@bobcallaway" }, "6f260089d5923daf20166ca657c543af618346ab971884a99962b01988bbe0c3": { "keyid_hash_algorithms": [ "sha256", "sha512" ], "keytype": "ecdsa", "keyval": { "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEy8XKsmhBYDI8Jc0GwzBxeKax0cm5\nSTKEU65HPFunUn41sT8pi0FjM4IkHz/YUmwmLUO0Wt7lxhj6BkLIK4qYAw==\n-----END PUBLIC KEY-----\n" }, "scheme": "ecdsa-sha2-nistp256", "x-tuf-on-ci-keyowner": "@dlorenc" }, "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70": { "keyid_hash_algorithms": [ "sha256", "sha512" ], "keytype": "ecdsa", "keyval": { "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE0ghrh92Lw1Yr3idGV5WqCtMDB8Cx\n+D8hdC4w2ZLNIplVRoVGLskYa3gheMyOjiJ8kPi15aQ2//7P+oj7UvJPGw==\n-----END PUBLIC KEY-----\n" }, "scheme": "ecdsa-sha2-nistp256", "x-tuf-on-ci-keyowner": "@joshuagl" }, "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2": { "keyid_hash_algorithms": [ "sha256", "sha512" ], "keytype": "ecdsa", "keyval": { "public": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEEXsz3SZXFb8jMV42j6pJlyjbjR8K\nN3Bwocexq6LMIb5qsWKOQvLN16NUefLc4HswOoumRsVVaajSpQS6fobkRw==\n-----END PUBLIC KEY-----\n" }, "scheme": "ecdsa-sha2-nistp256", "x-tuf-on-ci-keyowner": "@mnm678" } }, "roles": { "root": { "keyids": [ "6f260089d5923daf20166ca657c543af618346ab971884a99962b01988bbe0c3", "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2", "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06", "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222", "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70" ], "threshold": 3 }, "snapshot": { "keyids": [ "0c87432c3bf09fd99189fdc32fa5eaedf4e4a5fac7bab73fa04a2e0fc64af6f5" ], 
"threshold": 1, "x-tuf-on-ci-expiry-period": 3650, "x-tuf-on-ci-signing-period": 365 }, "targets": { "keyids": [ "6f260089d5923daf20166ca657c543af618346ab971884a99962b01988bbe0c3", "e71a54d543835ba86adad9460379c7641fb8726d164ea766801a1c522aba7ea2", "22f4caec6d8e6f9555af66b3d4c3cb06a3bb23fdc7e39c916c61f462e6f52b06", "61643838125b440b40db6942f5cb5a31c0dc04368316eb2aaa58b95904a58222", "a687e5bf4fab82b0ee58d46e05c9535145a2c9afb458f43d42b45ca0fdce2a70" ], "threshold": 3 }, "timestamp": { "keyids": [ "0c87432c3bf09fd99189fdc32fa5eaedf4e4a5fac7bab73fa04a2e0fc64af6f5" ], "threshold": 1, "x-tuf-on-ci-expiry-period": 7, "x-tuf-on-ci-signing-period": 6 } }, "spec_version": "1.0", "version": 12, "x-tuf-on-ci-expiry-period": 197, "x-tuf-on-ci-signing-period": 46 } } ================================================ FILE: trust_root/prod/trusted_root.json ================================================ { "mediaType": "application/vnd.dev.sigstore.trustedroot+json;version=0.1", "tlogs": [ { "baseUrl": "https://rekor.sigstore.dev", "hashAlgorithm": "SHA2_256", "publicKey": { "rawBytes": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE2G2Y+2tabdTV5BcGiBIx0a9fAFwrkBbmLSGtks4L3qX6yYY0zufBnhC8Ur/iy55GhWP/9A/bY2LhC30M9+RYtw==", "keyDetails": "PKIX_ECDSA_P256_SHA_256", "validFor": { "start": "2021-01-12T11:53:27.000Z" } }, "logId": { "keyId": "wNI9atQGlz+VWfO6LRygH4QUfY/8W4RFwiT5i5WRgB0=" } } ], "certificateAuthorities": [ { "subject": { "organization": "sigstore.dev", "commonName": "sigstore" }, "uri": "https://fulcio.sigstore.dev", "certChain": { "certificates": [ { "rawBytes": 
"MIIB+DCCAX6gAwIBAgITNVkDZoCiofPDsy7dfm6geLbuhzAKBggqhkjOPQQDAzAqMRUwEwYDVQQKEwxzaWdzdG9yZS5kZXYxETAPBgNVBAMTCHNpZ3N0b3JlMB4XDTIxMDMwNzAzMjAyOVoXDTMxMDIyMzAzMjAyOVowKjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTB2MBAGByqGSM49AgEGBSuBBAAiA2IABLSyA7Ii5k+pNO8ZEWY0ylemWDowOkNa3kL+GZE5Z5GWehL9/A9bRNA3RbrsZ5i0JcastaRL7Sp5fp/jD5dxqc/UdTVnlvS16an+2Yfswe/QuLolRUCrcOE2+2iA5+tzd6NmMGQwDgYDVR0PAQH/BAQDAgEGMBIGA1UdEwEB/wQIMAYBAf8CAQEwHQYDVR0OBBYEFMjFHQBBmiQpMlEk6w2uSu1KBtPsMB8GA1UdIwQYMBaAFMjFHQBBmiQpMlEk6w2uSu1KBtPsMAoGCCqGSM49BAMDA2gAMGUCMH8liWJfMui6vXXBhjDgY4MwslmN/TJxVe/83WrFomwmNf056y1X48F9c4m3a3ozXAIxAKjRay5/aj/jsKKGIkmQatjI8uupHr/+CxFvaJWmpYqNkLDGRU+9orzh5hI2RrcuaQ==" } ] }, "validFor": { "start": "2021-03-07T03:20:29.000Z", "end": "2022-12-31T23:59:59.999Z" } }, { "subject": { "organization": "sigstore.dev", "commonName": "sigstore" }, "uri": "https://fulcio.sigstore.dev", "certChain": { "certificates": [ { "rawBytes": "MIICGjCCAaGgAwIBAgIUALnViVfnU0brJasmRkHrn/UnfaQwCgYIKoZIzj0EAwMwKjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTAeFw0yMjA0MTMyMDA2MTVaFw0zMTEwMDUxMzU2NThaMDcxFTATBgNVBAoTDHNpZ3N0b3JlLmRldjEeMBwGA1UEAxMVc2lnc3RvcmUtaW50ZXJtZWRpYXRlMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE8RVS/ysH+NOvuDZyPIZtilgUF9NlarYpAd9HP1vBBH1U5CV77LSS7s0ZiH4nE7Hv7ptS6LvvR/STk798LVgMzLlJ4HeIfF3tHSaexLcYpSASr1kS0N/RgBJz/9jWCiXno3sweTAOBgNVHQ8BAf8EBAMCAQYwEwYDVR0lBAwwCgYIKwYBBQUHAwMwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHQ4EFgQU39Ppz1YkEZb5qNjpKFWixi4YZD8wHwYDVR0jBBgwFoAUWMAeX5FFpWapesyQoZMi0CrFxfowCgYIKoZIzj0EAwMDZwAwZAIwPCsQK4DYiZYDPIaDi5HFKnfxXx6ASSVmERfsynYBiX2X6SJRnZU84/9DZdnFvvxmAjBOt6QpBlc4J/0DxvkTCqpclvziL6BCCPnjdlIB3Pu3BxsPmygUY7Ii2zbdCdliiow=" }, { "rawBytes": 
"MIIB9zCCAXygAwIBAgIUALZNAPFdxHPwjeDloDwyYChAO/4wCgYIKoZIzj0EAwMwKjEVMBMGA1UEChMMc2lnc3RvcmUuZGV2MREwDwYDVQQDEwhzaWdzdG9yZTAeFw0yMTEwMDcxMzU2NTlaFw0zMTEwMDUxMzU2NThaMCoxFTATBgNVBAoTDHNpZ3N0b3JlLmRldjERMA8GA1UEAxMIc2lnc3RvcmUwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAT7XeFT4rb3PQGwS4IajtLk3/OlnpgangaBclYpsYBr5i+4ynB07ceb3LP0OIOZdxexX69c5iVuyJRQ+Hz05yi+UF3uBWAlHpiS5sh0+H2GHE7SXrk1EC5m1Tr19L9gg92jYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRYwB5fkUWlZql6zJChkyLQKsXF+jAfBgNVHSMEGDAWgBRYwB5fkUWlZql6zJChkyLQKsXF+jAKBggqhkjOPQQDAwNpADBmAjEAj1nHeXZp+13NWBNa+EDsDP8G1WWg1tCMWP/WHPqpaVo0jhsweNFZgSs0eE7wYI4qAjEA2WB9ot98sIkoF3vZYdd3/VtWB5b9TNMea7Ix/stJ5TfcLLeABLE4BNJOsQ4vnBHJ" } ] }, "validFor": { "start": "2022-04-13T20:06:15.000Z" } } ], "ctlogs": [ { "baseUrl": "https://ctfe.sigstore.dev/test", "hashAlgorithm": "SHA2_256", "publicKey": { "rawBytes": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEbfwR+RJudXscgRBRpKX1XFDy3PyudDxz/SfnRi1fT8ekpfBd2O1uoz7jr3Z8nKzxA69EUQ+eFCFI3zeubPWU7w==", "keyDetails": "PKIX_ECDSA_P256_SHA_256", "validFor": { "start": "2021-03-14T00:00:00.000Z", "end": "2022-10-31T23:59:59.999Z" } }, "logId": { "keyId": "CGCS8ChS/2hF0dFrJ4ScRWcYrBY9wzjSbea8IgY2b3I=" } }, { "baseUrl": "https://ctfe.sigstore.dev/2022", "hashAlgorithm": "SHA2_256", "publicKey": { "rawBytes": "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEiPSlFi0CmFTfEjCUqF9HuCEcYXNKAaYalIJmBZ8yyezPjTqhxrKBpMnaocVtLJBI1eM3uXnQzQGAJdJ4gs9Fyw==", "keyDetails": "PKIX_ECDSA_P256_SHA_256", "validFor": { "start": "2022-10-20T00:00:00.000Z" } }, "logId": { "keyId": "3T0wasbHETJjGR4cmWc3AqJKXrjePK3/h4pygC8p7o4=" } } ] }