Repository: theseus-rs/postgresql-embedded Branch: main Commit: 1ba09d45f1bb Files: 183 Total size: 786.5 KB Directory structure: gitextract_ipdr6byb/ ├── .github/ │ ├── FUNDING.yml │ ├── ISSUE_TEMPLATE/ │ │ ├── bug_report.md │ │ ├── config.yml │ │ └── feature_request.md │ ├── codecov.yml │ ├── dependabot.yml │ └── workflows/ │ ├── benchmarks.yml │ ├── checks.yml │ ├── ci.yml │ ├── pr-benchmarks.yml │ └── release-plz.yml ├── .gitignore ├── .rustfmt.toml ├── CHANGELOG.md ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── SECURITY.md ├── clippy.toml ├── deny.toml ├── examples/ │ ├── archive_async/ │ │ ├── Cargo.toml │ │ └── src/ │ │ └── main.rs │ ├── archive_sync/ │ │ ├── Cargo.toml │ │ └── src/ │ │ └── main.rs │ ├── axum_embedded/ │ │ ├── Cargo.toml │ │ └── src/ │ │ └── main.rs │ ├── diesel_embedded/ │ │ ├── Cargo.toml │ │ ├── README.md │ │ ├── diesel.toml │ │ ├── migrations/ │ │ │ ├── .keep │ │ │ └── 2024-08-17-200823_create_posts/ │ │ │ ├── down.sql │ │ │ └── up.sql │ │ └── src/ │ │ ├── main.rs │ │ ├── models.rs │ │ └── schema.rs │ ├── download_progress_bar/ │ │ ├── Cargo.toml │ │ └── src/ │ │ └── main.rs │ ├── embedded_async/ │ │ ├── Cargo.toml │ │ └── src/ │ │ └── main.rs │ ├── embedded_sync/ │ │ ├── Cargo.toml │ │ └── src/ │ │ └── main.rs │ ├── portal_corp_extension/ │ │ ├── Cargo.toml │ │ └── src/ │ │ └── main.rs │ ├── postgres_embedded/ │ │ ├── Cargo.toml │ │ ├── README.md │ │ └── src/ │ │ └── main.rs │ ├── sqlx_embedded/ │ │ ├── Cargo.toml │ │ ├── README.md │ │ └── src/ │ │ └── main.rs │ ├── tensor_chord_extension/ │ │ ├── Cargo.toml │ │ └── src/ │ │ └── main.rs │ ├── unix_socket/ │ │ ├── Cargo.toml │ │ └── src/ │ │ └── main.rs │ └── zonky/ │ ├── Cargo.toml │ └── src/ │ └── main.rs ├── postgresql_archive/ │ ├── Cargo.toml │ ├── README.md │ ├── benches/ │ │ └── archive.rs │ ├── src/ │ │ ├── archive.rs │ │ ├── blocking/ │ │ │ ├── archive.rs │ │ │ └── mod.rs │ │ ├── configuration/ │ │ │ ├── custom/ │ │ │ │ ├── matcher.rs │ │ │ │ └── mod.rs │ │ 
│ ├── mod.rs │ │ │ ├── theseus/ │ │ │ │ ├── extractor.rs │ │ │ │ ├── matcher.rs │ │ │ │ └── mod.rs │ │ │ └── zonky/ │ │ │ ├── extractor.rs │ │ │ ├── matcher.rs │ │ │ ├── mod.rs │ │ │ └── repository.rs │ │ ├── error.rs │ │ ├── extractor/ │ │ │ ├── mod.rs │ │ │ ├── model.rs │ │ │ ├── registry.rs │ │ │ ├── tar_gz_extractor.rs │ │ │ ├── tar_xz_extractor.rs │ │ │ └── zip_extractor.rs │ │ ├── hasher/ │ │ │ ├── md5.rs │ │ │ ├── mod.rs │ │ │ ├── registry.rs │ │ │ ├── sha1.rs │ │ │ ├── sha2_256.rs │ │ │ └── sha2_512.rs │ │ ├── lib.rs │ │ ├── matcher/ │ │ │ ├── mod.rs │ │ │ └── registry.rs │ │ ├── repository/ │ │ │ ├── github/ │ │ │ │ ├── mod.rs │ │ │ │ ├── models.rs │ │ │ │ └── repository.rs │ │ │ ├── maven/ │ │ │ │ ├── mod.rs │ │ │ │ ├── models.rs │ │ │ │ └── repository.rs │ │ │ ├── mod.rs │ │ │ ├── model.rs │ │ │ └── registry.rs │ │ └── version.rs │ └── tests/ │ ├── archive.rs │ ├── blocking.rs │ └── zonky.rs ├── postgresql_commands/ │ ├── Cargo.toml │ ├── README.md │ └── src/ │ ├── clusterdb.rs │ ├── createdb.rs │ ├── createuser.rs │ ├── dropdb.rs │ ├── dropuser.rs │ ├── ecpg.rs │ ├── error.rs │ ├── initdb.rs │ ├── lib.rs │ ├── oid2name.rs │ ├── pg_amcheck.rs │ ├── pg_archivecleanup.rs │ ├── pg_basebackup.rs │ ├── pg_checksums.rs │ ├── pg_config.rs │ ├── pg_controldata.rs │ ├── pg_ctl.rs │ ├── pg_dump.rs │ ├── pg_dumpall.rs │ ├── pg_isready.rs │ ├── pg_receivewal.rs │ ├── pg_recvlogical.rs │ ├── pg_resetwal.rs │ ├── pg_restore.rs │ ├── pg_rewind.rs │ ├── pg_test_fsync.rs │ ├── pg_test_timing.rs │ ├── pg_upgrade.rs │ ├── pg_verifybackup.rs │ ├── pg_waldump.rs │ ├── pgbench.rs │ ├── postgres.rs │ ├── psql.rs │ ├── reindexdb.rs │ ├── traits.rs │ ├── vacuumdb.rs │ └── vacuumlo.rs ├── postgresql_embedded/ │ ├── Cargo.toml │ ├── README.md │ ├── benches/ │ │ └── embedded.rs │ ├── build/ │ │ ├── build.rs │ │ └── bundle.rs │ ├── src/ │ │ ├── blocking/ │ │ │ ├── mod.rs │ │ │ └── postgresql.rs │ │ ├── error.rs │ │ ├── lib.rs │ │ ├── postgresql.rs │ │ └── settings.rs │ └── tests/ │ 
├── blocking.rs │ ├── dump_command.rs │ ├── environment_variables.rs │ ├── postgresql.rs │ ├── start_config.rs │ ├── unix_socket.rs │ └── zonky.rs ├── postgresql_extensions/ │ ├── Cargo.toml │ ├── README.md │ ├── src/ │ │ ├── blocking/ │ │ │ ├── extensions.rs │ │ │ └── mod.rs │ │ ├── error.rs │ │ ├── extensions.rs │ │ ├── lib.rs │ │ ├── matcher.rs │ │ ├── model.rs │ │ └── repository/ │ │ ├── mod.rs │ │ ├── model.rs │ │ ├── portal_corp/ │ │ │ ├── mod.rs │ │ │ └── repository.rs │ │ ├── registry.rs │ │ ├── steampipe/ │ │ │ ├── extensions.rs │ │ │ ├── mod.rs │ │ │ └── repository.rs │ │ └── tensor_chord/ │ │ ├── mod.rs │ │ └── repository.rs │ └── tests/ │ ├── blocking.rs │ ├── extensions.rs │ ├── portal_corp.rs │ └── steampipe.rs ├── release-plz.toml └── rust-toolchain.toml ================================================ FILE CONTENTS ================================================ ================================================ FILE: .github/FUNDING.yml ================================================ github: brianheineman ================================================ FILE: .github/ISSUE_TEMPLATE/bug_report.md ================================================ --- name: "\U0001F41E Bug Report" about: "If something isn't working as expected \U0001F914." title: '' labels: 'i: bug, i: needs triage' assignees: '' --- **What steps will reproduce the bug? (please provide code snippet if relevant)** 1. step 1 2. step 2 3. ... **What happens?** ... **What did you expect to happen instead?** ... ### Information about your environment * postgresql_embedded version: [REQUIRED] (e.g. "0.14.2") * Database version: [REQUIRED] (e.g. 
"16.4.0") * Operating system: [REQUIRED] ================================================ FILE: .github/ISSUE_TEMPLATE/config.yml ================================================ blank_issues_enabled: false ================================================ FILE: .github/ISSUE_TEMPLATE/feature_request.md ================================================ --- name: "\U00002728 Feature Request" about: "I have a suggestion (and may want to implement it \U0001F642)!" title: '' labels: 'i: enhancement, i: needs triage' assignees: '' --- **Is your feature request related to a problem? Please describe.** A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] **Describe the solution you'd like** A clear and concise description of what you want to happen. **Describe alternatives you've considered** A clear and concise description of any alternative solutions or features you've considered. **Additional context** Add any other context about the feature request here. 
================================================ FILE: .github/codecov.yml ================================================ coverage: status: patch: default: threshold: 0.05% project: default: threshold: 0.05% ================================================ FILE: .github/dependabot.yml ================================================ version: 2 updates: - package-ecosystem: "cargo" directory: "/" schedule: interval: "monthly" ================================================ FILE: .github/workflows/benchmarks.yml ================================================ name: Benchmarks on: push: branches: - main pull_request: types: [ opened, reopened, synchronize ] permissions: contents: read jobs: benchmark: name: Run Benchmarks runs-on: ubuntu-latest permissions: pull-requests: write steps: - name: Checkout source code uses: actions/checkout@v4 - name: Install Rust uses: dtolnay/rust-toolchain@master with: components: 'llvm-tools-preview' toolchain: stable - name: Install benchmarking tools uses: bencherdev/bencher@main - name: Run benchmarks if: ${{ github.event_name == 'pull_request' }} env: BENCHER_API_TOKEN: ${{ secrets.BENCHER_API_TOKEN }} BENCHER_PROJECT: theseus-rs-postgresql-embedded BENCHER_ADAPTER: rust_criterion run: | bencher run \ --branch $GITHUB_HEAD_REF \ --ci-number "${{ github.event.number }}" \ --github-actions "${{ secrets.GITHUB_TOKEN }}" \ --err \ "cargo bench --features blocking" - name: Run benchmarks if: ${{ github.event_name != 'pull_request' }} env: BENCHER_API_TOKEN: ${{ secrets.BENCHER_API_TOKEN }} BENCHER_PROJECT: theseus-rs-postgresql-embedded BENCHER_ADAPTER: rust_criterion run: | bencher run "cargo bench --features blocking" ================================================ FILE: .github/workflows/checks.yml ================================================ name: Fast checks env: CARGO_TERM_COLOR: always RUSTFLAGS: "-D warnings" on: workflow_call: permissions: contents: read jobs: audit: runs-on: ubuntu-24.04 steps: - name: Checkout 
repository uses: actions/checkout@v4 - name: Install Rust uses: dtolnay/rust-toolchain@master with: toolchain: stable - name: Install cargo audit run: cargo install cargo-audit - name: Audit dependencies run: cargo audit check: runs-on: ubuntu-24.04 steps: - name: Checkout repository uses: actions/checkout@v4 - name: Install Rust uses: dtolnay/rust-toolchain@master with: toolchain: stable - name: Check the project run: | cargo check --workspace --all-targets --features blocking cargo check --workspace --all-targets --features bundled cargo check --workspace --all-targets --features tokio cargo check --workspace --all-targets --all-features clippy: runs-on: ubuntu-24.04 steps: - name: Checkout repository uses: actions/checkout@v4 - name: Install Rust uses: dtolnay/rust-toolchain@master with: toolchain: stable components: clippy - name: Check lints env: GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} run: | cargo clippy --all-targets --all-features --examples --tests deny: runs-on: ubuntu-24.04 steps: - name: Checkout repository uses: actions/checkout@v4 - name: Install Rust uses: dtolnay/rust-toolchain@master with: toolchain: stable - name: Install cargo deny run: cargo install cargo-deny - name: Check licenses run: cargo deny check --allow duplicate doc: runs-on: ubuntu-24.04 steps: - name: Checkout repository uses: actions/checkout@v4 - name: Install Rust uses: dtolnay/rust-toolchain@master with: toolchain: stable - name: Check documentation env: RUSTDOCFLAGS: -D warnings run: cargo doc --workspace --no-deps --document-private-items --all-features fmt: runs-on: ubuntu-24.04 steps: - name: Checkout repository uses: actions/checkout@v4 - name: Install Rust uses: dtolnay/rust-toolchain@master with: toolchain: stable components: rustfmt - name: Check formatting run: cargo fmt --all --check ================================================ FILE: .github/workflows/ci.yml ================================================ name: ci on: push: branches: - main pull_request: branches: 
- main permissions: contents: read jobs: checks: name: Checks uses: ./.github/workflows/checks.yml build: name: ${{ matrix.platform }} needs: [ checks ] runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: platform: - linux-arm - linux-x64 - macos-arm - macos-x64 - windows-x64 include: - platform: linux-arm os: ubuntu-24.04-arm - platform: linux-x64 os: ubuntu-latest - platform: macos-arm os: macos-15 - platform: macos-x64 os: macos-15-intel - platform: windows-x64 os: windows-2022 steps: - name: Checkout source code uses: actions/checkout@v4 - name: Install Rust uses: dtolnay/rust-toolchain@master with: toolchain: stable - name: Install cargo-llvm-cov uses: taiki-e/install-action@main with: tool: cargo-llvm-cov - name: Tests if: ${{ matrix.platform != 'linux-x64' }} env: CARGO_TERM_COLOR: always GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} RUST_LOG: "info,postgresql_archive=debug,postgresql_commands=debug,postgresql_embedded=debug" RUST_LOG_SPAN_EVENTS: full run: | cargo test - name: Tests if: ${{ matrix.platform == 'linux-x64' }} env: CARGO_TERM_COLOR: always GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} RUST_LOG: "info,postgresql_archive=debug,postgresql_commands=debug,postgresql_embedded=debug" RUST_LOG_SPAN_EVENTS: full run: | cargo llvm-cov --all-features --workspace --lcov --output-path lcov.info - name: Upload to codecov.io if: ${{ matrix.platform == 'linux-x64' }} uses: codecov/codecov-action@v4 with: files: lcov.info fail_ci_if_error: true verbose: true env: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} ================================================ FILE: .github/workflows/pr-benchmarks.yml ================================================ name: Benchmarks on: pull_request: types: [ opened, reopened, synchronize ] permissions: contents: read jobs: benchmark: name: Run Benchmarks runs-on: ubuntu-22.04 permissions: pull-requests: write steps: - name: Checkout source code uses: actions/checkout@v4 - name: Install Rust uses: dtolnay/rust-toolchain@master with: 
components: 'llvm-tools-preview' toolchain: stable - name: Install benchmarking tools uses: bencherdev/bencher@main - name: Run benchmarks env: BENCHER_API_TOKEN: ${{ secrets.BENCHER_API_TOKEN }} BENCHER_PROJECT: theseus-rs-postgresql-embedded BENCHER_ADAPTER: rust_criterion run: | bencher run \ --branch $GITHUB_HEAD_REF \ --ci-number "${{ github.event.number }}" \ --github-actions "${{ secrets.GITHUB_TOKEN }}" \ --err \ "cargo bench --features blocking" ================================================ FILE: .github/workflows/release-plz.yml ================================================ name: Release-plz permissions: pull-requests: write contents: write on: push: branches: - main jobs: # Release unpublished packages. release-plz-release: name: Release-plz release runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v4 with: fetch-depth: 0 - name: Install Rust toolchain uses: dtolnay/rust-toolchain@stable - name: Run release-plz uses: release-plz/action@v0.5 with: command: release env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} # Create a PR with the new versions and changelog, preparing the next release. 
release-plz-pr: name: Release-plz PR runs-on: ubuntu-latest concurrency: group: release-plz-${{ github.ref }} cancel-in-progress: false steps: - name: Checkout repository uses: actions/checkout@v4 with: fetch-depth: 0 - name: Install Rust toolchain uses: dtolnay/rust-toolchain@stable - name: Run release-plz uses: release-plz/action@v0.5 with: command: release-pr env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} ================================================ FILE: .gitignore ================================================ /target # Rust Rover /.idea ================================================ FILE: .rustfmt.toml ================================================ newline_style = "Unix" ================================================ FILE: CHANGELOG.md ================================================ # Changelog All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
## [Unreleased] ## `postgresql_extensions` - [0.20.2](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_extensions-v0.20.1...postgresql_extensions-v0.20.2) - 2026-02-22 ### Other - remove num-format dependency ## `postgresql_embedded` - [0.20.2](https://github.com/theseus-rs/postgresql-embedded/compare/v0.20.1...v0.20.2) - 2026-02-22 ### Added - add unix socket support ### Other - remove num-format dependency ## `postgresql_commands` - [0.20.2](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_commands-v0.20.1...postgresql_commands-v0.20.2) - 2026-02-22 ### Added - add unix socket support ## `postgresql_archive` - [0.20.2](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_archive-v0.20.1...postgresql_archive-v0.20.2) - 2026-02-22 ### Added - add unix socket support ### Other - remove num-format dependency ## `postgresql_extensions` - [0.20.1](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_extensions-v0.20.0...postgresql_extensions-v0.20.1) - 2026-02-08 ### Other - update rust to 1.92.0 - reduce map_err by adding some From implementations ## `postgresql_embedded` - [0.20.1](https://github.com/theseus-rs/postgresql-embedded/compare/v0.20.0...v0.20.1) - 2026-02-08 ### Added - add postgresql v18 support ### Fixed - update to support all targets ### Other - Merge branch 'main' into caching_builds - Target - Cache archives - update rust to 1.92.0 - reduce map_err by adding some From implementations ## `postgresql_commands` - [0.20.1](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_commands-v0.20.0...postgresql_commands-v0.20.1) - 2026-02-08 ### Other - update rust to 1.92.0 ## `postgresql_archive` - [0.20.1](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_archive-v0.20.0...postgresql_archive-v0.20.1) - 2026-02-08 ### Other - update rust to 1.92.0 - reduce map_err by adding some From implementations ## `postgresql_extensions` - 
[0.20.0](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_extensions-v0.19.0...postgresql_extensions-v0.20.0) - 2025-08-31 ### Fixed - always use the build version of postgresql when the bundled feature is enabled to avoid network access ### Other - remove devcontainer support ## `postgresql_embedded` - [0.20.0](https://github.com/theseus-rs/postgresql-embedded/compare/v0.19.0...v0.20.0) - 2025-08-31 ### Fixed - always use the build version of postgresql when the bundled feature is enabled to avoid network access - [**breaking**] rename pg_dump compression argument to compress ### Other - minor doc updates - remove devcontainer support - correct lint errors - update to Rust 1.89.0 ## `postgresql_commands` - [0.20.0](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_commands-v0.19.0...postgresql_commands-v0.20.0) - 2025-08-31 ### Fixed - [**breaking**] rename pg_dump compression argument to compress ### Other - remove devcontainer support ## `postgresql_archive` - [0.20.0](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_archive-v0.19.0...postgresql_archive-v0.20.0) - 2025-08-31 ### Other - minor doc updates - remove devcontainer support ## `postgresql_embedded` - [0.19.0](https://github.com/theseus-rs/postgresql-embedded/compare/v0.18.7...v0.19.0) - 2025-06-24 ### Added - allow skipping the installation step during setup ### Other - correct typo in variable name - update extractor feature documentation ## `postgresql_archive` - [0.19.0](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_archive-v0.18.7...postgresql_archive-v0.19.0) - 2025-06-24 ### Other - update extractor feature documentation ## `postgresql_embedded` - [0.18.7](https://github.com/theseus-rs/postgresql-embedded/compare/v0.18.6...v0.18.7) - 2025-06-20 ### Fixed - set CREATE_NO_WINDOW creation flag on Windows ### Other - update Cargo.toml dependencies ## `postgresql_commands` - 
[0.18.7](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_commands-v0.18.6...postgresql_commands-v0.18.7) - 2025-06-20 ### Fixed - set CREATE_NO_WINDOW creation flag on Windows ## `postgresql_archive` - [0.18.7](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_archive-v0.18.6...postgresql_archive-v0.18.7) - 2025-06-20 ### Other - update Cargo.toml dependencies ## `postgresql_extensions` - [0.18.6](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_extensions-v0.18.5...postgresql_extensions-v0.18.6) - 2025-06-17 ### Added - add extractor feature flags ### Other - correct lint errors ## `postgresql_embedded` - [0.18.6](https://github.com/theseus-rs/postgresql-embedded/compare/v0.18.5...v0.18.6) - 2025-06-17 ### Added - add extractor feature flags ### Other - make liblzma an optional dependency - add documentation for bundled feature flag - correct lint errors ## `postgresql_archive` - [0.18.6](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_archive-v0.18.5...postgresql_archive-v0.18.6) - 2025-06-17 ### Added - add extractor feature flags ### Other - make liblzma an optional dependency - correct lint errors ## `postgresql_extensions` - [0.18.5](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_extensions-v0.18.4...postgresql_extensions-v0.18.5) - 2025-05-28 ### Other - update Cargo.toml dependencies ## `postgresql_embedded` - [0.18.5](https://github.com/theseus-rs/postgresql-embedded/compare/v0.18.4...v0.18.5) - 2025-05-28 ### Fixed - correct theseus build bundle - revert SupportFn type change - custom release url not working and compilation failure ### Other - Merge branch 'main' into main - update to criterion=0.6.0, pgvector=0.4.1, reqwest=0.12.18, sqlx=0.8.6, tokio=1.45.1, zip=4.0.0 - minor syntax change - update Cargo.toml dependencies ## `postgresql_commands` - 
[0.18.5](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_commands-v0.18.4...postgresql_commands-v0.18.5) - 2025-05-28 ### Other - update Cargo.toml dependencies ## `postgresql_archive` - [0.18.5](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_archive-v0.18.4...postgresql_archive-v0.18.5) - 2025-05-28 ### Fixed - correct theseus build bundle - revert SupportFn type change - custom release url not working and compilation failure ### Other - update to criterion=0.6.0, pgvector=0.4.1, reqwest=0.12.18, sqlx=0.8.6, tokio=1.45.1, zip=4.0.0 - minor syntax change ## `postgresql_extensions` - [0.18.4](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_extensions-v0.18.3...postgresql_extensions-v0.18.4) - 2025-05-15 ### Other - update Cargo.toml dependencies ## `postgresql_embedded` - [0.18.4](https://github.com/theseus-rs/postgresql-embedded/compare/v0.18.3...v0.18.4) - 2025-05-15 ### Other - update to Rust 1.87.0 - update dependencies - update Cargo.toml dependencies ## `postgresql_commands` - [0.18.4](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_commands-v0.18.3...postgresql_commands-v0.18.4) - 2025-05-15 ### Other - update to Rust 1.87.0 ## `postgresql_archive` - [0.18.4](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_archive-v0.18.3...postgresql_archive-v0.18.4) - 2025-05-15 ### Other - update to Rust 1.87.0 - update dependencies ## `postgresql_extensions` - [0.18.3](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_extensions-v0.18.2...postgresql_extensions-v0.18.3) - 2025-04-03 ### Other - update to Rust 1.86.0 ## `postgresql_embedded` - [0.18.3](https://github.com/theseus-rs/postgresql-embedded/compare/v0.18.2...v0.18.3) - 2025-04-03 ### Other - update Cargo.toml dependencies - update to Rust 1.86.0 ## `postgresql_archive` - 
[0.18.3](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_archive-v0.18.2...postgresql_archive-v0.18.3) - 2025-04-03 ### Other - update Cargo.toml dependencies ## `postgresql_extensions` - [0.18.2](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_extensions-v0.18.1...postgresql_extensions-v0.18.2) - 2025-03-21 ### Other - update Cargo.toml dependencies ## `postgresql_embedded` - [0.18.2](https://github.com/theseus-rs/postgresql-embedded/compare/v0.18.1...v0.18.2) - 2025-03-21 ### Other - update Cargo.toml dependencies ## `postgresql_commands` - [0.18.2](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_commands-v0.18.1...postgresql_commands-v0.18.2) - 2025-03-21 ### Other - update Cargo.toml dependencies ## `postgresql_archive` - [0.18.2](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_archive-v0.18.1...postgresql_archive-v0.18.2) - 2025-03-21 ### Other - update Cargo.toml dependencies ## `postgresql_embedded` - [0.18.1](https://github.com/theseus-rs/postgresql-embedded/compare/v0.18.0...v0.18.1) - 2025-02-26 ### Fix - Check for existing installations in children before installing ## `postgresql_extensions` - [0.18.0](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_extensions-v0.17.5...postgresql_extensions-v0.18.0) - 2025-02-20 ### Added - update to Rust 2024 edition ### Other - [**breaking**] rename feature rustls-tls to rustls ## `postgresql_commands` - [0.18.0](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_commands-v0.17.5...postgresql_commands-v0.18.0) - 2025-02-20 ### Added - update to Rust 2024 edition ## `postgresql_embedded` - [0.18.0](https://github.com/theseus-rs/postgresql-embedded/compare/v0.17.5...v0.18.0) - 2025-02-20 ### Added - update to Rust 2024 edition ### Other - update dependencies - [**breaking**] rename feature rustls-tls to rustls ## `postgresql_archive` - 
[0.18.0](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_archive-v0.17.5...postgresql_archive-v0.18.0) - 2025-02-20 ### Added - update to Rust 2024 edition ### Other - [**breaking**] rename feature rustls-tls to rustls ## `postgresql_extensions` - [0.17.5](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_extensions-v0.17.4...postgresql_extensions-v0.17.5) - 2025-01-25 ### Other - replace regex with regex-lite to reduce dependencies - update ci configuration ## `postgresql_commands` - [0.17.5](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_commands-v0.17.4...postgresql_commands-v0.17.5) - 2025-01-25 ### Other - remove anyhow and human_bytes dependencies ## `postgresql_embedded` - [0.17.5](https://github.com/theseus-rs/postgresql-embedded/compare/v0.17.4...v0.17.5) - 2025-01-25 ### Other - make tracing-indicatif optional - remove anyhow and human_bytes dependencies - replace regex with regex-lite to reduce dependencies - remove http dependency - update ci configuration ## `postgresql_archive` - [0.17.5](https://github.com/theseus-rs/postgresql-embedded/compare/postgresql_archive-v0.17.4...postgresql_archive-v0.17.5) - 2025-01-25 ### Other - replace regex with regex-lite to reduce dependencies - remove http dependency - make tracing-indicatif optional - remove anyhow and human_bytes dependencies ## `postgresql_embedded` - [v0.17.4](https://github.com/theseus-rs/postgresql-embedded/compare/v0.17.3...v0.17.4) - 2025-01-17 ### Chore - update to Rust 1.83 - update to Rust 1.84 ### Fix - correct deny.toml - use tokio::process::spawn() for pg_ctl on Windows ## `postgresql_embedded` - [v0.17.3](https://github.com/theseus-rs/postgresql-embedded/compare/v0.17.2...v0.17.3) - 2024-11-12 ### Build - update codecov action to version 4 - update code coverage generation - update to Rust 1.82.0 ### Chore - add FUNDING.yml - add FUNDING.yml - correct new linting errors - update dependencies - add Unicode-3.0 as an allowed 
license ### Fix - correct zonky extractor ## `postgresql_embedded` - [v0.17.2](https://github.com/theseus-rs/postgresql-embedded/compare/v0.17.1...v0.17.2) - 2024-10-01 ### Build - correct documentation build ## `postgresql_embedded` - [v0.17.1](https://github.com/theseus-rs/postgresql-embedded/compare/v0.17.0...v0.17.1) - 2024-10-01 ### Build - correct documentation build - update dependencies ## `postgresql_embedded` - [v0.17.0](https://github.com/theseus-rs/postgresql-embedded/compare/v0.16.3...v0.17.0) - 2024-09-28 ### Chore - update dependencies - add issue templates - forbid clippy allow attributes - add rust-toolchain.toml - updates for clippy lints ### Deprecated - [**breaking**] remove version 12 and deprecate version 13 ### Fix - allow archives to be bundled from alternate github repositories ### Test - update extension test to run with specific postgresql version ## `postgresql_embedded` - [v0.16.3](https://github.com/theseus-rs/postgresql-embedded/compare/v0.16.2...v0.16.3) - 2024-09-04 ### Chore - switch from xz2 to liblzma - ignore .idea directory ## `postgresql_embedded` - [v0.16.2](https://github.com/theseus-rs/postgresql-embedded/compare/v0.16.1...v0.16.2) - 2024-08-24 ### Build - update audit and deny checks ### Docs - split axum and progress bar examples - minor doc correction ### Fix - update dependencies to address [RUSTSEC-2024-0363](https://rustsec.org/advisories/RUSTSEC-2024-0363.html) ### Refactor - rename embedded_async_diesel_r2d2 to diesel_embedded ## `postgresql_embedded` - [v0.16.1](https://github.com/theseus-rs/postgresql-embedded/compare/v0.16.0...v0.16.1) - 2024-08-13 ### Build - remove unused dependencies ### Docs - add axum example - add indicatif to axum example ### Feat - add archive tracing progress bar status ### Fix - update maven status to set progress bar position ### Test - update version of postgresql used for testing from 16.3.0 to 16.4.0 - update windows test assertion ## `postgresql_embedded` - 
[v0.16.0](https://github.com/theseus-rs/postgresql-embedded/compare/v0.15.0...v0.16.0) - 2024-08-04 ### Build - sort dependencies - update dependencies - address lint error ### Docs - add PortalCorp example for pgvector ### Feat - add portal corp extensions ### Fix - correct steampipe extension url resolution - add .dll support - update steampipe to use detected OS if not on macos - correct extension regex to match file extensions ### Refactor - [**breaking**] refactor extension matchers - [**breaking**] return list of files from archive extract function - [**breaking**] refactor archive extract directories - refactor zonky extractor to use generic tar_xz_extractor ### Test - update portal corp test so that it does not run on macos x64 - add tests for extension matchers - update archive test assertions to be platform specific - update expected files extracted - improve matcher error tests - enable portal corp test for all platforms ## `postgresql_embedded` - [v0.15.0](https://github.com/theseus-rs/postgresql-embedded/compare/v0.14.2...v0.15.0) - 2024-08-01 ### Build - update tls features - add github feature to steampipe and tensor-chord ### Docs - correct doc errors - correct doc errors - add vector extension example - update vector_extension example to run queries ### Feat - [**breaking**] initial postgresql_extensions crate ### Fix - registered github archive repositories for extensions - correct steampipe install matcher - [**breaking**] update extension matchers to use postgresql major version - correct cargo check failure - correct serialization error writing configuration - correct vector example error - linting error ### Refactor - de-deduplicate steampipe matcher logic ### Test - add version tests - remove unused extension model - update lifecycle test to run on linux only - update steampipe test to run on macos - disable steampipe test on macos - update steampipe matcher test - improve model test coverage ## `postgresql_embedded` - 
[v0.14.2](https://github.com/theseus-rs/postgresql-embedded/compare/v0.14.1...v0.14.2) - 2024-07-17 ### Build - remove clear caches action ### Docs - add version optimization documentation ### Fix - updated PgConfigBuilder interface to align with pg_config executable - improve commands on windows to return stdout and stderr - correct linting errors ### Test - correct windows test failure ## `postgresql_embedded` - [v0.14.1](https://github.com/theseus-rs/postgresql-embedded/compare/v0.14.0...v0.14.1) - 2024-07-06 ### Build - change default from rustls-tls to native-tls - suppress lint warning - correct lint error - correct formatting - update non-windows build configuration - update non-windows build tests ### Docs - update docs for new features ### Fix - correct bug where commands hang on windows when retrieving stdout/stderr - correct hang when tokio is not used - update command tests to work on Windows ### Test - correct linux/macos tests - increase timeout to 30 seconds - increase timeout to 30 seconds - revert timeout to 5 seconds ## `postgresql_embedded` - [v0.14.0](https://github.com/theseus-rs/postgresql-embedded/compare/v0.13.0...v0.14.0) - 2024-07-03 ### Feat - [**breaking**] add feature flags to enable zonky ### Test - correct extract test implementations ## `postgresql_embedded` - [v0.13.0](https://github.com/theseus-rs/postgresql-embedded/compare/v0.12.0...v0.13.0) - 2024-07-01 ### Build - pin dependencies - update use definitions when blocking feature enabled - unpin dependencies - correct url dependency definition - correct documentation link error - print target triple during build - remove build caching - correct lint error - update license rules - correct formatting error ### Docs - update README.md - simplify documentation - remove reference to Bytes - update documentation - update readmes ### Feat - [**breaking**] add semantic versioning support and configurable repositories - add matcher registry - [**breaking**] add configurable hashers - add 
sha2-512 support - add blake2 and sha3 hash support - add hasher and matcher supports function - [**breaking**] add configurable extractors - add support for installing binaries from the zonky project - add SHA1 hash support for older Maven repositories - utilize sqlx for database management to support PostgreSQL installations that do not bundle psql - update hasher registry to work with Maven central and add MD5 hash ### Fix - correct asset hash logic - convert possible panics to errors ### Refactor - [**breaking**] rename ReleaseNotFound to VersionNotFound - [**breaking**] remove bytes dependency - [**breaking**] remove bytes dependency - remove default registry values ### Test - remove extraneous tests - add tests to improve test coverage - correct test_blake2b_512 - improve test coverage - add zonky archive integration test - correct hash test ## `postgresql_embedded` - [v0.12.0](https://github.com/theseus-rs/postgresql-embedded/compare/v0.11.0...v0.12.0) - 2024-06-21 ### Refactor - [**breaking**] move version from PostgreSQL::new() to Settings ## `postgresql_embedded` - [v0.11.0](https://github.com/theseus-rs/postgresql-embedded/compare/v0.10.2...v0.11.0) - 2024-06-20 ### Build - Enable pedantic lints ### Docs - update documentation - updated archive documentation examples ### Feat - [**breaking**] allow releases URL to be configured - allow releases url to be specified at build time when the bundles flag is set with the POSTGRESQL_RELEASES_URL environment variable - export Version to improve dx ### Fix - reference settings directly instead of via function call - update examples - pass settings release_url when bundled flag is set ### Test - add missing command error tests and clean up lint directives ## `postgresql_embedded` - [v0.10.2](https://github.com/theseus-rs/postgresql-embedded/compare/v0.10.1...v0.10.2) - 2024-06-18 ### Fix - correct errors when PGDATABASE envar is set ## `postgresql_embedded` - 
[v0.10.1](https://github.com/theseus-rs/postgresql-embedded/compare/v0.9.5...v0.10.1) - 2024-06-14 ### Build - allow Unicode-3.0 license ### Feat - [**breaking**] add ability to specify multiple pg_ctl options and define server configuration in Settings ## `postgresql_embedded` - [v0.9.5](https://github.com/theseus-rs/postgresql-embedded/compare/v0.9.4...v0.9.5) - 2024-06-03 ### Build - address pedantic clippy warnings ### Fix - don't require rustls for the build script. only enable by default. ## `postgresql_embedded` - [v0.9.4](https://github.com/theseus-rs/postgresql-embedded/compare/v0.9.3...v0.9.4) - 2024-05-31 ### Feat - add native-tls support ## `postgresql_embedded` - [v0.9.3](https://github.com/theseus-rs/postgresql-embedded/compare/v0.9.2...v0.9.3) - 2024-05-21 ### PostgreSQL - don't trace self, and when tracing commands only trace the base name. makes the traces less enormous and also avoids dumping passwords into traces. ## `postgresql_embedded` - [v0.9.2](https://github.com/theseus-rs/postgresql-embedded/compare/v0.9.1...v0.9.2) - 2024-05-19 ### Build - correct lint warnings - update dependencies ### Chore - update dependencies ### Fix - correct debug message ### Test - add authentication tests - improve test coverage ## `postgresql_embedded` - [v0.9.1](https://github.com/theseus-rs/postgresql-embedded/compare/v0.9.0...v0.9.1) - 2024-05-01 ### Fix - create extract_dir on same filesystem as out_dir ## `postgresql_embedded` - [v0.9.0](https://github.com/theseus-rs/postgresql-embedded/compare/v0.8.3...v0.9.0) - 2024-04-26 ### Fix - [**breaking**] define bootstrap superuser as postgres - [**breaking**] define bootstrap superuser as postgres ## `postgresql_embedded` - [v0.8.3](https://github.com/theseus-rs/postgresql-embedded/compare/v0.8.2...v0.8.3) - 2024-04-21 ### Build - add CODECOV_TOKEN to code coverage build step ### Chore - update dependencies - update reqwest libraries - address format error ## `postgresql_embedded` - 
[v0.8.2](https://github.com/theseus-rs/postgresql-embedded/compare/v0.8.1...v0.8.2) - 2024-04-05 ### Fix - suppress bytes parameter in tracing instrumentation ## `postgresql_embedded` - [v0.8.1](https://github.com/theseus-rs/postgresql-embedded/compare/v0.8.0...v0.8.1) - 2024-04-03 ### Build - update build dependencies to address audit check ### Test - add command integration test ## `postgresql_embedded` - [v0.8.0](https://github.com/theseus-rs/postgresql-embedded/compare/v0.7.3...v0.8.0) - 2024-04-03 ### Build - update dependencies - correct linting errors ### Refactor - [**breaking**] move commands into postgresql_commands crate ## `postgresql_embedded` - [v0.7.3](https://github.com/theseus-rs/postgresql-embedded/compare/v0.7.2...v0.7.3) - 2024-03-25 ### Chore - remove scorecard.yml ### Feat - add ability to create settings from a url ### Refactor - remove use of embedded=true parameter ## `postgresql_embedded` - [v0.7.2](https://github.com/theseus-rs/postgresql-embedded/compare/v0.7.1...v0.7.2) - 2024-03-16 ### Chore - add Debug trait to CommandBuilder ### Feat - add tracing instrumentation ## `postgresql_embedded` - [v0.7.1](https://github.com/theseus-rs/postgresql-embedded/compare/v0.7.0...v0.7.1) - 2024-03-15 ### Fix - correct parallel archive extract failures ## `postgresql_embedded` - [v0.7.0](https://github.com/theseus-rs/postgresql-embedded/compare/v0.6.2...v0.7.0) - 2024-03-15 ### Docs - update vscode development container instructions ### Fix - [**breaking**] correct parallel archive extract failures ## `postgresql_embedded` - [v0.6.2](https://github.com/theseus-rs/postgresql-embedded/compare/v0.6.1...v0.6.2) - 2024-03-07 ### Chore - correct lint error ### Feat - add reqwest backoff/retry logic and tracing support ## `postgresql_embedded` - [v0.6.1](https://github.com/theseus-rs/postgresql-embedded/compare/v0.6.0...v0.6.1) - 2024-03-06 ### Chore - update use of settings of postgres connection and correct typo in output - update dependencies - remove use 
of copy left license MPL-2.0 from dependencies ### Fix - update dependencies to address RUSTSEC-2024-0020 ## `postgresql_embedded` - [v0.6.0](https://github.com/theseus-rs/postgresql-embedded/compare/v0.5.0...v0.6.0) - 2024-02-24 ### Chore - correct formatting - correct linting error ### Fix - [**breaking**] remove bundled as a default feature and corrected bug when the bundled feature is not used ## `postgresql_embedded` - [v0.5.0](https://github.com/theseus-rs/postgresql-embedded/compare/v0.4.1...v0.5.0) - 2024-02-22 ### Chore - remove unnecessary use of command pipes - update action permissions to reduce write privilege scope - ignore RUSTSEC-2023-0071 as it is only used in sqlx example code - correct linting errors ### Ci - run all benchmarks from workspace at once instead of individually from crates ### Docs - add SECURITY.md - add postgres driver and sqlx examples - add documentation explaining why RUSTSEC-2023-0071 is ignored ### Refactor - [**breaking**] refactor status to check on demand instead of attempting to track the state dynamically ### Test - remove unused code ## `postgresql_embedded` - [v0.4.1](https://github.com/theseus-rs/postgresql-embedded/compare/v0.4.0...v0.4.1) - 2024-02-18 ### Chore - Add initial dev container support - update windows to use UTF8 to align with other operating systems and utilize capabilities of the newer releases from https://github.com/theseus-rs/postgresql-binaries - add code coverage configuration - remove extraneous line in Cargo.toml - update release drafter to version 6 to address node 16 deprecation warning - update pr-benchmarks name ### Ci - update build to run benchmarks - add BENCHER_API_TOKEN to benchmark action - remove build.yml and move jobs into ci.yml - split benchmark runs - update build to run benchmarks - add benchmark pull request integration - update approach for setting ci-number - add pull-requests: write permission - remove conditional pr benchmark statements ### Docs - add cargo keywords - update 
docs for new functions - add bencher badges ### Feat - add devcontainer support ### Refactor - update psql to manage setting the PGPASSWORD environment variable when pg_password is set - refactor the way benchmarks run on the main branch vs a PR ### Test - add benchmarks - add CommandBuilder test coverage - correct the embedded lifecycle benchmark name - reduce archive benchmark sample size to 10 - update benchmark configuration - remove bencher conditional arguments - combine benchmark runs into one step - remove all bencher options - reduce embedded sample size to 10 to reduce benchmark runtime - update benchmark pull request configuration ## `postgresql_embedded` - [v0.4.0](https://github.com/theseus-rs/postgresql-embedded/compare/v0.3.2...v0.4.0) - 2024-02-13 ### Docs - add postgres to keywords ### Refactor - [**breaking**] remove archive hash from the public interface and always calculate/verify the hash when requesting an archive - [**breaking**] remove archive hash from the public interface and always calculate/verify the hash when requesting an archive - simplified installation logic and improved code coverage ### Test - improve lifecycle test coverage - update elapsed error test to sleep longer to prevent intermittent test failure ## `postgresql_embedded` - [v0.3.2](https://github.com/theseus-rs/postgresql-embedded/compare/v0.3.1...v0.3.2) - 2024-02-13 ### Bug - correct bug where serialization fails when there is a draft release of the PostgreSQL binaries ### Chore - add examples - add missing license definitions ### Test - update test code coverage - add tests for examples ## `postgresql_embedded` - [v0.3.1](https://github.com/theseus-rs/postgresql-embedded/compare/v0.3.0...v0.3.1) - 2024-02-12 ### Chore - address linting error - change tracing levels from info to debug ### Ci - add pull request labeler ### Docs - update cargo description ### Refactor - update postgresql_embedded::ArchiveError argument ## `postgresql_embedded` - 
[v0.3.0](https://github.com/theseus-rs/postgresql-embedded/compare/v0.2.3...v0.3.0) - 2024-02-11 ### Ci - add release drafter ### Refactor - [**breaking**] rename ArchiveError to postgresql_archive::Error and EmbeddedError to postgresql_embedded::Error ## `postgresql_embedded` - [v0.2.3](https://github.com/theseus-rs/postgresql-embedded/compare/v0.2.2...v0.2.3) - 2024-02-11 ### Ci - add scheduled action to clear github caches ## `postgresql_embedded` - [v0.2.2](https://github.com/theseus-rs/postgresql-embedded/compare/v0.2.1...v0.2.2) - 2024-02-11 ### Bug - warn when a release tag name does not match the expected version pattern ### Chore - remove default feature test - update release to 0.2.2 ### Docs - wrap synchronous API docs in feature blocks - remove ci badge from rust docs - update examples in documentation to remove unnecessary use of .unwrap() ### Feat - enable code coverage - add code coverage badges ### Test - add tests to improve code coverage - updated valid initial statuses ## `postgresql_embedded` - [v0.2.1](https://github.com/theseus-rs/postgresql-embedded/compare/v0.2.0...v0.2.1) - 2024-02-10 ### Chore - update release to 0.2.1 ### Docs - enable documentation features ## `postgresql_embedded` - [v0.2.0](https://github.com/theseus-rs/postgresql-embedded/compare/v0.1.2...v0.2.0) - 2024-02-10 ### Chore - update release to 0.2.0 ### Docs - updated examples to use no_run to prevent documentation build failures ## `postgresql_embedded` - [v0.1.2](https://github.com/theseus-rs/postgresql-embedded/compare/v0.1.1...v0.1.2) - 2024-02-10 ### Chore - remove cargo vet check - remove unused cargo dist configuration - update release to 0.1.2 ### Docs - update badges for release - correct crate repository urls - add documentation for CommandExecutor - remove note regarding tokio usage for the example - added documentation for POSTGRESQL_VERSION and GITHUB_TOKEN usage ## `postgresql_embedded` - 
[v0.1.1](https://github.com/theseus-rs/postgresql-embedded/compare/v0.1.0...v0.1.1) - 2024-02-10 ### Docs - mark docs as ignored to prevent doc release failures ## `postgresql_embedded` - [v0.1.0](https://github.com/theseus-rs/postgresql-embedded/compare/bd97bf1b5b53beb503034d499a0186c75ba6271e...v0.1.0) - 2024-02-10 ### Bug - corrected unused import and unused variable errors when building on windows - update postgresql_embedded to enable "bundled" as a default feature - correct doc lint - correct command test failures on windows - correct command builder test failures on windows - correct command builder test bugs on windows - update archive extract to support symlinks - corrected extract bug on MacOS caused by a directory being treated as a file - set encoding to SQL_ASCII for windows until binary is built with UTF8 support; use -o instead of --option when attempting to start the server - remove failing code coverage actions ### Build - *(deps)* bump tempfile from 3.9.0 to 3.10.0 ### Chore - initial CI configuration - updated tempfile config for cargo vet - reduce test execution and setup code coverage - enable rust / cargo caching for ci - enable caching to ci checks - update vet check for hermit-abi - update cargo vet config - add GITHUB_TOKEN to clippy and tests to address rate limiting - disable windows build - add author and release metadata - add missing crate descriptions - update release metadata ### Docs - update MIT License header - update ci status badge - disable blocking rust doc examples ### Feat - add ability to embed PostgreSQL installation in a Rust executable - add GITHUB_TOKEN as a Bearer token when calling the GitHub API in order to increase the rate limit - added initial tracing support ### Refactor - update the name of the postgresql binaries repository ### Test - refactor version constant tests so that they can be run in parallel to speed up builds - corrected pg_ctl test ================================================ FILE: Cargo.toml 
================================================ [workspace] default-members = [ "postgresql_archive", "postgresql_commands", "postgresql_embedded", "postgresql_extensions", ] members = [ "examples/*", "postgresql_archive", "postgresql_commands", "postgresql_embedded", "postgresql_extensions", ] resolver = "3" [workspace.package] authors = ["Brian Heineman "] categories = ["database"] edition = "2024" keywords = ["postgresql", "postgres", "embedded", "database", "server"] license = "(Apache-2.0 OR MIT) AND PostgreSQL" repository = "https://github.com/theseus-rs/postgresql-embedded" rust-version = "1.92.0" version = "0.20.2" [workspace.dependencies] anyhow = "1.0.102" async-trait = "0.1.89" axum = "0.8.8" criterion = "0.8.2" diesel = "2.3.6" diesel_migrations = "2.3.1" flate2 = "1.1.9" futures-util = "0.3.32" hex = "0.4.3" indicatif = "0.18.4" indoc = "2.0.7" liblzma = "0.4.6" md-5 = "0.10.6" pgvector = "0.4.1" postgres = "0.19.12" quick-xml = "0.39.2" r2d2_postgres = "0.18.2" rand = "0.10.0" regex-lite = "0.1.9" reqwest = { version = "0.13.2", default-features = false } reqwest-middleware = "0.5.1" reqwest-retry = "0.9.1" reqwest-tracing = "0.7.0" semver = "1.0.27" serde = "1.0.228" serde_json = "1.0.149" sha1 = "0.10.6" sha2 = "0.10.9" sqlx = { version = "0.8.6", default-features = false, features = ["postgres"] } tar = "0.4.44" target-triple = "1.0.0" tempfile = "3.25.0" test-log = "0.2.19" thiserror = "2.0.18" tokio = "1.49.0" tracing = "0.1.44" tracing-indicatif = "0.3.14" tracing-subscriber = "0.3.22" url = "2.5.8" zip = { version = "8.1.0", default-features = false, features = ["deflate"] } [workspace.lints.rust] dead_code = "allow" missing_debug_implementations = "deny" unsafe_code = "deny" warnings = "deny" [workspace.lints.clippy] pedantic = { level = "deny", priority = -1 } allow_attributes = "deny" fallible_impl_from = "deny" unwrap_used = "deny" [workspace.metadata.release] shared-version = true dependent-version = "upgrade" tag-name = "v{{version}}" 
================================================ FILE: LICENSE-APACHE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
END OF TERMS AND CONDITIONS ================================================ FILE: LICENSE-MIT ================================================ MIT License Copyright (c) 2023 Theseus contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: README.md ================================================

# PostgreSQL Embedded [![ci](https://github.com/theseus-rs/postgresql-embedded/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/theseus-rs/postgresql-embedded/actions/workflows/ci.yml) [![Documentation](https://docs.rs/postgresql_embedded/badge.svg)](https://docs.rs/postgresql_embedded) [![Code Coverage](https://codecov.io/gh/theseus-rs/postgresql-embedded/branch/main/graph/badge.svg)](https://codecov.io/gh/theseus-rs/postgresql-embedded) [![Benchmarks](https://img.shields.io/badge/%F0%9F%90%B0_bencher-enabled-6ec241)](https://bencher.dev/perf/theseus-rs-postgresql-embedded) [![Latest version](https://img.shields.io/crates/v/postgresql_embedded.svg)](https://crates.io/crates/postgresql_embedded) [![License](https://img.shields.io/crates/l/postgresql_embedded)](https://github.com/theseus-rs/postgresql-embedded#license) [![Semantic Versioning](https://img.shields.io/badge/%E2%9A%99%EF%B8%8F_SemVer-2.0.0-blue)](https://semver.org/spec/v2.0.0.html) Install and run a PostgreSQL database locally on Linux, MacOS or Windows. PostgreSQL can be bundled with your application, or downloaded on demand. This library provides an embedded-like experience for PostgreSQL similar to what you would have with SQLite. This is accomplished by downloading and installing PostgreSQL during runtime. There is also a "bundled" feature that when enabled, will download the PostgreSQL installation archive at compile time, include it in your binary and install from the binary version at runtime. In either case, PostgreSQL will run in a separate process space. 
## Features - installing and running PostgreSQL - running PostgreSQL on ephemeral ports - Unix socket support - async and blocking API - bundling the PostgreSQL archive in an executable - semantic version resolution - ability to configure PostgreSQL startup options - settings builder for fluent configuration - URL based configuration - choice of native-tls or rustls - support for installing PostgreSQL extensions ## Getting Started ### Example ```rust use postgresql_embedded::{PostgreSQL, Result}; #[tokio::main] async fn main() -> Result<()> { let mut postgresql = PostgreSQL::default(); postgresql.setup().await?; postgresql.start().await?; let database_name = "test"; postgresql.create_database(database_name).await?; postgresql.database_exists(database_name).await?; postgresql.drop_database(database_name).await?; postgresql.stop().await } ``` ## Notes Supports using PostgreSQL binaries from: * [theseus-rs/postgresql-binaries](https://github.com/theseus-rs/postgresql-binaries) (default) * [zonkyio/embedded-postgres-binaries](https://github.com/zonkyio/embedded-postgres-binaries) ## Safety These crates use `#![forbid(unsafe_code)]` to ensure everything is implemented in 100% safe Rust. ## License Licensed under either of * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0) * MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT) at your option. PostgreSQL is covered under [The PostgreSQL License](https://opensource.org/licenses/postgresql). ## Contribution Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. 
## Prior Art Projects that inspired this one: * [zonkyio/embedded-postgres-binaries](https://github.com/zonkyio/embedded-postgres-binaries) * [faokunega/pg-embed](https://github.com/faokunega/pg-embed) ================================================ FILE: SECURITY.md ================================================ # Security Policy ## Supported Versions Only the latest version of this crate is supported. ## Reporting a Vulnerability To report a security vulnerability, please use the form at https://github.com/theseus-rs/postgresql-embedded/security/advisories/new ================================================ FILE: clippy.toml ================================================ allow-unwrap-in-tests = true ================================================ FILE: deny.toml ================================================ # Documentation for this configuration file can be found here # https://embarkstudios.github.io/cargo-deny/checks/cfg.html [graph] targets = [ { triple = "aarch64-unknown-linux-gnu" }, { triple = "aarch64-unknown-linux-musl" }, { triple = "aarch64-apple-darwin" }, { triple = "x86_64-apple-darwin" }, { triple = "x86_64-pc-windows-msvc" }, { triple = "x86_64-unknown-linux-gnu" }, { triple = "x86_64-unknown-linux-musl" }, ] # https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html [licenses] allow = [ "Apache-2.0", "BSD-2-Clause", "BSD-3-Clause", "BSL-1.0", "MIT", "PostgreSQL", "Unicode-3.0", "Zlib", ] # https://embarkstudios.github.io/cargo-deny/checks/advisories/cfg.html [advisories] ignore = [ ] # https://embarkstudios.github.io/cargo-deny/checks/bans/cfg.html [bans] multiple-versions = "deny" wildcards = "allow" deny = [] [[licenses.clarify]] name = "ring" expression = "MIT AND ISC AND OpenSSL" license-files = [ { path = "LICENSE", hash = 0xbd0eed23 } ] ================================================ FILE: examples/archive_async/Cargo.toml ================================================ [package] edition.workspace = true name = 
"archive_async" publish = false license.workspace = true version.workspace = true [dependencies] postgresql_archive = { path = "../../postgresql_archive" } tempfile = { workspace = true } tokio = { workspace = true, features = ["full"] } ================================================ FILE: examples/archive_async/src/main.rs ================================================ #![forbid(unsafe_code)] #![forbid(clippy::allow_attributes)] #![deny(clippy::pedantic)] use postgresql_archive::configuration::theseus; use postgresql_archive::{Result, VersionReq, extract, get_archive}; #[tokio::main] async fn main() -> Result<()> { let url = theseus::URL; let version_req = VersionReq::STAR; let (archive_version, archive) = get_archive(url, &version_req).await?; let out_dir = tempfile::tempdir()?.keep(); extract(url, &archive, &out_dir).await?; println!( "PostgreSQL {} extracted to {}", archive_version, out_dir.to_string_lossy() ); Ok(()) } #[cfg(test)] mod test { use super::*; #[test] fn test_archive_async_main() -> Result<()> { main() } } ================================================ FILE: examples/archive_sync/Cargo.toml ================================================ [package] edition.workspace = true name = "archive_sync" publish = false license.workspace = true version.workspace = true [dependencies] postgresql_archive = { path = "../../postgresql_archive", features = ["blocking"] } tempfile = { workspace = true } ================================================ FILE: examples/archive_sync/src/main.rs ================================================ use postgresql_archive::blocking::{extract, get_archive}; use postgresql_archive::configuration::theseus; use postgresql_archive::{Result, VersionReq}; fn main() -> Result<()> { let url = theseus::URL; let version_req = VersionReq::STAR; let (archive_version, archive) = get_archive(url, &version_req)?; let out_dir = tempfile::tempdir()?.keep(); extract(url, &archive, &out_dir)?; println!( "PostgreSQL {} extracted to {}", 
archive_version, out_dir.to_string_lossy() ); Ok(()) } #[cfg(test)] mod test { use super::*; #[test] fn test_archive_sync_main() -> Result<()> { main() } } ================================================ FILE: examples/axum_embedded/Cargo.toml ================================================ [package] edition.workspace = true name = "axum_embedded" publish = false license.workspace = true version.workspace = true [dependencies] anyhow = { workspace = true } axum = { workspace = true } postgresql_embedded = { path = "../../postgresql_embedded" } postgresql_extensions = { path = "../../postgresql_extensions" } sqlx = { workspace = true, features = ["runtime-tokio"] } tracing = { workspace = true } tracing-subscriber = { workspace = true } tokio = { workspace = true, features = ["full"] } ================================================ FILE: examples/axum_embedded/src/main.rs ================================================ use anyhow::Result; use axum::extract::State; use axum::{Json, Router, http::StatusCode, routing::get}; use postgresql_embedded::{PostgreSQL, Settings, VersionReq}; use sqlx::PgPool; use sqlx::postgres::PgPoolOptions; use std::env; use std::time::Duration; use tokio::net::TcpListener; use tracing::info; /// Example of how to use postgresql embedded with axum. 
#[tokio::main] async fn main() -> Result<()> { tracing_subscriber::fmt().compact().init(); let db_url = env::var("DATABASE_URL").unwrap_or_else(|_| "postgresql://postgres@localhost".to_string()); info!("Installing PostgreSQL"); let settings = Settings::from_url(&db_url)?; let mut postgresql = PostgreSQL::new(settings); postgresql.setup().await?; info!("Installing the vector extension from PortalCorp"); postgresql_extensions::install( postgresql.settings(), "portal-corp", "pgvector_compiled", &VersionReq::parse("=0.16.12")?, ) .await?; info!("Starting PostgreSQL"); postgresql.start().await?; let database_name = "axum-test"; info!("Creating database {database_name}"); postgresql.create_database(database_name).await?; info!("Configuring extension"); let settings = postgresql.settings().clone(); let database_url = settings.url(database_name); let pool = PgPool::connect(database_url.as_str()).await?; pool.close().await; info!("Restarting database"); postgresql.stop().await?; postgresql.start().await?; info!("Setup connection pool"); let pool = PgPoolOptions::new() .max_connections(5) .acquire_timeout(Duration::from_secs(3)) .connect(&database_url) .await?; info!("Enabling extension"); enable_extension(&pool).await?; info!("Start application"); let app = Router::new().route("/", get(extensions)).with_state(pool); let listener = TcpListener::bind("0.0.0.0:3000").await.unwrap(); info!("Listening on {}", listener.local_addr()?); axum::serve(listener, app).await?; Ok(()) } async fn enable_extension(pool: &PgPool) -> Result<()> { sqlx::query("CREATE EXTENSION IF NOT EXISTS vector") .execute(pool) .await?; Ok(()) } async fn extensions(State(pool): State) -> Result>, (StatusCode, String)> { sqlx::query_scalar("SELECT name FROM pg_available_extensions ORDER BY name") .fetch_all(&pool) .await .map(Json) .map_err(internal_error) } fn internal_error(err: E) -> (StatusCode, String) { (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()) } 
================================================ FILE: examples/diesel_embedded/Cargo.toml ================================================ [package] edition.workspace = true name = "diesel_embedded" publish = false license.workspace = true version.workspace = true [dependencies] diesel = { workspace = true, features = ["postgres", "r2d2"] } diesel_migrations = { workspace = true, features = ["postgres"] } postgresql_embedded = { path = "../../postgresql_embedded" } tokio = { workspace = true, features = ["full"] } ================================================ FILE: examples/diesel_embedded/README.md ================================================ This example is taken from [Getting Started with Diesel](https://diesel.rs/guides/getting-started) and modified to work with an embedded database. ================================================ FILE: examples/diesel_embedded/diesel.toml ================================================ # For documentation on how to configure this file, # see https://diesel.rs/guides/configuring-diesel-cli [print_schema] file = "src/schema.rs" custom_type_derives = ["diesel::query_builder::QueryId", "Clone"] [migrations_directory] dir = "./migrations" ================================================ FILE: examples/diesel_embedded/migrations/.keep ================================================ ================================================ FILE: examples/diesel_embedded/migrations/2024-08-17-200823_create_posts/down.sql ================================================ DROP TABLE posts ================================================ FILE: examples/diesel_embedded/migrations/2024-08-17-200823_create_posts/up.sql ================================================ CREATE TABLE posts ( id SERIAL PRIMARY KEY, title VARCHAR NOT NULL, body TEXT NOT NULL, published BOOLEAN NOT NULL DEFAULT FALSE ) ================================================ FILE: examples/diesel_embedded/src/main.rs ================================================ use 
crate::models::{NewPost, Post};
use diesel::r2d2::{ConnectionManager, Pool};
use diesel::{PgConnection, RunQueryDsl, SelectableHelper};
use diesel_migrations::{EmbeddedMigrations, MigrationHarness, embed_migrations};
use postgresql_embedded::{PostgreSQL, Result, Settings, VersionReq};

mod models;
pub mod schema;

const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations/");

#[tokio::main]
async fn main() -> Result<()> {
    let settings = Settings {
        version: VersionReq::parse("=16.4.0")?,
        username: "postgres".to_string(),
        password: "postgres".to_string(),
        ..Default::default()
    };
    let mut postgresql = PostgreSQL::new(settings);
    postgresql.setup().await?;
    postgresql.start().await?;

    let database_name = "diesel_demo";
    postgresql.create_database(database_name).await?;
    postgresql.database_exists(database_name).await?;

    {
        let database_url = postgresql.settings().url(database_name);
        // NOTE(review): the turbofish type parameter was lost during text
        // extraction ("ConnectionManager::::new"); restored as
        // `ConnectionManager::<PgConnection>` per the diesel r2d2 API.
        let manager = ConnectionManager::<PgConnection>::new(database_url);
        let pool = Pool::builder()
            .test_on_check_out(true)
            .build(manager)
            .expect("Could not build connection pool");

        // `Pool::get` takes `&self`; no need to clone the pool handle first.
        let mut mig_run = pool.get().unwrap();
        mig_run.run_pending_migrations(MIGRATIONS).unwrap();

        let post = create_post(
            &mut pool.get().unwrap(),
            "My First Post",
            "This is my first post", // fixed typo: "firs" -> "first"
        );
        println!("Post '{}' created", post.title);
    }

    postgresql.drop_database(database_name).await?;
    postgresql.stop().await
}

/// Create a new post
///
/// # Panics
/// if the post cannot be saved
pub fn create_post(conn: &mut PgConnection, title: &str, body: &str) -> Post {
    use crate::schema::posts;

    let new_post = NewPost { title, body };
    diesel::insert_into(posts::table)
        .values(&new_post)
        .returning(Post::as_returning())
        .get_result(conn)
        .expect("Error saving new post")
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_diesel_embedded_main() -> Result<()> {
        main()
    }
}

================================================
FILE: examples/diesel_embedded/src/models.rs
================================================
use diesel::prelude::*;
#[derive(Queryable, Selectable)] #[diesel(table_name = crate::schema::posts)] #[diesel(check_for_backend(diesel::pg::Pg))] pub struct Post { pub id: i32, pub title: String, pub body: String, pub published: bool, } #[derive(Insertable)] #[diesel(table_name = crate::schema::posts)] pub struct NewPost<'a> { pub title: &'a str, pub body: &'a str, } ================================================ FILE: examples/diesel_embedded/src/schema.rs ================================================ diesel::table! { posts (id) { id -> Int4, title -> Varchar, body -> Text, published -> Bool, } } ================================================ FILE: examples/download_progress_bar/Cargo.toml ================================================ [package] edition.workspace = true name = "download_progress_bar" publish = false license.workspace = true version.workspace = true [dependencies] anyhow = { workspace = true } indicatif = { workspace = true } postgresql_embedded = { path = "../../postgresql_embedded", features = ["indicatif"] } tracing-indicatif = { workspace = true } tracing-subscriber = { workspace = true } tokio = { workspace = true, features = ["full"] } ================================================ FILE: examples/download_progress_bar/src/main.rs ================================================ use anyhow::Result; use indicatif::ProgressStyle; use postgresql_embedded::{PostgreSQL, Settings, VersionReq}; use tracing_indicatif::IndicatifLayer; use tracing_subscriber::filter::LevelFilter; use tracing_subscriber::prelude::*; use tracing_subscriber::{Registry, fmt}; /// Example of how to display a progress bar for the postgresql embedded archive download #[tokio::main] async fn main() -> Result<()> { let progress_style = ProgressStyle::with_template("{span_child_prefix}{spinner} {span_name} [{elapsed_precise}] [{wide_bar:.green.bold}] {bytes}/{total_bytes} ({bytes_per_sec}, {eta})")? 
.progress_chars("=> "); let indicatif_layer = IndicatifLayer::new().with_progress_style(progress_style); let subscriber = Registry::default() .with(fmt::Layer::default().with_filter(LevelFilter::INFO)) .with(indicatif_layer); subscriber.init(); let settings = Settings { version: VersionReq::parse("=16.4.0")?, ..Default::default() }; let mut postgresql = PostgreSQL::new(settings); postgresql.setup().await?; postgresql.start().await?; let database_name = "test"; postgresql.create_database(database_name).await?; postgresql.database_exists(database_name).await?; postgresql.drop_database(database_name).await?; postgresql.stop().await?; Ok(()) } #[cfg(test)] mod test { use super::*; #[test] fn test_download_progress_bar_main() -> Result<()> { main() } } ================================================ FILE: examples/embedded_async/Cargo.toml ================================================ [package] edition.workspace = true name = "embedded_async" publish = false license.workspace = true version.workspace = true [dependencies] postgresql_embedded = { path = "../../postgresql_embedded" } tokio = { workspace = true, features = ["full"] } ================================================ FILE: examples/embedded_async/src/main.rs ================================================ use postgresql_embedded::{PostgreSQL, Result, Settings, VersionReq}; #[tokio::main] async fn main() -> Result<()> { let settings = Settings { version: VersionReq::parse("=16.4.0")?, ..Default::default() }; let mut postgresql = PostgreSQL::new(settings); postgresql.setup().await?; postgresql.start().await?; let database_name = "test"; postgresql.create_database(database_name).await?; postgresql.database_exists(database_name).await?; postgresql.drop_database(database_name).await?; postgresql.stop().await } #[cfg(test)] mod test { use super::*; #[test] fn test_embedded_async_main() -> Result<()> { main() } } ================================================ FILE: examples/embedded_sync/Cargo.toml 
================================================ [package] edition.workspace = true name = "embedded_sync" publish = false license.workspace = true version.workspace = true [dependencies] postgresql_embedded = { path = "../../postgresql_embedded", features = ["blocking"] } ================================================ FILE: examples/embedded_sync/src/main.rs ================================================ use postgresql_embedded::Result; use postgresql_embedded::blocking::PostgreSQL; fn main() -> Result<()> { let mut postgresql = PostgreSQL::default(); postgresql.setup()?; postgresql.start()?; let database_name = "test"; postgresql.create_database(database_name)?; postgresql.database_exists(database_name)?; postgresql.drop_database(database_name)?; postgresql.stop() } #[cfg(test)] mod test { use super::*; #[test] fn test_embedded_sync_main() -> Result<()> { main() } } ================================================ FILE: examples/portal_corp_extension/Cargo.toml ================================================ [package] edition.workspace = true name = "portal_corp_extension" publish = false license.workspace = true version.workspace = true [dependencies] anyhow = { workspace = true } indoc = { workspace = true } pgvector = { workspace = true, features = ["sqlx"] } postgresql_embedded = { path = "../../postgresql_embedded" } postgresql_extensions = { path = "../../postgresql_extensions" } sqlx = { workspace = true, features = ["runtime-tokio"] } tracing = { workspace = true } tracing-subscriber = { workspace = true } tokio = { workspace = true, features = ["full"] } ================================================ FILE: examples/portal_corp_extension/src/main.rs ================================================ use anyhow::Result; use indoc::indoc; use pgvector::Vector; use sqlx::{PgPool, Row}; use tracing::info; use postgresql_embedded::{PostgreSQL, Settings, VersionReq}; /// Example of how to install and configure the `PortalCorp` pgvector extension. 
/// /// See: #[tokio::main] async fn main() -> Result<()> { tracing_subscriber::fmt().compact().init(); info!("Installing PostgreSQL"); let postgresql_version = VersionReq::parse("=16.4.0")?; let settings = Settings { version: postgresql_version.clone(), ..Default::default() }; let mut postgresql = PostgreSQL::new(settings); postgresql.setup().await?; let settings = postgresql.settings(); // Skip the test if the PostgreSQL version does not match; when testing with the 'bundled' // feature, the version may vary and the test will fail. if settings.version != postgresql_version { eprintln!("Postgresql version does not match"); return Ok(()); } info!("Installing the vector extension from PortalCorp"); postgresql_extensions::install( postgresql.settings(), "portal-corp", "pgvector_compiled", &VersionReq::parse("=0.16.12")?, ) .await?; info!("Starting PostgreSQL"); postgresql.start().await?; let database_name = "vector-example"; info!("Creating database {database_name}"); postgresql.create_database(database_name).await?; info!("Configuring extension"); let settings = postgresql.settings(); let database_url = settings.url(database_name); let pool = PgPool::connect(database_url.as_str()).await?; pool.close().await; info!("Restarting database"); postgresql.stop().await?; postgresql.start().await?; info!("Enabling extension"); let pool = PgPool::connect(database_url.as_str()).await?; enable_extension(&pool).await?; info!("Creating table"); create_table(&pool).await?; info!("Creating data"); create_data(&pool).await?; info!("Get the nearest neighbors by L2 distance"); execute_query( &pool, "SELECT * FROM items ORDER BY embedding <-> '[3,1,2]' LIMIT 5", ) .await?; info!("Stopping database"); postgresql.stop().await?; Ok(()) } async fn enable_extension(pool: &PgPool) -> Result<()> { sqlx::query("DROP EXTENSION IF EXISTS vector") .execute(pool) .await?; sqlx::query("CREATE EXTENSION IF NOT EXISTS vector") .execute(pool) .await?; Ok(()) } async fn create_table(pool: &PgPool) -> 
Result<()> { sqlx::query(indoc! {" CREATE TABLE IF NOT EXISTS items ( id bigserial PRIMARY KEY, embedding vector(3) NOT NULL ) "}) .execute(pool) .await?; Ok(()) } async fn create_data(pool: &PgPool) -> Result<()> { sqlx::query(indoc! {" INSERT INTO items (embedding) VALUES ('[1,2,3]'), ('[4,5,6]') "}) .execute(pool) .await?; Ok(()) } async fn execute_query(pool: &PgPool, query: &str) -> Result<()> { info!("Query: {query}"); let rows = sqlx::query(query).fetch_all(pool).await?; for row in rows { let id: i64 = row.try_get("id")?; let embedding: Vector = row.try_get("embedding")?; info!("ID: {id}, Embedding: {embedding:?}"); } Ok(()) } #[cfg(test)] mod test { #[cfg(not(all(target_os = "linux", target_arch = "x86_64")))] use super::*; #[cfg(not(all(target_os = "linux", target_arch = "x86_64")))] #[test] fn test_portal_corp_extension_main() -> Result<()> { main() } } ================================================ FILE: examples/postgres_embedded/Cargo.toml ================================================ [package] edition.workspace = true name = "postgres_embedded" publish = false license.workspace = true version.workspace = true [dependencies] anyhow = { workspace = true } postgres = { workspace = true } postgresql_embedded = { path = "../../postgresql_embedded", features = ["blocking"] } ================================================ FILE: examples/postgres_embedded/README.md ================================================ This example is based on [sqlx/example/todos](https://github.com/launchbadge/sqlx/tree/main/examples/postgres/todos) and modified to work with the postgres driver. 
================================================ FILE: examples/postgres_embedded/src/main.rs ================================================ use anyhow::Result; use postgres::{Client, NoTls}; use postgresql_embedded::blocking::PostgreSQL; fn main() -> Result<()> { let mut postgresql = PostgreSQL::default(); postgresql.setup()?; postgresql.start()?; let database_name = "test"; postgresql.create_database(database_name)?; let settings = postgresql.settings(); let mut client = Client::connect( format!( "host={host} port={port} user={username} password={password}", host = settings.host, port = settings.port, username = settings.username, password = settings.password ) .as_str(), NoTls, )?; println!("Creating table 'todos'"); create_table_todo(&mut client)?; let description = "Implement embedded database with postgres"; println!("Adding new todo with description '{description}'"); let todo_id = add_todo(&mut client, description)?; println!("Added new todo with id {todo_id}"); println!("Marking todo {todo_id} as done"); if complete_todo(&mut client, todo_id)? 
{
        println!("Todo {todo_id} is marked as done");
    }

    println!("Printing list of all todos");
    list_todos(&mut client)?;

    Ok(())
}

/// Create the `todos` table if it does not already exist.
fn create_table_todo(client: &mut Client) -> Result<()> {
    let _ = client.execute(
        "CREATE TABLE IF NOT EXISTS todos (id BIGSERIAL PRIMARY KEY, description TEXT NOT NULL, done BOOLEAN NOT NULL DEFAULT FALSE);",
        &[],
    )?;
    Ok(())
}

/// Insert a todo and return its generated id.
///
/// NOTE(review): the return type parameter was lost during text extraction;
/// restored as `Result<i64>` to match the `BIGSERIAL` id column read below.
fn add_todo(client: &mut Client, description: &str) -> Result<i64> {
    let row = client.query_one(
        "INSERT INTO todos (description) VALUES ($1) RETURNING id",
        &[&description],
    )?;
    let id: i64 = row.get(0);
    Ok(id)
}

/// Mark a todo as done; returns whether a row was actually updated.
/// NOTE(review): restored `Result<bool>` (type parameter lost in extraction).
fn complete_todo(client: &mut Client, id: i64) -> Result<bool> {
    let rows_affected = client.execute("UPDATE todos SET done = TRUE WHERE id = $1", &[&id])?;
    Ok(rows_affected > 0)
}

/// Print every todo, marking completed ones with an `x`.
fn list_todos(client: &mut Client) -> Result<()> {
    let rows = client.query("SELECT id, description, done FROM todos ORDER BY id", &[])?;
    for rec in rows {
        let id: i64 = rec.get("id");
        let description: String = rec.get("description");
        let done: bool = rec.get("done");
        println!(
            "- [{}] {}: {}",
            if done { "x" } else { " " },
            id,
            &description,
        );
    }
    Ok(())
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_postgres_embedded_main() -> Result<()> {
        main()
    }
}

================================================
FILE: examples/sqlx_embedded/Cargo.toml
================================================
[package]
edition.workspace = true
name = "sqlx_embedded"
publish = false
license.workspace = true
version.workspace = true

[dependencies]
anyhow = { workspace = true }
postgresql_embedded = { path = "../../postgresql_embedded" }
sqlx = { workspace = true, features = ["runtime-tokio"] }
tokio = { workspace = true, features = ["full"] }

================================================
FILE: examples/sqlx_embedded/README.md
================================================
This example is taken from
[sqlx/example/todos](https://github.com/launchbadge/sqlx/tree/main/examples/postgres/todos)
and modified to work with an embedded database.
================================================
FILE: examples/sqlx_embedded/src/main.rs
================================================
use anyhow::Result;
use postgresql_embedded::PostgreSQL;
use sqlx::Row;
use sqlx::postgres::PgPool;

#[tokio::main]
async fn main() -> Result<()> {
    let mut postgresql = PostgreSQL::default();
    postgresql.setup().await?;
    postgresql.start().await?;

    let database_name = "test";
    postgresql.create_database(database_name).await?;
    let settings = postgresql.settings();
    let database_url = settings.url(database_name);
    let pool = PgPool::connect(database_url.as_str()).await?;

    println!("Creating table 'todos'");
    create_table_todo(&pool).await?;

    let description = "Implement embedded database with sqlx";
    println!("Adding new todo with description '{description}'");
    let todo_id = add_todo(&pool, description).await?;
    println!("Added new todo with id {todo_id}");

    println!("Marking todo {todo_id} as done");
    if complete_todo(&pool, todo_id).await? {
        println!("Todo {todo_id} is marked as done");
    }

    println!("Printing list of all todos");
    list_todos(&pool).await?;

    Ok(())
}

/// Create the `todos` table if it does not already exist.
async fn create_table_todo(pool: &PgPool) -> Result<()> {
    sqlx::query(
        "CREATE TABLE IF NOT EXISTS todos(id BIGSERIAL PRIMARY KEY, description TEXT NOT NULL, done BOOLEAN NOT NULL DEFAULT FALSE);"
    ).execute(pool).await?;
    Ok(())
}

/// Insert a todo and return its generated id.
///
/// NOTE(review): the return type parameter was lost during text extraction;
/// restored as `Result<i64>` to match the `BIGSERIAL` id column read below.
async fn add_todo(pool: &PgPool, description: &str) -> Result<i64> {
    let rec = sqlx::query("INSERT INTO todos (description) VALUES ($1) RETURNING id")
        .bind(description)
        .fetch_one(pool)
        .await?;
    let id: i64 = rec.get("id");
    Ok(id)
}

/// Mark a todo as done; returns whether a row was actually updated.
/// NOTE(review): restored `Result<bool>` (type parameter lost in extraction).
async fn complete_todo(pool: &PgPool, id: i64) -> Result<bool> {
    let rows_affected = sqlx::query("UPDATE todos SET done = TRUE WHERE id = $1")
        .bind(id)
        .execute(pool)
        .await?
.rows_affected(); Ok(rows_affected > 0) } async fn list_todos(pool: &PgPool) -> Result<()> { let recs = sqlx::query("SELECT id, description, done FROM todos ORDER BY id") .fetch_all(pool) .await?; for rec in recs { let id: i64 = rec.get("id"); let description: String = rec.get("description"); let done: bool = rec.get("done"); println!( "- [{}] {}: {}", if done { "x" } else { " " }, id, &description, ); } Ok(()) } #[cfg(test)] mod test { use super::*; #[test] fn test_sqlx_embedded_main() -> Result<()> { main() } } ================================================ FILE: examples/tensor_chord_extension/Cargo.toml ================================================ [package] edition.workspace = true name = "tensor_chord_extension" publish = false license.workspace = true version.workspace = true [dependencies] anyhow = { workspace = true } indoc = { workspace = true } postgresql_embedded = { path = "../../postgresql_embedded" } postgresql_extensions = { path = "../../postgresql_extensions" } sqlx = { workspace = true, features = ["runtime-tokio"] } tracing = { workspace = true } tracing-subscriber = { workspace = true } tokio = { workspace = true, features = ["full"] } ================================================ FILE: examples/tensor_chord_extension/src/main.rs ================================================ use anyhow::Result; use indoc::indoc; use sqlx::{PgPool, Row}; use tracing::info; use postgresql_embedded::{PostgreSQL, Settings, VersionReq}; /// Example of how to install and configure the `TensorChord` vector extension. 
/// /// See: #[tokio::main] async fn main() -> Result<()> { tracing_subscriber::fmt().compact().init(); info!("Installing PostgreSQL"); let settings = Settings { version: VersionReq::parse("=16.4.0")?, ..Default::default() }; let mut postgresql = PostgreSQL::new(settings); postgresql.setup().await?; info!("Installing the vector extension from TensorChord"); postgresql_extensions::install( postgresql.settings(), "tensor-chord", "pgvecto.rs", &VersionReq::parse("=0.4.0")?, ) .await?; info!("Starting PostgreSQL"); postgresql.start().await?; let database_name = "vector-example"; info!("Creating database {database_name}"); postgresql.create_database(database_name).await?; info!("Configuring extension"); let settings = postgresql.settings(); let database_url = settings.url(database_name); let pool = PgPool::connect(database_url.as_str()).await?; configure_extension(&pool).await?; pool.close().await; info!("Restarting database"); postgresql.stop().await?; postgresql.start().await?; info!("Enabling extension"); let pool = PgPool::connect(database_url.as_str()).await?; enable_extension(&pool).await?; info!("Creating table"); create_table(&pool).await?; info!("Creating data"); create_data(&pool).await?; info!("Squared Euclidean Distance"); execute_query( &pool, "SELECT '[1, 2, 3]'::vector <-> '[3, 2, 1]'::vector AS value", ) .await?; info!("Negative Dot Product"); execute_query( &pool, "SELECT '[1, 2, 3]'::vector <#> '[3, 2, 1]'::vector AS value", ) .await?; info!("Cosine Distance"); execute_query( &pool, "SELECT '[1, 2, 3]'::vector <=> '[3, 2, 1]'::vector AS value", ) .await?; info!("Stopping database"); postgresql.stop().await?; Ok(()) } async fn configure_extension(pool: &PgPool) -> Result<()> { sqlx::query("ALTER SYSTEM SET shared_preload_libraries = \"vectors.so\"") .execute(pool) .await?; sqlx::query("ALTER SYSTEM SET search_path = \"$user\", public, vectors") .execute(pool) .await?; Ok(()) } async fn enable_extension(pool: &PgPool) -> Result<()> { sqlx::query("DROP 
EXTENSION IF EXISTS vectors") .execute(pool) .await?; sqlx::query("CREATE EXTENSION IF NOT EXISTS vectors") .execute(pool) .await?; Ok(()) } async fn create_table(pool: &PgPool) -> Result<()> { sqlx::query(indoc! {" CREATE TABLE IF NOT EXISTS items ( id bigserial PRIMARY KEY, embedding vector(3) NOT NULL ) "}) .execute(pool) .await?; Ok(()) } async fn create_data(pool: &PgPool) -> Result<()> { sqlx::query(indoc! {" INSERT INTO items (embedding) VALUES ('[1,2,3]'), ('[4,5,6]') "}) .execute(pool) .await?; sqlx::query(indoc! {" INSERT INTO items (embedding) VALUES (ARRAY[1, 2, 3]::real[]), (ARRAY[4, 5, 6]::real[] ) "}) .execute(pool) .await?; Ok(()) } async fn execute_query(pool: &PgPool, query: &str) -> Result<()> { let row = sqlx::query(query).fetch_one(pool).await?; let value: f32 = row.try_get("value")?; info!("{}: {}", query, value); Ok(()) } // #[cfg(test)] // mod test { // use super::*; // // #[test] // #[ignore = "this extension has been deprecated"] // fn test_tensor_chord_extension_main() -> Result<()> { // main() // } // } ================================================ FILE: examples/unix_socket/Cargo.toml ================================================ [package] edition.workspace = true name = "unix_socket" publish = false license.workspace = true version.workspace = true [dependencies] postgresql_embedded = { path = "../../postgresql_embedded" } tempfile = { workspace = true } tokio = { workspace = true, features = ["full"] } ================================================ FILE: examples/unix_socket/src/main.rs ================================================ use postgresql_embedded::{PostgreSQL, Result, SettingsBuilder}; #[cfg(unix)] #[tokio::main] async fn main() -> Result<()> { let socket_dir = tempfile::tempdir().expect("failed to create temp dir for socket"); let settings = SettingsBuilder::new() .socket_dir(socket_dir.path().to_path_buf()) .build(); let mut postgresql = PostgreSQL::new(settings); postgresql.setup().await?; 
postgresql.start().await?; let port = postgresql.settings().port; let socket_file = socket_dir.path().join(format!(".s.PGSQL.{port}")); println!("PostgreSQL is listening on Unix socket: {socket_file:?}"); let database_name = "test"; postgresql.create_database(database_name).await?; println!("Created database '{database_name}'"); let exists = postgresql.database_exists(database_name).await?; println!("Database '{database_name}' exists: {exists}"); postgresql.drop_database(database_name).await?; println!("Dropped database '{database_name}'"); postgresql.stop().await?; println!("PostgreSQL stopped"); Ok(()) } #[cfg(not(unix))] fn main() { eprintln!("Unix socket support is only available on Unix platforms"); } #[cfg(test)] #[cfg(unix)] mod test { use super::*; #[test] fn test_unix_socket_main() -> Result<()> { main() } } ================================================ FILE: examples/zonky/Cargo.toml ================================================ [package] edition.workspace = true name = "zonky" publish = false license.workspace = true version.workspace = true [dependencies] postgresql_archive = { path = "../../postgresql_archive" } postgresql_embedded = { path = "../../postgresql_embedded", default-features = false, features = ["zonky"] } tokio = { workspace = true, features = ["full"] } ================================================ FILE: examples/zonky/src/main.rs ================================================ use postgresql_archive::VersionReq; use postgresql_archive::configuration::zonky; use postgresql_embedded::{PostgreSQL, Result, Settings}; #[tokio::main] async fn main() -> Result<()> { let settings = Settings { releases_url: zonky::URL.to_string(), version: VersionReq::parse("=16.3.0")?, ..Default::default() }; let mut postgresql = PostgreSQL::new(settings); postgresql.setup().await?; postgresql.start().await?; let database_name = "test"; postgresql.create_database(database_name).await?; postgresql.database_exists(database_name).await?; 
postgresql.drop_database(database_name).await?; postgresql.stop().await } #[cfg(test)] mod test { #[cfg(not(all(target_os = "linux", target_arch = "x86_64")))] use super::*; #[cfg(not(all(target_os = "linux", target_arch = "x86_64")))] #[test] fn test_zonky_main() -> Result<()> { main() } } ================================================ FILE: postgresql_archive/Cargo.toml ================================================ [package] authors.workspace = true categories.workspace = true description = "A library for downloading and extracting PostgreSQL archives" edition.workspace = true keywords.workspace = true license.workspace = true name = "postgresql_archive" repository = "https://github.com/theseus-rs/postgresql-embedded" rust-version.workspace = true version.workspace = true [dependencies] async-trait = { workspace = true } flate2 = { workspace = true, optional = true } futures-util = { workspace = true } hex = { workspace = true } liblzma = { workspace = true, optional = true } md-5 = { workspace = true, optional = true } quick-xml = { workspace = true, features = ["serialize"], optional = true } regex-lite = { workspace = true } reqwest = { workspace = true, default-features = false, features = ["http2", "json", "query", "stream"] } reqwest-middleware = { workspace = true, features = ["query"] } reqwest-retry = { workspace = true } reqwest-tracing = { workspace = true } semver = { workspace = true } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true, optional = true } sha1 = { workspace = true, optional = true } sha2 = { workspace = true, optional = true } tar = { workspace = true, optional = true } target-triple = { workspace = true } tempfile = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = ["full"], optional = true } tracing = { workspace = true, features = ["log"] } tracing-indicatif = { workspace = true, optional = true } url = { workspace = true } zip = { workspace = true, 
optional = true } [dev-dependencies] anyhow = { workspace = true } criterion = { workspace = true } hex = { workspace = true } test-log = { workspace = true } tokio = { workspace = true } [features] default = [ "native-tls", "theseus" ] blocking = ["dep:tokio"] github = [ "dep:serde_json", ] indicatif = [ "dep:tracing-indicatif" ] maven = [ "dep:quick-xml", "md5", "sha1", "sha2", ] md5 = ["dep:md-5"] native-tls = ["reqwest/native-tls"] rustls = ["reqwest/rustls"] sha1 = ["dep:sha1"] sha2 = ["dep:sha2"] tar-gz = [ "dep:flate2", "dep:tar", ] tar-xz = [ "dep:liblzma", "dep:tar", ] theseus = [ "github", "sha2", "tar-gz", ] zip = [ "dep:zip", ] zonky = [ "maven", "tar-xz", "zip", ] [package.metadata.docs.rs] features = ["blocking"] targets = ["x86_64-unknown-linux-gnu"] [[bench]] harness = false name = "archive" [package.metadata.cargo-machete] ignored = [ "md-5", "serde_json", ] ================================================ FILE: postgresql_archive/README.md ================================================ # PostgreSQL Archive [![ci](https://github.com/theseus-rs/postgresql-embedded/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/theseus-rs/postgresql-embedded/actions/workflows/ci.yml) [![Documentation](https://docs.rs/postgresql_archive/badge.svg)](https://docs.rs/postgresql_archive) [![Code Coverage](https://codecov.io/gh/theseus-rs/postgresql-embedded/branch/main/graph/badge.svg)](https://codecov.io/gh/theseus-rs/postgresql-embedded) [![Benchmarks](https://img.shields.io/badge/%F0%9F%90%B0_bencher-enabled-6ec241)](https://bencher.dev/perf/theseus-rs-postgresql-embedded) [![Latest version](https://img.shields.io/crates/v/postgresql_archive.svg)](https://crates.io/crates/postgresql_archive) [![License](https://img.shields.io/crates/l/postgresql_archive?)](https://github.com/theseus-rs/postgresql-embedded/tree/main/postgresql_archive#license) [![Semantic 
Versioning](https://img.shields.io/badge/%E2%9A%99%EF%B8%8F_SemVer-2.0.0-blue)](https://semver.org/spec/v2.0.0.html)

A configurable library for downloading and extracting PostgreSQL archives.

## Examples

### Asynchronous API

```rust
use postgresql_archive::{extract, get_archive, Result, VersionReq};
use postgresql_archive::configuration::theseus;

#[tokio::main]
async fn main() -> Result<()> {
    let url = theseus::URL;
    let (archive_version, archive) = get_archive(url, &VersionReq::STAR).await?;
    let out_dir = std::env::temp_dir();
    extract(url, &archive, &out_dir).await
}
```

### Synchronous API

```rust
use postgresql_archive::configuration::theseus;
use postgresql_archive::{Result, VersionReq};
use postgresql_archive::blocking::{extract, get_archive};

fn main() -> Result<()> {
    let url = theseus::URL;
    let (archive_version, archive) = get_archive(url, &VersionReq::STAR)?;
    let out_dir = std::env::temp_dir();
    extract(url, &archive, &out_dir)
}
```

## Feature flags

postgresql_archive uses [feature flags] to address compile time and binary size concerns. The
following features are available:

| Name         | Description                        | Default? |
|--------------|------------------------------------|----------|
| `blocking`   | Enables the blocking API           | No       |
| `indicatif`  | Enables tracing-indicatif support  | No       |
| `native-tls` | Enables native-tls support         | Yes      |
| `rustls`     | Enables rustls support             | No       |

### Configurations

| Name      | Description                         | Default? |
|-----------|-------------------------------------|----------|
| `theseus` | Enables theseus PostgreSQL binaries | Yes      |
| `zonky`   | Enables zonky PostgreSQL binaries   | No       |

### Extractors

| Name     | Description              | Default? |
|----------|--------------------------|----------|
| `tar-gz` | Enables tar gz extractor | Yes      |
| `tar-xz` | Enables tar xz extractor | No       |
| `zip`    | Enables zip extractor    | No       |

### Hashers

| Name   | Description          | Default? 
| |--------|----------------------|----------| | `md5` | Enables md5 hashers | No | | `sha1` | Enables sha1 hashers | No | | `sha2` | Enables sha2 hashers | Yes¹ | ¹ enabled by the `theseus` feature flag. ### Repositories | Name | Description | Default? | |----------|---------------------------|----------| | `github` | Enables github repository | Yes¹ | | `maven` | Enables maven repository | No | ¹ enabled by the `theseus` feature flag. ## Supported platforms `postgresql_archive` provides implementations for the following: * [theseus-rs/postgresql-binaries](https://github.com/theseus-rs/postgresql-binaries) * [zonkyio/embedded-postgres-binaries](https://github.com/zonkyio/embedded-postgres-binaries) ## License Licensed under either of * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0) * MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT) at your option. ## Contribution Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. 
================================================ FILE: postgresql_archive/benches/archive.rs ================================================
use criterion::{Criterion, criterion_group, criterion_main};
use postgresql_archive::blocking::{extract, get_archive};
use postgresql_archive::configuration::theseus;
use postgresql_archive::{Result, VersionReq};
use std::time::Duration;

/// Criterion entry point; a failed download skips the benchmark instead of panicking.
fn benchmarks(criterion: &mut Criterion) {
    bench_extract(criterion).ok();
}

/// Downloads the latest theseus archive once, then benchmarks extracting it repeatedly.
///
/// # Errors
/// * If the archive cannot be downloaded.
fn bench_extract(criterion: &mut Criterion) -> Result<()> {
    let version_req = VersionReq::STAR;
    let (_archive_version, archive) = get_archive(theseus::URL, &version_req)?;

    criterion.bench_function("extract", |bencher| {
        bencher.iter(|| {
            extract_archive(&archive).ok();
        });
    });
    Ok(())
}

/// Extracts `archive` into a fresh temporary directory.
///
/// Holding the `TempDir` guard (instead of immediately converting it to a `PathBuf`) removes the
/// directory on drop even when `extract` fails; the previous create/remove sequence leaked the
/// directory on the error path because `remove_dir_all` was skipped by `?`, and it also
/// recreated a directory that `tempdir()` had just created and dropped.
///
/// # Errors
/// * If the extraction fails.
fn extract_archive(archive: &Vec<u8>) -> Result<()> {
    let temp_dir = tempfile::tempdir()?;
    extract(theseus::URL, archive, temp_dir.path())?;
    Ok(())
}

criterion_group!(
    name = benches;
    config = Criterion::default()
        .measurement_time(Duration::from_secs(30))
        .sample_size(10);
    targets = benchmarks
);
criterion_main!(benches);

================================================ FILE: postgresql_archive/src/archive.rs ================================================
//! Manage PostgreSQL archives
#![allow(dead_code)]

use crate::error::Result;
use crate::{extractor, repository};
use regex_lite::Regex;
use semver::{Version, VersionReq};
use std::path::{Path, PathBuf};
use tracing::instrument;

/// Gets the version for the specified [version requirement](VersionReq). If a version for the
/// [version requirement](VersionReq) is not found, then an error is returned.
///
/// # Errors
/// * If the version is not found.
#[instrument(level = "debug")] pub async fn get_version(url: &str, version_req: &VersionReq) -> Result { let repository = repository::registry::get(url)?; let version = repository.get_version(version_req).await?; Ok(version) } /// Gets the archive for a given [version requirement](VersionReq) that passes the default /// matcher. If no archive is found for the [version requirement](VersionReq) and matcher then /// an [error](crate::error::Error) is returned. /// /// # Errors /// * If the archive is not found. /// * If the archive cannot be downloaded. #[instrument] pub async fn get_archive(url: &str, version_req: &VersionReq) -> Result<(Version, Vec)> { let repository = repository::registry::get(url)?; let archive = repository.get_archive(version_req).await?; let version = archive.version().clone(); let bytes = archive.bytes().to_vec(); Ok((version, bytes)) } /// Extracts the compressed tar `bytes` to the [out_dir](Path). /// /// # Errors /// Returns an error if the extraction fails. #[instrument(skip(bytes))] pub async fn extract(url: &str, bytes: &Vec, out_dir: &Path) -> Result> { let extractor_fn = extractor::registry::get(url)?; let mut extract_directories = extractor::ExtractDirectories::default(); extract_directories.add_mapping(Regex::new(".*")?, out_dir.to_path_buf()); extractor_fn(bytes, &extract_directories) } #[cfg(test)] mod tests { use super::*; use crate::configuration::theseus::URL; #[tokio::test] async fn test_get_version() -> Result<()> { let version_req = VersionReq::parse("=16.4.0")?; let version = get_version(URL, &version_req).await?; assert_eq!(Version::new(16, 4, 0), version); Ok(()) } #[tokio::test] async fn test_get_archive() -> Result<()> { let version_req = VersionReq::parse("=16.4.0")?; let (version, bytes) = get_archive(URL, &version_req).await?; assert_eq!(Version::new(16, 4, 0), version); assert!(!bytes.is_empty()); Ok(()) } } ================================================ FILE: postgresql_archive/src/blocking/archive.rs 
================================================
use crate::{Version, VersionReq};
use std::path::{Path, PathBuf};
use std::sync::LazyLock;
use tokio::runtime::Runtime;

/// Tokio runtime shared by all blocking wrappers; created lazily on first use. `expect` is used
/// instead of `unwrap` so a failure to start the runtime reports a meaningful message.
static RUNTIME: LazyLock<Runtime> =
    LazyLock::new(|| Runtime::new().expect("failed to create Tokio runtime"));

/// Gets the version for the specified [version requirement](VersionReq). If a version for the
/// [version requirement](VersionReq) is not found, then an error is returned.
///
/// # Errors
/// * If the version is not found.
pub fn get_version(url: &str, version_req: &VersionReq) -> crate::Result<Version> {
    // `block_on` accepts the future directly; the former `async move` wrapper was redundant.
    RUNTIME
        .handle()
        .block_on(crate::get_version(url, version_req))
}

/// Gets the archive for a given [version requirement](VersionReq) that passes the default
/// matcher.
///
/// If no archive is found for the [version requirement](VersionReq) and matcher then
/// an [error](crate::error::Error) is returned.
///
/// # Errors
/// * If the archive is not found.
/// * If the archive cannot be downloaded.
pub fn get_archive(url: &str, version_req: &VersionReq) -> crate::Result<(Version, Vec<u8>)> {
    RUNTIME
        .handle()
        .block_on(crate::get_archive(url, version_req))
}

/// Extracts the compressed tar `bytes` to the [out_dir](Path).
///
/// # Errors
/// Returns an error if the extraction fails.
pub fn extract(url: &str, bytes: &Vec<u8>, out_dir: &Path) -> crate::Result<Vec<PathBuf>> {
    RUNTIME
        .handle()
        .block_on(crate::extract(url, bytes, out_dir))
}

================================================ FILE: postgresql_archive/src/blocking/mod.rs ================================================
mod archive;

pub use archive::{extract, get_archive, get_version};

================================================ FILE: postgresql_archive/src/configuration/custom/matcher.rs ================================================
use semver::Version;

/// Matcher for PostgreSQL binaries from custom GitHub release repositories following the same
/// pattern as <https://github.com/theseus-rs/postgresql-binaries>
///
/// # Errors
/// * If the asset matcher fails.
pub fn matcher(_url: &str, name: &str, version: &Version) -> crate::Result { let target = target_triple::TARGET; // TODO: consider relaxing the version format to allow for more flexibility in where the version // and target appear in the filename. let expected_name = format!("postgresql-{version}-{target}.tar.gz"); Ok(name == expected_name) } #[cfg(test)] mod tests { use super::*; use crate::{Result, matcher}; const TEST_URL: &str = "https://github.com/owner/repo"; #[test] fn test_register_custom_repo() -> Result<()> { #[expect(clippy::unnecessary_wraps)] fn supports_fn(url: &str) -> Result { Ok(url == TEST_URL) } matcher::registry::register(supports_fn, matcher)?; let matcher = matcher::registry::get(TEST_URL)?; let version = Version::new(16, 3, 0); let expected_name = format!("postgresql-{}-{}.tar.gz", version, target_triple::TARGET); assert!(matcher("", &expected_name, &version)?); Ok(()) } } ================================================ FILE: postgresql_archive/src/configuration/custom/mod.rs ================================================ pub mod matcher; pub use matcher::matcher; ================================================ FILE: postgresql_archive/src/configuration/mod.rs ================================================ pub mod custom; #[cfg(feature = "theseus")] pub mod theseus; #[cfg(feature = "zonky")] pub mod zonky; ================================================ FILE: postgresql_archive/src/configuration/theseus/extractor.rs ================================================ use crate::Error::Unexpected; use crate::Result; use crate::extractor::{ExtractDirectories, tar_gz_extract}; use regex_lite::Regex; use std::fs::{create_dir_all, remove_dir_all, remove_file, rename}; use std::path::{Path, PathBuf}; use std::thread::sleep; use std::time::Duration; use tracing::{debug, instrument, warn}; /// Extracts the compressed tar `bytes` to the [out_dir](Path). /// /// # Errors /// Returns an error if the extraction fails. 
#[instrument(skip(bytes))] pub fn extract(bytes: &Vec, extract_directories: &ExtractDirectories) -> Result> { let out_dir = extract_directories.get_path(".")?; let parent_dir = if let Some(parent) = out_dir.parent() { parent } else { debug!("No parent directory for {}", out_dir.to_string_lossy()); out_dir.as_path() }; create_dir_all(parent_dir)?; let lock_file = acquire_lock(parent_dir)?; // If the directory already exists, then the archive has already been // extracted by another process. if out_dir.exists() { debug!( "Directory already exists {}; skipping extraction: ", out_dir.to_string_lossy() ); remove_file(&lock_file)?; return Ok(Vec::new()); } let extract_dir = tempfile::tempdir_in(parent_dir)?.keep(); debug!("Extracting archive to {}", extract_dir.to_string_lossy()); let mut archive_extract_directories = ExtractDirectories::default(); archive_extract_directories.add_mapping(Regex::new(".*")?, extract_dir.clone()); let files = tar_gz_extract(bytes, &archive_extract_directories)?; if out_dir.exists() { debug!( "Directory already exists {}; skipping rename and removing extraction directory: {}", out_dir.to_string_lossy(), extract_dir.to_string_lossy() ); remove_dir_all(&extract_dir)?; } else { debug!( "Renaming {} to {}", extract_dir.to_string_lossy(), out_dir.to_string_lossy() ); rename(extract_dir, out_dir)?; } if lock_file.is_file() { debug!("Removing lock file: {}", lock_file.to_string_lossy()); remove_file(lock_file)?; } Ok(files) } /// Acquires a lock file in the [out_dir](Path) to prevent multiple processes from extracting the /// archive at the same time. /// /// # Errors /// * If the lock file cannot be acquired. 
#[instrument(level = "debug")]
fn acquire_lock(out_dir: &Path) -> Result<PathBuf> {
    let lock_file = out_dir.join("postgresql-archive.lock");

    // Remove a lock left behind by a crashed process so extraction can recover.
    if lock_file.is_file() {
        let metadata = lock_file.metadata()?;
        let created = metadata.created()?;
        if created.elapsed()?.as_secs() > 300 {
            warn!(
                "Stale lock file detected; removing file to attempt process recovery: {}",
                lock_file.to_string_lossy()
            );
            remove_file(&lock_file)?;
        }
    }

    debug!(
        "Attempting to acquire lock: {}",
        lock_file.to_string_lossy()
    );

    // Retry for up to 30 seconds before giving up.
    for _ in 0..30 {
        // `create_new` fails with `AlreadyExists` when the lock file is already present; the
        // previous `create(true).truncate(true)` succeeded even when another process held the
        // lock, so it never actually provided mutual exclusion.
        let lock = std::fs::OpenOptions::new()
            .create_new(true)
            .write(true)
            .open(&lock_file);
        match lock {
            Ok(_) => {
                debug!("Lock acquired: {}", lock_file.to_string_lossy());
                return Ok(lock_file);
            }
            Err(error) => {
                warn!("unable to acquire lock: {error}");
                sleep(Duration::from_secs(1));
            }
        }
    }

    Err(Unexpected("Failed to acquire lock".to_string()))
}

================================================ FILE: postgresql_archive/src/configuration/theseus/matcher.rs ================================================
use semver::Version;

/// Matcher for PostgreSQL binaries from <https://github.com/theseus-rs/postgresql-binaries>
///
/// # Errors
/// * If the asset matcher fails.
pub fn matcher(_url: &str, name: &str, version: &Version) -> crate::Result<bool> {
    // The asset name must match the theseus naming scheme exactly: version and target triple.
    let expected_name = format!("postgresql-{version}-{}.tar.gz", target_triple::TARGET);
    Ok(name == expected_name)
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::Result;

    #[test]
    fn test_asset_match_success() -> Result<()> {
        let version = Version::parse("16.4.0")?;
        let name = format!("postgresql-{version}-{}.tar.gz", target_triple::TARGET);
        assert!(matcher("", name.as_str(), &version)?, "{}", name);
        Ok(())
    }

    #[test]
    fn test_asset_match_errors() -> Result<()> {
        let version = Version::parse("16.4.0")?;
        let target = target_triple::TARGET;
        // Each candidate drops or mangles one required component of the expected name.
        let names = vec![
            format!("foo-{version}-{target}.tar.gz"),
            format!("postgresql-{target}.tar.gz"),
            format!("postgresql-{version}.tar.gz"),
            format!("postgresql-{version}-{target}.tar"),
            format!("postgresql-{version}-{target}"),
        ];
        for name in names {
            assert!(!matcher("", name.as_str(), &version)?, "{}", name);
        }
        Ok(())
    }
}

================================================ FILE: postgresql_archive/src/configuration/theseus/mod.rs ================================================
mod extractor;
mod matcher;

pub const URL: &str = "https://github.com/theseus-rs/postgresql-binaries";

pub use extractor::extract;
pub use matcher::matcher;

================================================ FILE: postgresql_archive/src/configuration/zonky/extractor.rs ================================================
use crate::Error::Unexpected;
use crate::Result;
use crate::extractor::{ExtractDirectories, tar_xz_extract};
use regex_lite::Regex;
use std::fs::{create_dir_all, remove_dir_all, remove_file, rename};
use std::io::Cursor;
use std::path::{Path, PathBuf};
use std::thread::sleep;
use std::time::Duration;
use tracing::{debug, instrument, warn};
use zip::ZipArchive;

/// Extracts the compressed tar `bytes` to the [out_dir](Path).
///
/// # Errors
/// Returns an error if the extraction fails.
#[expect(clippy::case_sensitive_file_extension_comparisons)] #[instrument(skip(bytes))] pub fn extract(bytes: &Vec, extract_directories: &ExtractDirectories) -> Result> { let out_dir = extract_directories.get_path(".")?; let parent_dir = if let Some(parent) = out_dir.parent() { parent } else { debug!("No parent directory for {}", out_dir.to_string_lossy()); out_dir.as_path() }; create_dir_all(parent_dir)?; let lock_file = acquire_lock(parent_dir)?; // If the directory already exists, then the archive has already been // extracted by another process. if out_dir.exists() { debug!( "Directory already exists {}; skipping extraction: ", out_dir.to_string_lossy() ); remove_file(&lock_file)?; return Ok(Vec::new()); } let extract_dir = tempfile::tempdir_in(parent_dir)?.keep(); debug!("Extracting archive to {}", extract_dir.to_string_lossy()); let reader = Cursor::new(bytes); let mut archive = ZipArchive::new(reader)?; let mut archive_bytes = Vec::new(); for i in 0..archive.len() { let mut file = archive.by_index(i)?; let file_name = file.name().to_string(); if file_name.ends_with(".txz") { debug!("Found archive file: {file_name}"); std::io::copy(&mut file, &mut archive_bytes)?; break; } } if archive_bytes.is_empty() { return Err(Unexpected("Failed to find archive file".to_string())); } let mut archive_extract_directories = ExtractDirectories::default(); archive_extract_directories.add_mapping(Regex::new(".*")?, extract_dir.clone()); let files = tar_xz_extract(&archive_bytes, &archive_extract_directories)?; if out_dir.exists() { debug!( "Directory already exists {}; skipping rename and removing extraction directory: {}", out_dir.to_string_lossy(), extract_dir.to_string_lossy() ); remove_dir_all(&extract_dir)?; } else { debug!( "Renaming {} to {}", extract_dir.to_string_lossy(), out_dir.to_string_lossy() ); rename(extract_dir, out_dir)?; } if lock_file.is_file() { debug!("Removing lock file: {}", lock_file.to_string_lossy()); remove_file(lock_file)?; } Ok(files) } /// 
Acquires a lock file in the [out_dir](Path) to prevent multiple processes from extracting the
/// archive at the same time.
///
/// # Errors
/// * If the lock file cannot be acquired.
#[instrument(level = "debug")]
fn acquire_lock(out_dir: &Path) -> crate::Result<PathBuf> {
    let lock_file = out_dir.join("postgresql-archive.lock");

    // Remove a lock left behind by a crashed process so extraction can recover.
    if lock_file.is_file() {
        let metadata = lock_file.metadata()?;
        let created = metadata.created()?;
        if created.elapsed()?.as_secs() > 300 {
            warn!(
                "Stale lock file detected; removing file to attempt process recovery: {}",
                lock_file.to_string_lossy()
            );
            remove_file(&lock_file)?;
        }
    }

    debug!(
        "Attempting to acquire lock: {}",
        lock_file.to_string_lossy()
    );

    // Retry for up to 30 seconds before giving up.
    for _ in 0..30 {
        // `create_new` fails with `AlreadyExists` when the lock file is already present; the
        // previous `create(true).truncate(true)` succeeded even when another process held the
        // lock, so it never actually provided mutual exclusion.
        let lock = std::fs::OpenOptions::new()
            .create_new(true)
            .write(true)
            .open(&lock_file);
        match lock {
            Ok(_) => {
                debug!("Lock acquired: {}", lock_file.to_string_lossy());
                return Ok(lock_file);
            }
            Err(error) => {
                warn!("unable to acquire lock: {error}");
                sleep(Duration::from_secs(1));
            }
        }
    }

    Err(Unexpected("Failed to acquire lock".to_string()))
}

================================================ FILE: postgresql_archive/src/configuration/zonky/matcher.rs ================================================
use crate::Result;
use semver::Version;
use std::env;

/// Matcher for PostgreSQL binaries from <https://github.com/zonkyio/embedded-postgres-binaries>
///
/// # Errors
/// * If the asset matcher fails.
pub fn matcher(_url: &str, name: &str, version: &Version) -> Result<bool> {
    let os = get_os();
    let arch = get_arch();
    let expected_name = format!("embedded-postgres-binaries-{os}-{arch}-{version}.jar");
    Ok(name == expected_name)
}

/// Returns the operating system of the current system.
pub(crate) fn get_os() -> &'static str {
    match env::consts::OS {
        // Zonky publishes macOS binaries under the "darwin" name.
        "macos" => "darwin",
        os => os,
    }
}

/// Returns the architecture of the current system.
pub(crate) fn get_arch() -> &'static str {
    // Map Rust's architecture names onto the names used by zonky's Maven artifacts;
    // anything unrecognized passes through unchanged.
    match env::consts::ARCH {
        "aarch64" => "arm64v8",
        "arm" => "arm32v7",
        "powerpc64" => "ppc64le",
        "x86" => "i386",
        "x86_64" => "amd64",
        arch => arch,
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::Result;

    #[test]
    fn test_asset_match_success() -> Result<()> {
        let version = Version::parse("16.4.0")?;
        let name = format!(
            "embedded-postgres-binaries-{}-{}-{version}.jar",
            get_os(),
            get_arch()
        );
        assert!(matcher("", name.as_str(), &version)?, "{}", name);
        Ok(())
    }

    #[test]
    fn test_asset_match_errors() -> Result<()> {
        let os = get_os();
        let arch = get_arch();
        let version = Version::parse("16.4.0")?;
        // Each candidate drops or mangles one required component of the expected name.
        let names = vec![
            format!("foo-{os}-{arch}-{version}.jar"),
            format!("embedded-postgres-binaries-{arch}-{version}.jar"),
            format!("embedded-postgres-binaries-{os}-{version}.jar"),
            format!("embedded-postgres-binaries-{os}-{arch}.jar"),
            format!("embedded-postgres-binaries-{os}-{arch}-{version}.zip"),
        ];
        for name in names {
            assert!(!matcher("", name.as_str(), &version)?, "{}", name);
        }
        Ok(())
    }
}

================================================ FILE: postgresql_archive/src/configuration/zonky/mod.rs ================================================
mod extractor;
mod matcher;
mod repository;

pub const URL: &str = "https://github.com/zonkyio/embedded-postgres-binaries";

pub use extractor::extract;
pub use matcher::matcher;
pub use repository::Zonky;

================================================ FILE: postgresql_archive/src/configuration/zonky/repository.rs ================================================
use crate::Result;
use crate::configuration::zonky::matcher::{get_arch, get_os};
use crate::repository::Archive;
use crate::repository::maven::repository::Maven;
use crate::repository::model::Repository;
use async_trait::async_trait;
use semver::{Version, VersionReq};
use tracing::instrument;

/// Zonky repository.
/// /// This repository is used to interact with Zonky Maven repositories /// (e.g. ). #[derive(Debug)] pub struct Zonky { maven: Box, } const MAVEN_URL: &str = "https://repo1.maven.org/maven2/io/zonky/test/postgres"; impl Zonky { /// Creates a new Zonky repository from the specified URL in the format /// /// /// # Errors /// * If the URL is invalid. #[expect(clippy::new_ret_no_self)] pub fn new(_url: &str) -> Result> { let os = get_os(); let arch = get_arch(); let archive = format!("embedded-postgres-binaries-{os}-{arch}"); let url = format!("{MAVEN_URL}/{archive}"); let maven = Maven::new(url.as_str())?; Ok(Box::new(Zonky { maven })) } } #[async_trait] impl Repository for Zonky { #[instrument(level = "debug")] fn name(&self) -> &str { "Zonky" } #[instrument(level = "debug")] async fn get_version(&self, version_req: &VersionReq) -> Result { self.maven.get_version(version_req).await } #[instrument] async fn get_archive(&self, version_req: &VersionReq) -> Result { self.maven.get_archive(version_req).await } } #[cfg(test)] mod tests { use super::*; use crate::configuration::zonky; #[test] fn test_name() { let zonky = Zonky::new(zonky::URL).unwrap(); assert_eq!("Zonky", zonky.name()); } // // get_version tests // #[tokio::test] async fn test_get_version() -> Result<()> { let maven = Zonky::new(zonky::URL)?; let version_req = VersionReq::STAR; let version = maven.get_version(&version_req).await?; assert!(version > Version::new(0, 0, 0)); Ok(()) } #[tokio::test] async fn test_get_specific_version() -> Result<()> { let zonky = Zonky::new(zonky::URL)?; let version_req = VersionReq::parse("=16.2.0")?; let version = zonky.get_version(&version_req).await?; assert_eq!(Version::new(16, 2, 0), version); Ok(()) } #[tokio::test] async fn test_get_specific_not_found() -> Result<()> { let zonky = Zonky::new(zonky::URL)?; let version_req = VersionReq::parse("=0.0.0")?; let error = zonky.get_version(&version_req).await.unwrap_err(); assert_eq!("version not found for '=0.0.0'", 
error.to_string()); Ok(()) } // // get_archive tests // #[tokio::test] async fn test_get_archive() -> Result<()> { let zonky = Zonky::new(zonky::URL)?; let os = get_os(); let arch = get_arch(); let version = Version::new(16, 2, 0); let version_req = VersionReq::parse(format!("={version}").as_str())?; let archive = zonky.get_archive(&version_req).await?; assert_eq!( format!("embedded-postgres-binaries-{os}-{arch}-{version}.jar"), archive.name() ); assert_eq!(&version, archive.version()); assert!(!archive.bytes().is_empty()); Ok(()) } } ================================================ FILE: postgresql_archive/src/error.rs ================================================ use std::sync::PoisonError; /// PostgreSQL archive result type pub type Result = core::result::Result; /// PostgreSQL archive errors #[derive(Debug, thiserror::Error)] pub enum Error { /// Asset not found #[error("asset not found")] AssetNotFound, /// Asset hash not found #[error("asset hash not found for asset '{0}'")] AssetHashNotFound(String), /// Error when the hash of the archive does not match the expected hash #[error("Archive hash [{archive_hash}] does not match expected hash [{hash}]")] ArchiveHashMismatch { archive_hash: String, hash: String }, /// Invalid version #[error("version '{0}' is invalid")] InvalidVersion(String), /// IO error #[error("{0}")] IoError(String), /// Parse error #[error("{0}")] ParseError(String), /// Poisoned lock #[error("poisoned lock '{0}'")] PoisonedLock(String), /// Repository failure #[error("{0}")] RepositoryFailure(String), /// Unexpected error #[error("{0}")] Unexpected(String), /// Unsupported extractor #[error("unsupported extractor for '{0}'")] UnsupportedExtractor(String), /// Unsupported hasher #[error("unsupported hasher for '{0}'")] UnsupportedHasher(String), /// Unsupported hasher #[error("unsupported matcher for '{0}'")] UnsupportedMatcher(String), /// Unsupported repository #[error("unsupported repository for '{0}'")] UnsupportedRepository(String), 
/// Version not found #[error("version not found for '{0}'")] VersionNotFound(String), } /// Converts a [`regex_lite::Error`] into an [`ParseError`](Error::ParseError) impl From for Error { fn from(error: regex_lite::Error) -> Self { Error::ParseError(error.to_string()) } } /// Converts a [`reqwest::Error`] into an [`IoError`](Error::IoError) impl From for Error { fn from(error: reqwest::Error) -> Self { Error::IoError(error.to_string()) } } /// Converts a [`reqwest_middleware::Error`] into an [`IoError`](Error::IoError) impl From for Error { fn from(error: reqwest_middleware::Error) -> Self { Error::IoError(error.to_string()) } } /// Converts a [`std::io::Error`] into an [`IoError`](Error::IoError) impl From for Error { fn from(error: std::io::Error) -> Self { Error::IoError(error.to_string()) } } /// Converts a [`std::time::SystemTimeError`] into an [`IoError`](Error::IoError) impl From for Error { fn from(error: std::time::SystemTimeError) -> Self { Error::IoError(error.to_string()) } } /// Converts a [`std::num::ParseIntError`] into an [`ParseError`](Error::ParseError) impl From for Error { fn from(error: std::num::ParseIntError) -> Self { Error::ParseError(error.to_string()) } } /// Converts a [`semver::Error`] into an [`ParseError`](Error::ParseError) impl From for Error { fn from(error: semver::Error) -> Self { Error::IoError(error.to_string()) } } /// Converts a [`std::path::StripPrefixError`] into an [`ParseError`](Error::ParseError) impl From for Error { fn from(error: std::path::StripPrefixError) -> Self { Error::ParseError(error.to_string()) } } /// Converts a [`url::ParseError`] into an [`ParseError`](Error::ParseError) impl From for Error { fn from(error: url::ParseError) -> Self { Error::ParseError(error.to_string()) } } #[cfg(feature = "maven")] /// Converts a [`quick_xml::DeError`] into a [`ParseError`](Error::ParseError) impl From for Error { fn from(error: quick_xml::DeError) -> Self { Error::ParseError(error.to_string()) } } #[cfg(feature = 
"zip")] /// Converts a [`zip::result::ZipError`] into a [`ParseError`](Error::Unexpected) impl From for Error { fn from(error: zip::result::ZipError) -> Self { Error::Unexpected(error.to_string()) } } /// Converts a [`std::sync::PoisonError`] into a [`ParseError`](Error::PoisonedLock) impl From> for Error { fn from(value: PoisonError) -> Self { Error::PoisonedLock(value.to_string()) } } /// These are relatively low value tests; they are here to reduce the coverage gap and /// ensure that the error conversions are working as expected. #[cfg(test)] mod test { use super::*; use anyhow::anyhow; use semver::VersionReq; use std::ops::Add; use std::path::PathBuf; use std::str::FromStr; use std::time::{Duration, SystemTime}; #[test] fn test_from_regex_error() { let regex_error = regex_lite::Regex::new("(?=a)").expect_err("regex error"); let error = Error::from(regex_error); assert_eq!(error.to_string(), "look-around is not supported"); } #[tokio::test] async fn test_from_reqwest_error() { let result = reqwest::get("https://a.com").await; assert!(result.is_err()); if let Err(error) = result { let error = Error::from(error); assert!(error.to_string().contains("error sending request")); } } #[tokio::test] async fn test_from_reqwest_middeleware_error() { let reqwest_middleware_error = reqwest_middleware::Error::Middleware(anyhow!("middleware error: test")); let error = Error::from(reqwest_middleware_error); assert!(error.to_string().contains("middleware error: test")); } #[test] fn test_from_io_error() { let io_error = std::io::Error::new(std::io::ErrorKind::NotFound, "test"); let error = Error::from(io_error); assert_eq!(error.to_string(), "test"); } #[test] fn test_from_parse_int_error() { let parse_int_error = u64::from_str("test").expect_err("parse int error"); let error = Error::from(parse_int_error); assert_eq!(error.to_string(), "invalid digit found in string"); } #[test] fn test_from_semver_error() { let semver_error = VersionReq::parse("foo").expect_err("semver 
error"); let error = Error::from(semver_error); assert_eq!( error.to_string(), "unexpected character 'f' while parsing major version number" ); } #[test] fn test_from_strip_prefix_error() { let path = PathBuf::from("test"); let strip_prefix_error = path.strip_prefix("foo").expect_err("strip prefix error"); let error = Error::from(strip_prefix_error); assert_eq!(error.to_string(), "prefix not found"); } #[test] fn test_from_system_time_error() { let future_time = SystemTime::now().add(Duration::from_secs(300)); let system_time_error = SystemTime::now() .duration_since(future_time) .expect_err("system time error"); let error = Error::from(system_time_error); assert_eq!( error.to_string(), "second time provided was later than self" ); } #[test] fn test_from_url_parse_error() { let parse_error = url::ParseError::EmptyHost; let error = Error::from(parse_error); assert_eq!(error.to_string(), "empty host"); } #[cfg(feature = "maven")] #[test] fn test_from_quick_xml_error() { let xml = ""; let quick_xml_error = quick_xml::de::from_str::(xml).expect_err("quick_xml error"); let error = Error::from(quick_xml_error); assert!(matches!(error, Error::ParseError(_))); } #[cfg(feature = "zip")] #[test] fn test_from_zip_error() { let zip_error = zip::result::ZipError::FileNotFound; let error = Error::from(zip_error); assert!(matches!(error, Error::Unexpected(_))); assert!( error .to_string() .contains("specified file not found in archive") ); } #[test] fn test_from_poisoned_lock() { let error = Error::from(std::sync::PoisonError::new(())); assert!(matches!(error, Error::PoisonedLock(_))); assert!(error.to_string().contains("poisoned lock")); } } ================================================ FILE: postgresql_archive/src/extractor/mod.rs ================================================ mod model; pub mod registry; #[cfg(feature = "tar-gz")] mod tar_gz_extractor; #[cfg(feature = "tar-xz")] mod tar_xz_extractor; #[cfg(feature = "zip")] mod zip_extractor; pub use 
model::ExtractDirectories; #[cfg(feature = "tar-gz")] pub use tar_gz_extractor::extract as tar_gz_extract; #[cfg(feature = "tar-xz")] pub use tar_xz_extractor::extract as tar_xz_extract; #[cfg(feature = "zip")] pub use zip_extractor::extract as zip_extract; ================================================ FILE: postgresql_archive/src/extractor/model.rs ================================================ use crate::{Error, Result}; use regex_lite::Regex; use std::fmt::Display; use std::path::PathBuf; /// Extract directories manage the directories to extract a file in an archive to based upon the /// associated regex matching the file path. #[derive(Debug)] pub struct ExtractDirectories { mappings: Vec<(Regex, PathBuf)>, } impl ExtractDirectories { /// Creates a new ExtractDirectories instance. #[must_use] pub fn new(mappings: Vec<(Regex, PathBuf)>) -> Self { Self { mappings } } /// Adds a new mapping to the ExtractDirectories instance. pub fn add_mapping(&mut self, regex: Regex, path: PathBuf) { self.mappings.push((regex, path)); } /// Returns the path associated with the first regex that matches the file path. /// If no regex matches, then the file path is returned. /// /// # Errors /// Returns an error if the file path cannot be converted to a string. pub fn get_path(&self, file_path: &str) -> Result { for (regex, path) in &self.mappings { if regex.is_match(file_path) { return Ok(path.clone()); } } Err(Error::Unexpected(format!( "No regex matched the file path: {file_path}" ))) } } /// Default implementation for ExtractDirectories. impl Default for ExtractDirectories { /// Creates a new ExtractDirectories instance with an empty mappings vector. fn default() -> Self { ExtractDirectories::new(Vec::new()) } } /// Display implementation for ExtractDirectories. impl Display for ExtractDirectories { /// Formats the ExtractDirectories instance. 
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { for (regex, path) in &self.mappings { writeln!(f, "{} -> {}", regex, path.display())?; } Ok(()) } } #[cfg(test)] mod tests { use super::*; #[test] fn test_new() -> Result<()> { let mappings = vec![(Regex::new(".*")?, PathBuf::from("test"))]; let extract_directories = ExtractDirectories::new(mappings); let path = extract_directories.get_path("foo")?; assert_eq!("test", path.to_string_lossy()); Ok(()) } #[test] fn test_default() { let extract_directories = ExtractDirectories::default(); let result = extract_directories.get_path("foo"); assert!(result.is_err()); } #[test] fn test_add_mapping() -> Result<()> { let mut extract_directories = ExtractDirectories::default(); extract_directories.add_mapping(Regex::new(".*")?, PathBuf::from("test")); let path = extract_directories.get_path("foo")?; assert_eq!("test", path.to_string_lossy()); Ok(()) } #[test] fn test_get_path() -> Result<()> { let mappings = vec![ (Regex::new("test")?, PathBuf::from("test")), (Regex::new("foo")?, PathBuf::from("bar")), ]; let extract_directories = ExtractDirectories::new(mappings); let path = extract_directories.get_path("foo")?; assert_eq!("bar", path.to_string_lossy()); Ok(()) } #[test] fn test_display() -> Result<()> { let mappings = vec![ (Regex::new("test")?, PathBuf::from("test")), (Regex::new("foo")?, PathBuf::from("bar")), ]; let extract_directories = ExtractDirectories::new(mappings); let display = extract_directories.to_string(); assert_eq!("test -> test\nfoo -> bar\n", display); Ok(()) } } ================================================ FILE: postgresql_archive/src/extractor/registry.rs ================================================ use crate::Error::UnsupportedExtractor; use crate::Result; #[cfg(feature = "theseus")] use crate::configuration::theseus; #[cfg(feature = "zonky")] use crate::configuration::zonky; use crate::extractor::ExtractDirectories; use std::path::PathBuf; use std::sync::{Arc, LazyLock, Mutex, 
RwLock}; static REGISTRY: LazyLock>> = LazyLock::new(|| Arc::new(Mutex::new(RepositoryRegistry::default()))); type SupportsFn = fn(&str) -> Result; type ExtractFn = fn(&Vec, &ExtractDirectories) -> Result>; /// Singleton struct to store extractors #[expect(clippy::type_complexity)] struct RepositoryRegistry { extractors: Vec<(Arc>, Arc>)>, } impl RepositoryRegistry { /// Creates a new extractor registry. fn new() -> Self { Self { extractors: Vec::new(), } } /// Registers an extractor. Newly registered extractors take precedence over existing ones. fn register(&mut self, supports_fn: SupportsFn, extract_fn: ExtractFn) { self.extractors.insert( 0, ( Arc::new(RwLock::new(supports_fn)), Arc::new(RwLock::new(extract_fn)), ), ); } /// Gets an extractor that supports the specified URL /// /// # Errors /// * If the URL is not supported. fn get(&self, url: &str) -> Result { for (supports_fn, extractor_fn) in &self.extractors { let supports_function = supports_fn.read()?; if supports_function(url)? { let extractor_function = extractor_fn.read()?; return Ok(*extractor_function); } } Err(UnsupportedExtractor(url.to_string())) } } impl Default for RepositoryRegistry { /// Creates a new repository registry with the default repositories registered. fn default() -> Self { let mut registry = Self::new(); #[cfg(feature = "theseus")] registry.register(|url| Ok(url.starts_with(theseus::URL)), theseus::extract); #[cfg(feature = "zonky")] registry.register(|url| Ok(url.starts_with(zonky::URL)), zonky::extract); registry } } /// Registers an extractor. Newly registered extractors take precedence over existing ones. /// /// # Errors /// * If the registry is poisoned. pub fn register(supports_fn: SupportsFn, extractor_fn: ExtractFn) -> Result<()> { REGISTRY.lock()?.register(supports_fn, extractor_fn); Ok(()) } /// Gets an extractor that supports the specified URL /// /// # Errors /// * If the URL is not supported. 
pub fn get(url: &str) -> Result { REGISTRY.lock()?.get(url) } #[cfg(test)] mod tests { use super::*; use regex_lite::Regex; #[test] fn test_register() -> Result<()> { register(|url| Ok(url == "https://foo.com"), |_, _| Ok(Vec::new()))?; let url = "https://foo.com"; let extractor = get(url)?; let mut extract_directories = ExtractDirectories::default(); extract_directories.add_mapping(Regex::new(".*")?, PathBuf::from("test")); assert!(extractor(&Vec::new(), &extract_directories).is_ok()); Ok(()) } #[test] fn test_get_error() { let error = get("foo").unwrap_err(); assert_eq!("unsupported extractor for 'foo'", error.to_string()); } #[test] #[cfg(feature = "theseus")] fn test_get_theseus_postgresql_binaries() { assert!(get(theseus::URL).is_ok()); } } ================================================ FILE: postgresql_archive/src/extractor/tar_gz_extractor.rs ================================================ use crate::Error::Unexpected; use crate::Result; use crate::extractor::ExtractDirectories; use flate2::bufread::GzDecoder; use std::fs::{File, create_dir_all}; use std::io::{BufReader, Cursor, copy}; use std::path::PathBuf; use tar::Archive; use tracing::{debug, instrument, warn}; /// Extracts the compressed tar `bytes` to paths defined in `extract_directories`. /// /// # Errors /// Returns an error if the extraction fails. #[instrument(skip(bytes))] pub fn extract(bytes: &Vec, extract_directories: &ExtractDirectories) -> Result> { let mut files = Vec::new(); let input = BufReader::new(Cursor::new(bytes)); let decoder = GzDecoder::new(input); let mut archive = Archive::new(decoder); let mut extracted_bytes = 0; for archive_entry in archive.entries()? 
{ let mut entry = archive_entry?; let entry_header = entry.header(); let entry_type = entry_header.entry_type(); let entry_size = entry_header.size()?; #[cfg(unix)] let file_mode = entry_header.mode()?; let entry_header_path = entry_header.path()?.to_path_buf(); let prefix = match entry_header_path.components().next() { Some(component) => component.as_os_str().to_str().unwrap_or_default(), None => { return Err(Unexpected( "Failed to get file header path prefix".to_string(), )); } }; let stripped_entry_header_path = entry_header_path.strip_prefix(prefix)?.to_path_buf(); let Ok(extract_dir) = extract_directories.get_path(prefix) else { continue; }; let mut entry_name = extract_dir.clone(); entry_name.push(stripped_entry_header_path); if entry_type.is_dir() || entry_name.is_dir() { create_dir_all(&entry_name)?; } else if entry_type.is_file() { let mut output_file = File::create(&entry_name)?; copy(&mut entry, &mut output_file)?; extracted_bytes += entry_size; #[cfg(unix)] { use std::os::unix::fs::PermissionsExt; output_file.set_permissions(std::fs::Permissions::from_mode(file_mode))?; } files.push(entry_name); } else if entry_type.is_symlink() { #[cfg(unix)] if let Some(symlink_target) = entry.link_name()? 
{ let symlink_path = entry_name.clone(); std::os::unix::fs::symlink(symlink_target.as_ref(), symlink_path)?; files.push(entry_name); } } } let number_of_files = files.len(); debug!("Extracted {number_of_files} files totalling {extracted_bytes}"); Ok(files) } ================================================ FILE: postgresql_archive/src/extractor/tar_xz_extractor.rs ================================================ use crate::Error::Unexpected; use crate::Result; use crate::extractor::ExtractDirectories; use liblzma::bufread::XzDecoder; use std::fs::{File, create_dir_all}; use std::io::{BufReader, Cursor, copy}; use std::path::PathBuf; use tar::Archive; use tracing::{debug, instrument, warn}; /// Extracts the compressed tar `bytes` to paths defined in `extract_directories`. /// /// # Errors /// Returns an error if the extraction fails. #[instrument(skip(bytes))] pub fn extract(bytes: &Vec, extract_directories: &ExtractDirectories) -> Result> { let mut files = Vec::new(); let input = BufReader::new(Cursor::new(bytes)); let decoder = XzDecoder::new(input); let mut archive = Archive::new(decoder); let mut extracted_bytes = 0; for archive_entry in archive.entries()? 
{ let mut entry = archive_entry?; let entry_header = entry.header(); let entry_type = entry_header.entry_type(); let entry_size = entry_header.size()?; #[cfg(unix)] let file_mode = entry_header.mode()?; let entry_header_path = entry_header.path()?.to_path_buf(); let prefix = match entry_header_path.components().next() { Some(component) => component.as_os_str().to_str().unwrap_or_default(), None => { return Err(Unexpected( "Failed to get file header path prefix".to_string(), )); } }; let Ok(extract_dir) = extract_directories.get_path(prefix) else { continue; }; let mut entry_name = extract_dir.clone(); entry_name.push(entry_header_path); if entry_type.is_dir() || entry_name.is_dir() { create_dir_all(&entry_name)?; } else if entry_type.is_file() { if let Some(parent) = entry_name.parent() { create_dir_all(parent)?; } let mut output_file = File::create(&entry_name)?; copy(&mut entry, &mut output_file)?; extracted_bytes += entry_size; #[cfg(unix)] { use std::os::unix::fs::PermissionsExt; output_file.set_permissions(std::fs::Permissions::from_mode(file_mode))?; } files.push(entry_name); } else if entry_type.is_symlink() { #[cfg(unix)] if let Some(symlink_target) = entry.link_name()? { let symlink_path = entry_name.clone(); std::os::unix::fs::symlink(symlink_target.as_ref(), symlink_path)?; files.push(entry_name); } } } let number_of_files = files.len(); debug!("Extracted {number_of_files} files totalling {extracted_bytes}"); Ok(files) } ================================================ FILE: postgresql_archive/src/extractor/zip_extractor.rs ================================================ use crate::Result; use crate::extractor::ExtractDirectories; use std::fs::create_dir_all; use std::io::Cursor; use std::path::PathBuf; use std::{fs, io}; use tracing::{debug, instrument, warn}; use zip::ZipArchive; /// Extracts the compressed tar `bytes` to paths defined in `extract_directories`. /// /// # Errors /// Returns an error if the extraction fails. 
#[instrument(skip(bytes))] pub fn extract(bytes: &Vec, extract_directories: &ExtractDirectories) -> Result> { let mut files = Vec::new(); let reader = Cursor::new(bytes); let mut archive = ZipArchive::new(reader)?; let mut extracted_bytes = 0; for i in 0..archive.len() { let mut file = archive.by_index(i)?; let file_path = PathBuf::from(file.name()); let file_path = PathBuf::from(file_path.file_name().unwrap_or_default()); let file_name = file_path.to_string_lossy(); let Ok(extract_dir) = extract_directories.get_path(&file_name) else { continue; }; create_dir_all(&extract_dir)?; let mut out = Vec::new(); io::copy(&mut file, &mut out)?; extracted_bytes += out.len() as u64; let path = PathBuf::from(&extract_dir).join(file_path); fs::write(&path, out)?; files.push(path); } let number_of_files = files.len(); debug!("Extracted {number_of_files} files totalling {extracted_bytes}"); Ok(files) } ================================================ FILE: postgresql_archive/src/hasher/md5.rs ================================================ use crate::Result; use md5::{Digest, Md5}; /// Hashes the data using MD5. /// /// # Errors /// * If the data cannot be hashed. 
pub fn hash(data: &Vec) -> Result { let mut hasher = Md5::new(); hasher.update(data); let hash = hex::encode(hasher.finalize()); Ok(hash) } #[cfg(test)] mod tests { use super::*; #[test] fn test_hash() -> Result<()> { let data = vec![4, 2]; let hash = hash(&data)?; assert_eq!("21fb3d1d1a91a7e80dff456205f3380b", hash); Ok(()) } } ================================================ FILE: postgresql_archive/src/hasher/mod.rs ================================================ #[cfg(feature = "md5")] pub mod md5; pub mod registry; #[cfg(feature = "sha1")] pub mod sha1; #[cfg(feature = "sha2")] pub mod sha2_256; #[cfg(feature = "sha2")] pub mod sha2_512; ================================================ FILE: postgresql_archive/src/hasher/registry.rs ================================================ use crate::Error::UnsupportedHasher; use crate::Result; #[cfg(feature = "theseus")] use crate::configuration::theseus; #[cfg(feature = "md5")] use crate::hasher::md5; #[cfg(feature = "sha1")] use crate::hasher::sha1; #[cfg(feature = "sha2")] use crate::hasher::sha2_256; #[cfg(all(feature = "sha2", feature = "maven"))] use crate::hasher::sha2_512; #[cfg(feature = "maven")] use crate::repository::maven; use std::sync::{Arc, LazyLock, Mutex, RwLock}; static REGISTRY: LazyLock>> = LazyLock::new(|| Arc::new(Mutex::new(HasherRegistry::default()))); pub type SupportsFn = fn(&str, &str) -> Result; pub type HasherFn = fn(&Vec) -> Result; /// Singleton struct to store hashers #[expect(clippy::type_complexity)] struct HasherRegistry { hashers: Vec<(Arc>, Arc>)>, } impl HasherRegistry { /// Creates a new hasher registry. fn new() -> Self { Self { hashers: Vec::new(), } } /// Registers a hasher for a supports function. Newly registered hashers will take precedence /// over existing ones. 
fn register(&mut self, supports_fn: SupportsFn, hasher_fn: HasherFn) { self.hashers.insert( 0, ( Arc::new(RwLock::new(supports_fn)), Arc::new(RwLock::new(hasher_fn)), ), ); } /// Get a hasher for the specified url and extension. /// /// # Errors /// * If the registry is poisoned. fn get>(&self, url: S, extension: S) -> Result { let url = url.as_ref(); let extension = extension.as_ref(); for (supports_fn, hasher_fn) in &self.hashers { let supports_function = supports_fn.read()?; if supports_function(url, extension)? { let hasher_function = hasher_fn.read()?; return Ok(*hasher_function); } } Err(UnsupportedHasher(url.to_string())) } } impl Default for HasherRegistry { /// Creates a new hasher registry with the default hashers registered. fn default() -> Self { let mut registry = Self::new(); #[cfg(feature = "theseus")] registry.register( |url, extension| Ok(url.starts_with(theseus::URL) && extension == "sha256"), sha2_256::hash, ); // Register the Maven hashers: https://maven.apache.org/resolver/about-checksums.html#implemented-checksum-algorithms #[cfg(feature = "maven")] registry.register( |url, extension| Ok(url.starts_with(maven::URL) && extension == "md5"), md5::hash, ); #[cfg(feature = "maven")] registry.register( |url, extension| Ok(url.starts_with(maven::URL) && extension == "sha1"), sha1::hash, ); #[cfg(feature = "maven")] registry.register( |url, extension| Ok(url.starts_with(maven::URL) && extension == "sha256"), sha2_256::hash, ); #[cfg(feature = "maven")] registry.register( |url, extension| Ok(url.starts_with(maven::URL) && extension == "sha512"), sha2_512::hash, ); registry } } /// Registers a hasher for a supports function. Newly registered hashers will take precedence /// over existing ones. /// /// # Errors /// * If the registry is poisoned. pub fn register(supports_fn: SupportsFn, hasher_fn: HasherFn) -> Result<()> { REGISTRY.lock()?.register(supports_fn, hasher_fn); Ok(()) } /// Get a hasher for the specified url and extension. 
/// /// # Errors /// * If the registry is poisoned. pub fn get>(url: S, extension: S) -> Result { REGISTRY.lock()?.get(url, extension) } #[cfg(test)] mod tests { use super::*; fn test_hasher(extension: &str, expected: &str) -> Result<()> { let hasher = get("https://foo.com", extension)?; let data = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 0]; let hash = hasher(&data)?; assert_eq!(expected, hash); Ok(()) } #[test] fn test_register() -> Result<()> { register( |_, extension| Ok(extension == "test"), |_| Ok("42".to_string()), )?; test_hasher("test", "42") } #[test] fn test_get_invalid_url_error() { let error = get("https://foo.com", "foo").unwrap_err(); assert_eq!( "unsupported hasher for 'https://foo.com'", error.to_string() ); } #[test] #[cfg(feature = "theseus")] fn test_get_invalid_extension_error() { let error = get(theseus::URL, "foo").unwrap_err(); assert_eq!( format!("unsupported hasher for '{}'", theseus::URL), error.to_string() ); } #[test] #[cfg(feature = "theseus")] fn test_get_theseus_postgresql_binaries() { assert!(get(theseus::URL, "sha256").is_ok()); } #[test] #[cfg(feature = "maven")] fn test_get_zonky_postgresql_binaries() { assert!(get(maven::URL, "sha512").is_ok()); } } ================================================ FILE: postgresql_archive/src/hasher/sha1.rs ================================================ use crate::Result; use sha1::{Digest, Sha1}; /// Hashes the data using SHA1. /// /// # Errors /// * If the data cannot be hashed. 
pub fn hash(data: &Vec<u8>) -> Result<String> {
    Ok(hex::encode(Sha1::digest(data)))
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_hash() -> Result<()> {
        let bytes = vec![4, 2];
        let digest = hash(&bytes)?;
        assert_eq!("1f3e1678e699640dfa5173d3a52b004f5e164d87", digest);
        Ok(())
    }
}

================================================
FILE: postgresql_archive/src/hasher/sha2_256.rs
================================================
use crate::Result;
use sha2::{Digest, Sha256};

/// Hashes the data using SHA2-256.
///
/// # Errors
/// * If the data cannot be hashed.
pub fn hash(data: &Vec<u8>) -> Result<String> {
    Ok(hex::encode(Sha256::digest(data)))
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_hash() -> Result<()> {
        let bytes = vec![4, 2];
        let digest = hash(&bytes)?;
        assert_eq!(
            "b7586d310e5efb1b7d10a917ba5af403adbf54f4f77fe7fdcb4880a95dac7e7e",
            digest
        );
        Ok(())
    }
}

================================================
FILE: postgresql_archive/src/hasher/sha2_512.rs
================================================
use crate::Result;
use sha2::{Digest, Sha512};

/// Hashes the data using SHA2-512.
///
/// # Errors
/// * If the data cannot be hashed.
pub fn hash(data: &Vec<u8>) -> Result<String> {
    Ok(hex::encode(Sha512::digest(data)))
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_hash() -> Result<()> {
        let bytes = vec![4, 2];
        let digest = hash(&bytes)?;
        assert_eq!(
            "7df6418d1791a6fe80e726319f16f107534a663346f99e0d155e359a54f6c74391e2f3be19c995c3c903926d348bd86c339bd982e10f09aa776e4ff85d36387a",
            digest
        );
        Ok(())
    }
}

================================================
FILE: postgresql_archive/src/lib.rs
================================================
//! # postgresql_archive
//!
//!
[![Code Coverage](https://codecov.io/gh/theseus-rs/postgresql-embedded/branch/main/graph/badge.svg)](https://codecov.io/gh/theseus-rs/postgresql-embedded) //! [![Benchmarks](https://img.shields.io/badge/%F0%9F%90%B0_bencher-enabled-6ec241)](https://bencher.dev/perf/theseus-rs-postgresql-embedded) //! [![License](https://img.shields.io/crates/l/postgresql_archive?)](https://github.com/theseus-rs/postgresql-embedded/tree/main/postgresql_archive#license) //! [![Semantic Versioning](https://img.shields.io/badge/%E2%9A%99%EF%B8%8F_SemVer-2.0.0-blue)](https://semver.org/spec/v2.0.0.html) //! //! Retrieve and extract PostgreSQL on Linux, MacOS or Windows. //! //! ## Table of contents //! //! - [Examples](#examples) //! - [Feature flags](#feature-flags) //! - [Supported platforms](#supported-platforms) //! - [Safety](#safety) //! - [License](#license) //! - [Notes](#notes) //! //! ## Examples //! //! ### Asynchronous API //! //! ```no_run //! use postgresql_archive::{extract, get_archive, Result, VersionReq }; //! use postgresql_archive::configuration::theseus; //! //! #[tokio::main] //! async fn main() -> Result<()> { //! let url = theseus::URL; //! let (archive_version, archive) = get_archive(url, &VersionReq::STAR).await?; //! let out_dir = std::env::temp_dir(); //! let files = extract(url, &archive, &out_dir).await?; //! Ok(()) //! } //! ``` //! //! ### Synchronous API //! ```no_run //! #[cfg(feature = "blocking")] { //! use postgresql_archive::configuration::theseus; //! use postgresql_archive::VersionReq; //! use postgresql_archive::blocking::{extract, get_archive}; //! //! let url = theseus::URL; //! let (archive_version, archive) = get_archive(url, &VersionReq::STAR).unwrap(); //! let out_dir = std::env::temp_dir(); //! let result = extract(url, &archive, &out_dir).unwrap(); //! } //! ``` //! //! ## Feature flags //! //! postgresql_archive uses [feature flags] to address compile time and binary size //! uses. //! //! The following features are available: //! //! 
| Name | Description | Default? | //! |--------------|----------------------------|----------| //! | `blocking` | Enables the blocking API | No | //! | `native-tls` | Enables native-tls support | Yes | //! | `rustls` | Enables rustls support | No | //! //! ### Configurations //! //! | Name | Description | Default? | //! |-----------|-------------------------------------|----------| //! | `theseus` | Enables theseus PostgreSQL binaries | Yes | //! | `zonky` | Enables zonky PostgreSQL binaries | No | //! //! ### Extractors //! //! | Name | Description | Default? | //! |----------|--------------------------|----------| //! | `tar-gz` | Enables tar gz extractor | Yes | //! | `tar-xz` | Enables tar xz extractor | No | //! | `zip` | Enables zip extractor | No | //! //! ### Hashers //! //! | Name | Description | Default? | //! |--------|----------------------|----------| //! | `md5` | Enables md5 hashers | No | //! | `sha1` | Enables sha1 hashers | No | //! | `sha2` | Enables sha2 hashers | Yes¹ | //! //! ¹ enabled by the `theseus` feature flag. //! //! ### Repositories //! //! | Name | Description | Default? | //! |----------|---------------------------|----------| //! | `github` | Enables github repository | Yes¹ | //! | `maven` | Enables maven repository | No | //! //! ¹ enabled by the `theseus` feature flag. //! //! ## Supported platforms //! //! `postgresql_archive` provides implementations for the following: //! //! * [theseus-rs/postgresql-binaries](https://github.com/theseus-rs/postgresql-binaries) //! * [zonkyio/embedded-postgres-binaries](https://github.com/zonkyio/embedded-postgres-binaries) //! //! ## Safety //! //! This crate uses `#![forbid(unsafe_code)]` to ensure everything is implemented in 100% safe Rust. //! //! ## License //! //! Licensed under either of //! //! * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or ) //! * MIT license ([LICENSE-MIT](LICENSE-MIT) or ) //! //! at your option. //! //! 
PostgreSQL is covered under [The PostgreSQL License](https://opensource.org/licenses/postgresql). mod archive; #[cfg(feature = "blocking")] pub mod blocking; pub mod configuration; mod error; pub mod extractor; pub mod hasher; pub mod matcher; pub mod repository; mod version; pub use archive::{extract, get_archive, get_version}; pub use error::{Error, Result}; pub use semver::{Version, VersionReq}; pub use version::{ExactVersion, ExactVersionReq}; ================================================ FILE: postgresql_archive/src/matcher/mod.rs ================================================ pub mod registry; ================================================ FILE: postgresql_archive/src/matcher/registry.rs ================================================ use crate::Error::UnsupportedMatcher; use crate::Result; #[cfg(feature = "theseus")] use crate::configuration::theseus; #[cfg(feature = "zonky")] use crate::configuration::zonky; use semver::Version; use std::sync::{Arc, LazyLock, Mutex, RwLock}; static REGISTRY: LazyLock>> = LazyLock::new(|| Arc::new(Mutex::new(MatchersRegistry::default()))); pub type SupportsFn = fn(&str) -> Result; pub type MatcherFn = fn(&str, &str, &Version) -> Result; /// Singleton struct to store matchers #[expect(clippy::type_complexity)] struct MatchersRegistry { matchers: Vec<(Arc>, Arc>)>, } impl MatchersRegistry { /// Creates a new matcher registry. fn new() -> Self { Self { matchers: Vec::new(), } } /// Registers a matcher for a supports function. Newly registered matchers will take precedence /// over existing ones. fn register(&mut self, supports_fn: SupportsFn, matcher_fn: MatcherFn) { self.matchers.insert( 0, ( Arc::new(RwLock::new(supports_fn)), Arc::new(RwLock::new(matcher_fn)), ), ); } /// Get a matcher for the specified URL. /// /// # Errors /// * If the registry is poisoned. 
fn get>(&self, url: S) -> Result { let url = url.as_ref(); for (supports_fn, matcher_fn) in &self.matchers { let supports_function = supports_fn.read()?; if supports_function(url)? { let matcher_function = matcher_fn.read()?; return Ok(*matcher_function); } } Err(UnsupportedMatcher(url.to_string())) } } impl Default for MatchersRegistry { /// Creates a new matcher registry with the default matchers registered. fn default() -> Self { let mut registry = Self::new(); #[cfg(feature = "theseus")] registry.register(|url| Ok(url == theseus::URL), theseus::matcher); #[cfg(feature = "zonky")] registry.register(|url| Ok(url == zonky::URL), zonky::matcher); registry } } /// Registers a matcher for a supports function. Newly registered matchers will take precedence over /// existing ones. /// /// # Errors /// * If the registry is poisoned. pub fn register(supports_fn: SupportsFn, matcher_fn: MatcherFn) -> Result<()> { REGISTRY.lock()?.register(supports_fn, matcher_fn); Ok(()) } /// Get a matcher for the specified URL. /// /// # Errors /// * If the registry is poisoned. 
pub fn get>(url: S) -> Result { REGISTRY.lock()?.get(url) } #[cfg(test)] mod tests { use super::*; #[test] fn test_register() -> Result<()> { register( |url| Ok(url == "https://foo.com"), |_url, name, _version| Ok(name == "foo"), )?; let matcher = get("https://foo.com")?; let version = Version::new(16, 3, 0); assert!(matcher("", "foo", &version)?); Ok(()) } #[test] fn test_get_error() { let result = get("foo").unwrap_err(); assert_eq!("unsupported matcher for 'foo'", result.to_string()); } #[test] #[cfg(feature = "theseus")] fn test_get_theseus_postgresql_binaries() { assert!(get(theseus::URL).is_ok()); } #[test] #[cfg(feature = "zonky")] fn test_get_zonyk_postgresql_binaries() { assert!(get(zonky::URL).is_ok()); } } ================================================ FILE: postgresql_archive/src/repository/github/mod.rs ================================================ pub(crate) mod models; pub mod repository; ================================================ FILE: postgresql_archive/src/repository/github/models.rs ================================================ //! 
Structs for GitHub API responses use serde::{Deserialize, Serialize}; /// Represents a GitHub release #[derive(Clone, Debug, Deserialize, Serialize)] pub(crate) struct Release { pub url: String, pub assets_url: String, pub upload_url: String, pub html_url: String, pub id: i64, pub tag_name: String, pub name: String, pub draft: bool, pub prerelease: bool, pub assets: Vec, } /// Represents a GitHub asset #[derive(Clone, Debug, Deserialize, Serialize)] pub(crate) struct Asset { pub url: String, pub id: i64, pub node_id: String, pub name: String, pub label: String, pub content_type: String, pub state: String, pub size: i64, pub browser_download_url: String, } ================================================ FILE: postgresql_archive/src/repository/github/repository.rs ================================================ use crate::Error::{ ArchiveHashMismatch, AssetHashNotFound, AssetNotFound, RepositoryFailure, VersionNotFound, }; use crate::hasher::registry::HasherFn; use crate::repository::Archive; use crate::repository::github::models::{Asset, Release}; use crate::repository::model::Repository; use crate::{Result, hasher, matcher}; use async_trait::async_trait; use futures_util::StreamExt; use regex_lite::Regex; use reqwest::header::HeaderMap; use reqwest_middleware::{ClientBuilder, ClientWithMiddleware}; use reqwest_retry::RetryTransientMiddleware; use reqwest_retry::policies::ExponentialBackoff; use reqwest_tracing::TracingMiddleware; use semver::{Version, VersionReq}; use std::env; use std::io::Write; use std::str::FromStr; use std::sync::LazyLock; use tracing::{debug, instrument, warn}; #[cfg(feature = "indicatif")] use tracing_indicatif::span_ext::IndicatifSpanExt; use url::Url; const GITHUB_API_VERSION_HEADER: &str = "X-GitHub-Api-Version"; const GITHUB_API_VERSION: &str = "2022-11-28"; static GITHUB_TOKEN: LazyLock> = LazyLock::new(|| match env::var("GITHUB_TOKEN") { Ok(token) => { debug!("GITHUB_TOKEN environment variable found"); Some(token) } Err(_) => None, 
}); static USER_AGENT: LazyLock = LazyLock::new(|| { format!( "{PACKAGE}/{VERSION}", PACKAGE = env!("CARGO_PKG_NAME"), VERSION = env!("CARGO_PKG_VERSION") ) }); /// GitHub repository. /// /// This repository is used to interact with GitHub. The configuration url should be /// in the format /// (e.g. ). #[derive(Debug)] pub struct GitHub { url: String, releases_url: String, } impl GitHub { /// Creates a new GitHub repository from the specified URL in the format /// /// /// # Errors /// * If the URL is invalid. #[expect(clippy::new_ret_no_self)] pub fn new(url: &str) -> Result> { let parsed_url = Url::parse(url)?; let path = parsed_url.path().trim_start_matches('/'); let path_parts = path.split('/').collect::>(); let owner = (*path_parts .first() .ok_or_else(|| RepositoryFailure(format!("No owner in URL {url}")))?) .to_string(); let repo = (*path_parts .get(1) .ok_or_else(|| RepositoryFailure(format!("No repo in URL {url}")))?) .to_string(); let releases_url = format!("https://api.github.com/repos/{owner}/{repo}/releases"); Ok(Box::new(Self { url: url.to_string(), releases_url, })) } /// Gets the version from the specified tag name. /// /// # Errors /// * If the version cannot be parsed. fn get_version_from_tag_name(tag_name: &str) -> Result { // Trim and prefix characters from the tag name (e.g., "v16.4.0" -> "16.4.0"). let tag_name = tag_name.trim_start_matches(|c: char| !c.is_numeric()); match Version::from_str(tag_name) { Ok(version) => Ok(version), Err(error) => { warn!("Failed to parse version {tag_name}"); Err(error.into()) } } } /// Gets the release for the specified [version requirement](VersionReq). If a release for the /// [version requirement](VersionReq) is not found, then an error is returned. /// /// # Errors /// * If the release is not found. 
#[instrument(level = "debug")] async fn get_release(&self, version_req: &VersionReq) -> Result { debug!("Attempting to locate release for version requirement {version_req}"); let client = reqwest_client(); let mut result: Option = None; let mut page = 1; loop { let request = client .get(&self.releases_url) .headers(Self::headers()) .query(&[("page", page.to_string().as_str()), ("per_page", "100")]); let response = request.send().await?.error_for_status()?; let response_releases = response.json::>().await?; if response_releases.is_empty() { break; } for release in response_releases { let tag_name = release.tag_name.clone(); let Ok(release_version) = Self::get_version_from_tag_name(tag_name.as_str()) else { warn!("Failed to parse release version {tag_name}"); continue; }; if version_req.matches(&release_version) { if let Some(result_release) = &result { let result_version = Self::get_version_from_tag_name(result_release.tag_name.as_str())?; if release_version > result_version { result = Some(release); } } else { result = Some(release); } } } page += 1; } match result { Some(release) => { let version = Self::get_version_from_tag_name(&release.tag_name)?; debug!("Version {version} found for version requirement {version_req}"); Ok(release) } None => Err(VersionNotFound(version_req.to_string())), } } /// Gets the asset for the specified release that passes the supplied matcher. If an asset for /// that passes the matcher is not found, then an [AssetNotFound] error is returned. /// /// # Errors /// * If the asset is not found. #[instrument(level = "debug", skip(version, release))] fn get_asset( &self, version: &Version, release: &Release, ) -> Result<(Asset, Option, Option)> { let matcher = matcher::registry::get(&self.url)?; let mut release_asset: Option = None; for asset in &release.assets { if matcher(&self.url, asset.name.as_str(), version)? 
{ release_asset = Some(asset.clone()); break; } } let Some(asset) = release_asset else { return Err(AssetNotFound); }; // Attempt to find the asset hash for the asset. let mut asset_hash: Option = None; let mut asset_hasher_fn: Option = None; for release_asset in &release.assets { let release_asset_name = release_asset.name.as_str(); if !release_asset_name.starts_with(&asset.name) { continue; } let extension = release_asset_name .strip_prefix(format!("{}.", asset.name.as_str()).as_str()) .unwrap_or_default(); if let Ok(hasher_fn) = hasher::registry::get(&self.url, &extension.to_string()) { asset_hash = Some(release_asset.clone()); asset_hasher_fn = Some(hasher_fn); break; } } Ok((asset, asset_hash, asset_hasher_fn)) } /// Returns the headers for the GitHub request. fn headers() -> HeaderMap { let mut headers = HeaderMap::new(); headers.append( GITHUB_API_VERSION_HEADER, GITHUB_API_VERSION.parse().unwrap(), ); headers.append("User-Agent", USER_AGENT.parse().unwrap()); if let Some(token) = &*GITHUB_TOKEN { headers.append("Authorization", format!("Bearer {token}").parse().unwrap()); } headers } } #[async_trait] impl Repository for GitHub { #[instrument(level = "debug")] fn name(&self) -> &str { "GitHub" } #[instrument(level = "debug")] async fn get_version(&self, version_req: &VersionReq) -> Result { let release = self.get_release(version_req).await?; let version = Self::get_version_from_tag_name(release.tag_name.as_str())?; Ok(version) } #[instrument] async fn get_archive(&self, version_req: &VersionReq) -> Result { let release = self.get_release(version_req).await?; let version = Self::get_version_from_tag_name(release.tag_name.as_str())?; let (asset, asset_hash, asset_hasher_fn) = self.get_asset(&version, &release)?; let name = asset.name.clone(); let client = reqwest_client(); debug!("Downloading archive {}", asset.browser_download_url); let request = client .get(&asset.browser_download_url) .headers(Self::headers()); let response = 
request.send().await?.error_for_status()?; #[cfg(feature = "indicatif")] let span = tracing::Span::current(); #[cfg(feature = "indicatif")] { let content_length = response.content_length().unwrap_or_default(); span.pb_set_length(content_length); } let mut bytes = Vec::new(); let mut source = response.bytes_stream(); while let Some(chunk) = source.next().await { bytes.write_all(&chunk?)?; #[cfg(feature = "indicatif")] span.pb_set_position(bytes.len() as u64); } debug!( "Archive {} downloaded: {}", asset.browser_download_url, bytes.len(), ); if let Some(asset_hash) = asset_hash { let archive_hash = match asset_hasher_fn { Some(hasher_fn) => hasher_fn(&bytes)?, None => return Err(AssetHashNotFound(asset.name))?, }; let hash_len = archive_hash.len(); debug!( "Downloading archive hash {}", asset_hash.browser_download_url ); let request = client .get(&asset_hash.browser_download_url) .headers(Self::headers()); let response = request.send().await?.error_for_status()?; let text = response.text().await?; let re = Regex::new(&format!(r"[0-9a-f]{{{hash_len}}}"))?; let hash = match re.find(&text) { Some(hash) => hash.as_str().to_string(), None => return Err(AssetHashNotFound(asset.name)), }; debug!( "Archive hash {} downloaded: {}", asset_hash.browser_download_url, text.len(), ); if archive_hash != hash { return Err(ArchiveHashMismatch { archive_hash, hash }); } } let archive = Archive::new(name, version, bytes); Ok(archive) } } /// Creates a new reqwest client with middleware for tracing, and retrying transient errors. 
fn reqwest_client() -> ClientWithMiddleware { let retry_policy = ExponentialBackoff::builder().build_with_max_retries(3); ClientBuilder::new(reqwest::Client::new()) .with(TracingMiddleware::default()) .with(RetryTransientMiddleware::new_with_policy(retry_policy)) .build() } #[cfg(test)] mod tests { use super::*; use crate::configuration::theseus::URL; #[test] fn test_name() { let github = GitHub::new(URL).unwrap(); assert_eq!("GitHub", github.name()); } #[test] fn test_get_version_from_tag_name() -> Result<()> { let versions = vec!["16.4.0", "v16.4.0"]; for version in versions { let version = GitHub::get_version_from_tag_name(version)?; assert_eq!(Version::new(16, 4, 0), version); } Ok(()) } #[test] fn test_get_version_from_tag_name_error() { let error = GitHub::get_version_from_tag_name("foo").unwrap_err(); assert_eq!( "empty string, expected a semver version".to_string(), error.to_string() ); } // // get_version tests // #[tokio::test] async fn test_get_version() -> Result<()> { let github = GitHub::new(URL)?; let version_req = VersionReq::STAR; let version = github.get_version(&version_req).await?; assert!(version > Version::new(0, 0, 0)); Ok(()) } #[tokio::test] async fn test_get_specific_version() -> Result<()> { let github = GitHub::new(URL)?; let version_req = VersionReq::parse("=16.4.0")?; let version = github.get_version(&version_req).await?; assert_eq!(Version::new(16, 4, 0), version); Ok(()) } #[tokio::test] async fn test_get_specific_not_found() -> Result<()> { let github = GitHub::new(URL)?; let version_req = VersionReq::parse("=0.0.0")?; let error = github.get_version(&version_req).await.unwrap_err(); assert_eq!("version not found for '=0.0.0'", error.to_string()); Ok(()) } // // get_archive tests // #[tokio::test] async fn test_get_archive() -> Result<()> { let github = GitHub::new(URL)?; let version_req = VersionReq::parse("=16.4.0")?; let archive = github.get_archive(&version_req).await?; assert_eq!( format!("postgresql-16.4.0-{}.tar.gz", 
target_triple::TARGET), archive.name() ); assert_eq!(&Version::new(16, 4, 0), archive.version()); assert!(!archive.bytes().is_empty()); Ok(()) } // // Plugin Support // /// Test that a version with a 'v' prefix is correctly parsed; this is a common convention /// for GitHub releases. Use a known PostgreSQL plugin repository for the test. #[tokio::test] async fn test_get_version_with_v_prefix() -> Result<()> { let github = GitHub::new("https://github.com/turbot/steampipe-plugin-csv")?; let version_req = VersionReq::parse("=0.12.0")?; let version = github.get_version(&version_req).await?; assert_eq!(Version::new(0, 12, 0), version); Ok(()) } } ================================================ FILE: postgresql_archive/src/repository/maven/mod.rs ================================================ pub(crate) mod models; pub mod repository; pub const URL: &str = "https://repo1.maven.org/maven2"; ================================================ FILE: postgresql_archive/src/repository/maven/models.rs ================================================ /// Maven metadata XML structure /// /// ```xml /// /// io.zonky.test.postgres /// embedded-postgres-binaries-linux-amd64 /// /// 16.2.0 /// 16.2.0 /// /// ... 
/// 15.6.0 /// 16.2.0 /// /// 20240210235512 /// /// /// ``` use serde::{Deserialize, Serialize}; /// Represents a Maven artifact metadata #[derive(Clone, Debug, Deserialize, Serialize)] pub(crate) struct Metadata { #[serde(rename = "groupId")] pub(crate) group_id: String, #[serde(rename = "artifactId")] pub(crate) artifact_id: String, pub(crate) versioning: Versioning, } /// Represents Maven versioning information #[derive(Clone, Debug, Deserialize, Serialize)] pub(crate) struct Versioning { pub(crate) latest: String, pub(crate) release: String, pub(crate) versions: Versions, #[serde(rename = "lastUpdated")] pub(crate) last_updated: String, } /// Represents Maven versions #[derive(Clone, Debug, Deserialize, Serialize)] pub(crate) struct Versions { pub(crate) version: Vec, } ================================================ FILE: postgresql_archive/src/repository/maven/repository.rs ================================================ use crate::Error::{ArchiveHashMismatch, RepositoryFailure, VersionNotFound}; use crate::repository::Archive; use crate::repository::maven::models::Metadata; use crate::repository::model::Repository; use crate::{Result, hasher}; use async_trait::async_trait; use futures_util::StreamExt; use reqwest::header::HeaderMap; use reqwest_middleware::{ClientBuilder, ClientWithMiddleware}; use reqwest_retry::RetryTransientMiddleware; use reqwest_retry::policies::ExponentialBackoff; use reqwest_tracing::TracingMiddleware; use semver::{Version, VersionReq}; use std::env; use std::io::Write; use std::sync::LazyLock; use tracing::{debug, instrument, warn}; #[cfg(feature = "indicatif")] use tracing_indicatif::span_ext::IndicatifSpanExt; static USER_AGENT: LazyLock = LazyLock::new(|| { format!( "{PACKAGE}/{VERSION}", PACKAGE = env!("CARGO_PKG_NAME"), VERSION = env!("CARGO_PKG_VERSION") ) }); /// Maven repository. /// /// This repository is used to interact with Maven repositories /// (e.g. ). 
#[derive(Debug)]
pub struct Maven {
    url: String,
}

impl Maven {
    /// Creates a new Maven repository from the specified URL in the format
    /// <https://repo1.maven.org/maven2/io/zonky/test/postgres/embedded-postgres-binaries-linux-amd64>
    ///
    /// # Errors
    /// * If the URL is invalid.
    #[expect(clippy::new_ret_no_self)]
    pub fn new(url: &str) -> Result<Box<dyn Repository>> {
        Ok(Box::new(Self {
            url: url.to_string(),
        }))
    }

    /// Gets the artifact id and version that matches the specified version requirement.
    ///
    /// # Errors
    /// * If the version requirement does not match any versions.
    #[instrument(level = "debug")]
    async fn get_artifact(&self, version_req: &VersionReq) -> Result<(String, Version)> {
        debug!("Attempting to locate release for version requirement {version_req}");
        let client = reqwest_client();
        let url = format!("{}/maven-metadata.xml", self.url);
        let request = client.get(&url).headers(Self::headers());
        let response = request.send().await?.error_for_status()?;
        let text = response.text().await?;
        let metadata: Metadata = quick_xml::de::from_str(&text)?;
        let artifact = metadata.artifact_id;

        // Track the greatest version that satisfies the requirement; any unparsable
        // version in the metadata is a hard error (propagated via `?`).
        let mut result: Option<Version> = None;
        for version in &metadata.versioning.versions.version {
            let version = Version::parse(version)?;
            if version_req.matches(&version) && result.as_ref().map_or(true, |max| version > *max)
            {
                result = Some(version);
            }
        }

        match result {
            Some(version) => {
                debug!("Version {version} found for version requirement {version_req}");
                Ok((artifact, version))
            }
            None => Err(VersionNotFound(version_req.to_string())),
        }
    }

    /// Returns the headers for the Maven request.
fn headers() -> HeaderMap { let mut headers = HeaderMap::new(); headers.append("User-Agent", USER_AGENT.parse().unwrap()); headers } } #[async_trait] impl Repository for Maven { #[instrument(level = "debug")] fn name(&self) -> &str { "Maven" } #[instrument(level = "debug")] async fn get_version(&self, version_req: &VersionReq) -> Result { debug!("Attempting to locate release for version requirement {version_req}"); let (_, version) = self.get_artifact(version_req).await?; Ok(version) } #[instrument] async fn get_archive(&self, version_req: &VersionReq) -> Result { let (artifact, version) = self.get_artifact(version_req).await?; let archive_name = format!("{artifact}-{version}.jar"); let archive_url = format!("{url}/{version}/{artifact}-{version}.jar", url = self.url,); let mut hasher_result = None; // Try to find a hasher for the archive; the extensions are ordered by preference. for extension in &["sha512", "sha256", "sha1", "md5"] { if let Ok(hasher_fn) = hasher::registry::get(&self.url, &(*extension).to_string()) { hasher_result = Some((extension, hasher_fn)); } } let Some((extension, hasher_fn)) = hasher_result else { return Err(RepositoryFailure(format!( "no hashers found for {}", &self.url ))); }; let archive_hash_url = format!("{archive_url}.{extension}"); let client = reqwest_client(); debug!("Downloading archive hash {archive_hash_url}"); let request = client.get(&archive_hash_url).headers(Self::headers()); let response = request.send().await?.error_for_status()?; let hash = response.text().await?; debug!("Archive hash {archive_hash_url} downloaded: {}", hash.len(),); debug!("Downloading archive {archive_url}"); let request = client.get(&archive_url).headers(Self::headers()); let response = request.send().await?.error_for_status()?; #[cfg(feature = "indicatif")] let span = tracing::Span::current(); #[cfg(feature = "indicatif")] { let content_length = response.content_length().unwrap_or_default(); span.pb_set_length(content_length); } let mut bytes = 
Vec::new(); let mut source = response.bytes_stream(); while let Some(chunk) = source.next().await { bytes.write_all(&chunk?)?; #[cfg(feature = "indicatif")] span.pb_set_position(bytes.len() as u64); } debug!("Archive {archive_url} downloaded: {}", bytes.len(),); let archive_hash = hasher_fn(&bytes)?; if archive_hash != hash { return Err(ArchiveHashMismatch { archive_hash, hash }); } let archive = Archive::new(archive_name, version, bytes); Ok(archive) } } /// Creates a new reqwest client with middleware for tracing, and retrying transient errors. fn reqwest_client() -> ClientWithMiddleware { let retry_policy = ExponentialBackoff::builder().build_with_max_retries(3); ClientBuilder::new(reqwest::Client::new()) .with(TracingMiddleware::default()) .with(RetryTransientMiddleware::new_with_policy(retry_policy)) .build() } #[cfg(test)] mod tests { use super::*; const URL: &str = "https://repo1.maven.org/maven2/io/zonky/test/postgres/embedded-postgres-binaries-linux-amd64"; #[test] fn test_name() { let maven = Maven::new(URL).unwrap(); assert_eq!("Maven", maven.name()); } // // get_version tests // #[tokio::test] async fn test_get_version() -> Result<()> { let maven = Maven::new(URL)?; let version_req = VersionReq::STAR; let version = maven.get_version(&version_req).await?; assert!(version > Version::new(0, 0, 0)); Ok(()) } #[tokio::test] async fn test_get_specific_version() -> Result<()> { let maven = Maven::new(URL)?; let version_req = VersionReq::parse("=16.2.0")?; let version = maven.get_version(&version_req).await?; assert_eq!(Version::new(16, 2, 0), version); Ok(()) } #[tokio::test] async fn test_get_specific_not_found() -> Result<()> { let maven = Maven::new(URL)?; let version_req = VersionReq::parse("=0.0.0")?; let error = maven.get_version(&version_req).await.unwrap_err(); assert_eq!("version not found for '=0.0.0'", error.to_string()); Ok(()) } // // get_archive tests // #[tokio::test] async fn test_get_archive() -> Result<()> { let maven = Maven::new(URL)?; let 
version = Version::new(16, 2, 0); let version_req = VersionReq::parse(format!("={version}").as_str())?; let archive = maven.get_archive(&version_req).await?; assert_eq!( format!("embedded-postgres-binaries-linux-amd64-{version}.jar"), archive.name() ); assert_eq!(&version, archive.version()); assert!(!archive.bytes().is_empty()); Ok(()) } } ================================================ FILE: postgresql_archive/src/repository/mod.rs ================================================ #[cfg(feature = "github")] pub mod github; #[cfg(feature = "maven")] pub mod maven; pub mod model; pub mod registry; pub use model::{Archive, Repository}; ================================================ FILE: postgresql_archive/src/repository/model.rs ================================================ use async_trait::async_trait; use semver::{Version, VersionReq}; use std::fmt::Debug; /// A trait for archive repository implementations. #[async_trait] pub trait Repository: Debug + Send + Sync { /// Gets the name of the repository. fn name(&self) -> &str; /// Gets the version for the specified [version requirement](VersionReq). If a /// [version](Version) for the [version requirement](VersionReq) is not found, /// then an error is returned. /// /// # Errors /// * If the version is not found. async fn get_version(&self, version_req: &VersionReq) -> crate::Result; /// Gets the archive for a given [version requirement](VersionReq) that passes the default /// matcher. If no archive is found for the [version requirement](VersionReq) and matcher then /// an [error](crate::error::Error) is returned. /// /// # Errors /// * If the archive is not found. /// * If the archive cannot be downloaded. async fn get_archive(&self, version_req: &VersionReq) -> crate::Result; } /// A struct representing an archive. #[derive(Clone, Debug)] pub struct Archive { name: String, version: Version, bytes: Vec, } impl Archive { /// Creates a new archive. 
#[must_use] pub fn new(name: String, version: Version, bytes: Vec) -> Self { Self { name, version, bytes, } } /// Gets the name of the archive. #[must_use] pub fn name(&self) -> &str { &self.name } /// Gets the version of the archive. #[must_use] pub fn version(&self) -> &Version { &self.version } /// Gets the bytes of the archive. #[must_use] pub fn bytes(&self) -> &[u8] { &self.bytes } } #[cfg(test)] mod tests { use super::*; use semver::Version; #[test] fn test_archive() { let name = "test".to_string(); let version = Version::parse("1.0.0").unwrap(); let bytes = vec![0, 1, 2, 3]; let archive = Archive::new(name.clone(), version.clone(), bytes.clone()); assert_eq!(archive.name(), name); assert_eq!(archive.version(), &version); assert_eq!(archive.bytes(), bytes.as_slice()); } } ================================================ FILE: postgresql_archive/src/repository/registry.rs ================================================ use crate::Error::UnsupportedRepository; use crate::Result; #[cfg(feature = "theseus")] use crate::configuration::theseus; #[cfg(feature = "zonky")] use crate::configuration::zonky; #[cfg(feature = "github")] use crate::repository::github::repository::GitHub; use crate::repository::model::Repository; use std::sync::{Arc, LazyLock, Mutex, RwLock}; static REGISTRY: LazyLock>> = LazyLock::new(|| Arc::new(Mutex::new(RepositoryRegistry::default()))); type SupportsFn = fn(&str) -> Result; type NewFn = dyn Fn(&str) -> Result> + Send + Sync; /// Singleton struct to store repositories #[expect(clippy::type_complexity)] struct RepositoryRegistry { repositories: Vec<(Arc>, Arc>)>, } impl RepositoryRegistry { /// Creates a new repository registry. fn new() -> Self { Self { repositories: Vec::new(), } } /// Registers a repository. Newly registered repositories take precedence over existing ones. 
fn register(&mut self, supports_fn: SupportsFn, new_fn: Box) { self.repositories.insert( 0, ( Arc::new(RwLock::new(supports_fn)), Arc::new(RwLock::new(new_fn)), ), ); } /// Gets a repository that supports the specified URL /// /// # Errors /// * If the URL is not supported. fn get(&self, url: &str) -> Result> { for (supports_fn, new_fn) in &self.repositories { let supports_function = supports_fn.read()?; if supports_function(url)? { let new_function = new_fn.read()?; return new_function(url); } } Err(UnsupportedRepository(url.to_string())) } } impl Default for RepositoryRegistry { /// Creates a new repository registry with the default repositories registered. fn default() -> Self { let mut registry = Self::new(); #[cfg(feature = "theseus")] registry.register( |url| Ok(url.starts_with(theseus::URL)), Box::new(GitHub::new), ); #[cfg(feature = "zonky")] registry.register( |url| Ok(url.starts_with(zonky::URL)), Box::new(zonky::Zonky::new), ); registry } } /// Registers a repository. Newly registered repositories can override existing ones. /// /// # Errors /// * If the registry is poisoned. pub fn register(supports_fn: SupportsFn, new_fn: Box) -> Result<()> { REGISTRY.lock()?.register(supports_fn, new_fn); Ok(()) } /// Gets a repository that supports the specified URL /// /// # Errors /// * If the URL is not supported. 
pub fn get(url: &str) -> Result> { REGISTRY.lock()?.get(url) } #[cfg(test)] mod tests { use super::*; use crate::repository::Archive; use async_trait::async_trait; use semver::{Version, VersionReq}; use std::fmt::Debug; #[derive(Debug)] struct TestRepository; impl TestRepository { #[expect(clippy::new_ret_no_self)] #[expect(clippy::unnecessary_wraps)] fn new(_url: &str) -> Result> { Ok(Box::new(Self)) } } #[async_trait] impl Repository for TestRepository { fn name(&self) -> &'static str { "test" } async fn get_version(&self, _version_req: &VersionReq) -> Result { Ok(Version::new(0, 0, 42)) } async fn get_archive(&self, _version_req: &VersionReq) -> Result { Ok(Archive::new( "test".to_string(), Version::new(0, 0, 42), Vec::new(), )) } } #[tokio::test] async fn test_register() -> Result<()> { register( |url| Ok(url == "https://foo.com"), Box::new(TestRepository::new), )?; let url = "https://foo.com"; let repository = get(url)?; assert_eq!("test", repository.name()); assert!(repository.get_version(&VersionReq::STAR).await.is_ok()); assert!(repository.get_archive(&VersionReq::STAR).await.is_ok()); Ok(()) } #[test] fn test_get_error() { let error = get("foo").unwrap_err(); assert_eq!("unsupported repository for 'foo'", error.to_string()); } #[test] #[cfg(feature = "theseus")] fn test_get_theseus_postgresql_binaries() { assert!(get(theseus::URL).is_ok()); } #[test] #[cfg(feature = "zonky")] fn test_get_zonky_postgresql_binaries() { assert!(get(zonky::URL).is_ok()); } } ================================================ FILE: postgresql_archive/src/version.rs ================================================ use crate::Result; use semver::{Version, VersionReq}; /// A trait for getting the exact version from a [version requirement](VersionReq). pub trait ExactVersion { /// Gets the exact version from a [version requirement](VersionReq) or `None`. 
fn exact_version(&self) -> Option; } impl ExactVersion for VersionReq { /// Gets the exact version from a [version requirement](VersionReq) or `None`. fn exact_version(&self) -> Option { if self.comparators.len() != 1 { return None; } let comparator = self.comparators.first()?; if comparator.op != semver::Op::Exact { return None; } let minor = comparator.minor?; let patch = comparator.patch?; let version = Version::new(comparator.major, minor, patch); Some(version) } } /// A trait for getting the exact version requirement from a [version](Version). pub trait ExactVersionReq { /// Gets the exact version requirement from a [version](Version). /// /// # Errors /// * If the version requirement cannot be parsed. fn exact_version_req(&self) -> Result; } impl ExactVersionReq for Version { /// Gets the exact version requirement from a [version](Version). /// /// # Errors /// * If the version requirement cannot be parsed. fn exact_version_req(&self) -> Result { let version = format!("={self}"); let version_req = VersionReq::parse(&version)?; Ok(version_req) } } #[cfg(test)] mod tests { use super::*; use crate::Result; #[test] fn test_exact_version_star() { let version_req = VersionReq::STAR; assert_eq!(None, version_req.exact_version()); } #[test] fn test_exact_version_greater_than() -> Result<()> { let version_req = VersionReq::parse(">16")?; assert_eq!(None, version_req.exact_version()); Ok(()) } #[test] fn test_exact_version_full_no_equals() -> Result<()> { let version_req = VersionReq::parse("16.4.0")?; assert_eq!(None, version_req.exact_version()); Ok(()) } #[test] fn test_exact_version_full_equals() -> Result<()> { let version_req = VersionReq::parse("=16.4.0")?; let version = Version::new(16, 4, 0); assert_eq!(Some(version), version_req.exact_version()); Ok(()) } #[test] fn test_exact_version_major_minor() -> Result<()> { let version_req = VersionReq::parse("=16.4")?; assert_eq!(None, version_req.exact_version()); Ok(()) } #[test] fn test_exact_version_major() -> 
Result<()> { let version_req = VersionReq::parse("=16")?; assert_eq!(None, version_req.exact_version()); Ok(()) } #[test] fn test_exact_version_range() -> Result<()> { let version_req = VersionReq::parse(">= 16, < 17")?; assert_eq!(None, version_req.exact_version()); Ok(()) } #[test] fn test_exact_version_req_not_equal() -> Result<()> { let version = Version::new(1, 2, 3); assert_ne!(VersionReq::parse("=1.0.0")?, version.exact_version_req()?); Ok(()) } #[test] fn test_exact_version_req_major_minor_patch() -> Result<()> { let version = Version::new(16, 4, 0); assert_eq!(VersionReq::parse("=16.4.0")?, version.exact_version_req()?); Ok(()) } #[test] fn test_exact_version_prerelease() -> Result<()> { let version = Version::parse("1.2.3-alpha")?; assert_eq!( VersionReq::parse("=1.2.3-alpha")?, version.exact_version_req()? ); Ok(()) } } ================================================ FILE: postgresql_archive/tests/archive.rs ================================================ use postgresql_archive::configuration::theseus; use postgresql_archive::extract; use postgresql_archive::{get_archive, get_version}; use semver::VersionReq; use std::fs::remove_dir_all; use test_log::test; #[test(tokio::test)] async fn test_get_version_not_found() -> postgresql_archive::Result<()> { let invalid_version_req = VersionReq::parse("=1.0.0")?; let result = get_version(theseus::URL, &invalid_version_req).await; assert!(result.is_err()); Ok(()) } #[test(tokio::test)] async fn test_get_version() -> anyhow::Result<()> { let version_req = VersionReq::parse("=16.4.0")?; let latest_version = get_version(theseus::URL, &version_req).await?; assert!(version_req.matches(&latest_version)); Ok(()) } #[test(tokio::test)] async fn test_get_archive_and_extract() -> anyhow::Result<()> { let url = theseus::URL; let version_req = VersionReq::parse("=16.4.0")?; let (archive_version, archive) = get_archive(url, &version_req).await?; assert!(version_req.matches(&archive_version)); let out_dir = 
tempfile::tempdir()?.path().to_path_buf();
    let files = extract(url, &archive, &out_dir).await?;
    #[cfg(all(target_os = "linux", target_arch = "x86_64"))]
    assert_eq!(1_312, files.len());
    #[cfg(all(target_os = "macos", target_arch = "aarch64"))]
    assert_eq!(1_271, files.len());
    #[cfg(all(target_os = "macos", target_arch = "x86_64"))]
    assert_eq!(1_271, files.len());
    #[cfg(all(target_os = "windows", target_arch = "x86_64"))]
    assert_eq!(3_092, files.len());
    remove_dir_all(&out_dir)?;
    Ok(())
}

#[test(tokio::test)]
async fn test_get_archive_version_not_found() -> postgresql_archive::Result<()> {
    let invalid_version_req = VersionReq::parse("=1.0.0")?;
    let result = get_archive(theseus::URL, &invalid_version_req).await;
    assert!(result.is_err());
    Ok(())
}

================================================
FILE: postgresql_archive/tests/blocking.rs
================================================
#[cfg(feature = "blocking")]
use postgresql_archive::VersionReq;
#[cfg(feature = "blocking")]
use postgresql_archive::blocking::{extract, get_archive, get_version};
#[cfg(feature = "blocking")]
use postgresql_archive::configuration::theseus;
#[cfg(feature = "blocking")]
use std::fs::remove_dir_all;
#[cfg(feature = "blocking")]
use test_log::test;

#[cfg(feature = "blocking")]
#[test]
fn test_get_version() -> anyhow::Result<()> {
    let version_req = VersionReq::STAR;
    let latest_version = get_version(theseus::URL, &version_req)?;
    assert!(version_req.matches(&latest_version));
    Ok(())
}

#[cfg(feature = "blocking")]
#[test]
fn test_get_archive_and_extract() -> anyhow::Result<()> {
    let url = theseus::URL;
    let version_req = &VersionReq::parse("=16.4.0")?;
    let (archive_version, archive) = get_archive(url, version_req)?;
    assert!(version_req.matches(&archive_version));
    let out_dir = tempfile::tempdir()?.path().to_path_buf();
    let files = extract(url, &archive, &out_dir)?;
    assert!(!files.is_empty());
    remove_dir_all(&out_dir)?;
    Ok(())
}

================================================
FILE: postgresql_archive/tests/zonky.rs
================================================
#[cfg(feature = "zonky")]
use postgresql_archive::configuration::zonky;
#[cfg(feature = "zonky")]
use postgresql_archive::extract;
#[cfg(feature = "zonky")]
use postgresql_archive::{get_archive, get_version};
#[cfg(feature = "zonky")]
use semver::VersionReq;
#[cfg(feature = "zonky")]
use std::fs::remove_dir_all;
#[cfg(feature = "zonky")]
use test_log::test;

#[test(tokio::test)]
#[cfg(feature = "zonky")]
async fn test_get_version_not_found() -> postgresql_archive::Result<()> {
    let invalid_version_req = VersionReq::parse("=1.0.0")?;
    let result = get_version(zonky::URL, &invalid_version_req).await;
    assert!(result.is_err());
    Ok(())
}

#[test(tokio::test)]
#[cfg(feature = "zonky")]
async fn test_get_version() -> anyhow::Result<()> {
    let version_req = VersionReq::parse("=16.2.0")?;
    let latest_version = get_version(zonky::URL, &version_req).await?;
    assert!(version_req.matches(&latest_version));
    Ok(())
}

#[test(tokio::test)]
#[cfg(feature = "zonky")]
async fn test_get_archive_and_extract() -> anyhow::Result<()> {
    let url = zonky::URL;
    let version_req = VersionReq::parse("=16.4.0")?;
    let (archive_version, archive) = get_archive(url, &version_req).await?;
    assert!(version_req.matches(&archive_version));
    let out_dir = tempfile::tempdir()?.path().to_path_buf();
    let files = extract(url, &archive, &out_dir).await?;
    assert!(files.len() > 1_000);
    remove_dir_all(&out_dir)?;
    Ok(())
}

#[test(tokio::test)]
#[cfg(feature = "zonky")]
async fn test_get_archive_version_not_found() -> postgresql_archive::Result<()> {
    let invalid_version_req = VersionReq::parse("=1.0.0")?;
    let result = get_archive(zonky::URL, &invalid_version_req).await;
    assert!(result.is_err());
    Ok(())
}

================================================
FILE: postgresql_commands/Cargo.toml
================================================
[package]
authors.workspace = true
categories.workspace = true
description = "PostgreSQL commands for
interacting with a PostgreSQL server." edition.workspace = true keywords.workspace = true license.workspace = true name = "postgresql_commands" repository = "https://github.com/theseus-rs/postgresql-embedded" rust-version.workspace = true version.workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] thiserror = { workspace = true } tokio = { workspace = true, features = ["full"], optional = true } tracing = { workspace = true, features = ["log"] } [dev-dependencies] test-log = { workspace = true } tokio = { workspace = true, features = ["full"] } [features] default = [] tokio = ["dep:tokio"] ================================================ FILE: postgresql_commands/README.md ================================================ # PostgreSQL Commands [![ci](https://github.com/theseus-rs/postgresql-embedded/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/theseus-rs/postgresql-embedded/actions/workflows/ci.yml) [![Documentation](https://docs.rs/postgresql_commands/badge.svg)](https://docs.rs/postgresql_commands) [![Code Coverage](https://codecov.io/gh/theseus-rs/postgresql-embedded/branch/main/graph/badge.svg)](https://codecov.io/gh/theseus-rs/postgresql-embedded) [![Latest version](https://img.shields.io/crates/v/postgresql_commands.svg)](https://crates.io/crates/postgresql_commands) [![License](https://img.shields.io/crates/l/postgresql_commands?)](https://github.com/theseus-rs/postgresql-embedded/tree/main/postgresql_commands#license) [![Semantic Versioning](https://img.shields.io/badge/%E2%9A%99%EF%B8%8F_SemVer-2.0.0-blue)](https://semver.org/spec/v2.0.0.html) A library for executing PostgreSQL command line utilities. 
## Examples ```rust use postgresql_commands::Result; use postgresql_commands::psql::PsqlBuilder; fn main() -> Result<()> { let psql = PsqlBuilder::new() .command("CREATE DATABASE \"test\"") .host("127.0.0.1") .port(5432) .username("postgresql") .pg_password("password") .build(); let (stdout, stderr) = psql.execute()?; Ok(()) } ``` ## Feature flags The following features are available: | Name | Description | Default? | |---------|-----------------------------------|----------| | `tokio` | Enables the use of tokio commands | No | ## License Licensed under either of * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0) * MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT) at your option. ## Contribution Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. ================================================ FILE: postgresql_commands/src/clusterdb.rs ================================================ use crate::Settings; use crate::traits::CommandBuilder; use std::convert::AsRef; use std::ffi::{OsStr, OsString}; use std::path::PathBuf; /// `clusterdb` clusters all previously clustered tables in a database. 
#[derive(Clone, Debug, Default)] pub struct ClusterDbBuilder { program_dir: Option, envs: Vec<(OsString, OsString)>, all: bool, dbname: Option, echo: bool, quiet: bool, table: Option, verbose: bool, version: bool, help: bool, host: Option, port: Option, username: Option, no_password: bool, password: bool, pg_password: Option, maintenance_db: Option, } impl ClusterDbBuilder { /// Create a new [`ClusterDbBuilder`] #[must_use] pub fn new() -> Self { Self::default() } /// Create a new [`ClusterDbBuilder`] from [Settings] pub fn from(settings: &dyn Settings) -> Self { let mut builder = Self::new() .program_dir(settings.get_binary_dir()) .host(settings.get_host()) .port(settings.get_port()) .username(settings.get_username()) .pg_password(settings.get_password()); if let Some(socket_dir) = settings.get_socket_dir() { builder = builder.host(socket_dir.to_string_lossy().to_string()); } builder } /// Location of the program binary #[must_use] fn program_dir>(mut self, path: P) -> Self { self.program_dir = Some(path.into()); self } /// Cluster all databases #[must_use] pub fn all(mut self) -> Self { self.all = true; self } /// Database to cluster #[must_use] pub fn dbname>(mut self, dbname: S) -> Self { self.dbname = Some(dbname.as_ref().to_os_string()); self } /// Show the commands being sent to the server #[must_use] pub fn echo(mut self) -> Self { self.echo = true; self } /// Don't write any messages #[must_use] pub fn quiet(mut self) -> Self { self.quiet = true; self } /// Cluster specific table(s) only #[must_use] pub fn table>(mut self, table: S) -> Self { self.table = Some(table.as_ref().to_os_string()); self } /// Write a lot of output #[must_use] pub fn verbose(mut self) -> Self { self.verbose = true; self } /// Output version information, then exit #[must_use] pub fn version(mut self) -> Self { self.version = true; self } /// Show help, then exit #[must_use] pub fn help(mut self) -> Self { self.help = true; self } /// Database server host or socket directory 
#[must_use] pub fn host>(mut self, host: S) -> Self { self.host = Some(host.as_ref().to_os_string()); self } /// Database server port #[must_use] pub fn port(mut self, port: u16) -> Self { self.port = Some(port); self } /// User name to connect as #[must_use] pub fn username>(mut self, username: S) -> Self { self.username = Some(username.as_ref().to_os_string()); self } /// Never prompt for password #[must_use] pub fn no_password(mut self) -> Self { self.no_password = true; self } /// Force password prompt #[must_use] pub fn password(mut self) -> Self { self.password = true; self } /// user password #[must_use] pub fn pg_password>(mut self, pg_password: S) -> Self { self.pg_password = Some(pg_password.as_ref().to_os_string()); self } /// Alternate maintenance database #[must_use] pub fn maintenance_db>(mut self, db: S) -> Self { self.maintenance_db = Some(db.as_ref().to_os_string()); self } } impl CommandBuilder for ClusterDbBuilder { /// Get the program name fn get_program(&self) -> &'static OsStr { "clusterdb".as_ref() } /// Location of the program binary fn get_program_dir(&self) -> &Option { &self.program_dir } /// Get the arguments for the command fn get_args(&self) -> Vec { let mut args: Vec = Vec::new(); if self.all { args.push("--all".into()); } if let Some(dbname) = &self.dbname { args.push("--dbname".into()); args.push(dbname.into()); } if self.echo { args.push("--echo".into()); } if self.quiet { args.push("--quiet".into()); } if let Some(table) = &self.table { args.push("--table".into()); args.push(table.into()); } if self.verbose { args.push("--verbose".into()); } if self.version { args.push("--version".into()); } if self.help { args.push("--help".into()); } if let Some(host) = &self.host { args.push("--host".into()); args.push(host.into()); } if let Some(port) = &self.port { args.push("--port".into()); args.push(port.to_string().into()); } if let Some(username) = &self.username { args.push("--username".into()); args.push(username.into()); } if 
self.no_password { args.push("--no-password".into()); } if self.password { args.push("--password".into()); } if let Some(maintenance_db) = &self.maintenance_db { args.push("--maintenance-db".into()); args.push(maintenance_db.into()); } args } /// Get the environment variables for the command fn get_envs(&self) -> Vec<(OsString, OsString)> { let mut envs: Vec<(OsString, OsString)> = self.envs.clone(); if let Some(password) = &self.pg_password { envs.push(("PGPASSWORD".into(), password.into())); } envs } /// Set an environment variable for the command fn env>(mut self, key: S, value: S) -> Self { self.envs .push((key.as_ref().to_os_string(), value.as_ref().to_os_string())); self } } #[cfg(test)] mod tests { use super::*; use crate::TestSettings; use crate::TestSocketSettings; use crate::traits::CommandToString; use test_log::test; #[test] fn test_builder_new() { let command = ClusterDbBuilder::new().program_dir(".").build(); assert_eq!( PathBuf::from(".").join("clusterdb"), PathBuf::from(command.to_command_string().replace('"', "")) ); } #[test] fn test_builder_from() { let command = ClusterDbBuilder::from(&TestSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGPASSWORD="password" "./clusterdb" "#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\clusterdb" "#; assert_eq!( format!( r#"{command_prefix}"--host" "localhost" "--port" "5432" "--username" "postgres""# ), command.to_command_string() ); } #[test] fn test_builder() { let command = ClusterDbBuilder::new() .env("PGDATABASE", "database") .all() .dbname("dbname") .echo() .quiet() .table("table") .verbose() .version() .help() .host("localhost") .port(5432) .username("postgres") .no_password() .password() .pg_password("password") .maintenance_db("postgres") .build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGDATABASE="database" PGPASSWORD="password" "#; #[cfg(target_os = "windows")] let command_prefix = String::new(); assert_eq!( format!( 
r#"{command_prefix}"clusterdb" "--all" "--dbname" "dbname" "--echo" "--quiet" "--table" "table" "--verbose" "--version" "--help" "--host" "localhost" "--port" "5432" "--username" "postgres" "--no-password" "--password" "--maintenance-db" "postgres""# ), command.to_command_string() ); } #[test] fn test_builder_from_socket() { let command = ClusterDbBuilder::from(&TestSocketSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGPASSWORD="password" "./clusterdb" "#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\clusterdb" "#; assert_eq!( format!( r#"{command_prefix}"--host" "/tmp/pg_socket" "--port" "5432" "--username" "postgres""# ), command.to_command_string() ); } } ================================================ FILE: postgresql_commands/src/createdb.rs ================================================ use crate::Settings; use crate::traits::CommandBuilder; use std::convert::AsRef; use std::ffi::{OsStr, OsString}; use std::path::PathBuf; /// `createdb` creates a `PostgreSQL` database. 
#[derive(Clone, Debug, Default)] pub struct CreateDbBuilder { program_dir: Option, envs: Vec<(OsString, OsString)>, tablespace: Option, echo: bool, encoding: Option, locale: Option, lc_collate: Option, lc_ctype: Option, icu_locale: Option, icu_rules: Option, locale_provider: Option, owner: Option, strategy: Option, template: Option, version: bool, help: bool, host: Option, port: Option, username: Option, no_password: bool, password: bool, pg_password: Option, maintenance_db: Option, dbname: Option, description: Option, } impl CreateDbBuilder { /// Create a new [`CreateDbBuilder`] #[must_use] pub fn new() -> Self { Self::default() } /// Create a new [`CreateDbBuilder`] from [Settings] pub fn from(settings: &dyn Settings) -> Self { let mut builder = Self::new() .program_dir(settings.get_binary_dir()) .host(settings.get_host()) .port(settings.get_port()) .username(settings.get_username()) .pg_password(settings.get_password()); if let Some(socket_dir) = settings.get_socket_dir() { builder = builder.host(socket_dir.to_string_lossy().to_string()); } builder } /// Location of the program binary #[must_use] pub fn program_dir>(mut self, path: P) -> Self { self.program_dir = Some(path.into()); self } /// Default tablespace for the database #[must_use] pub fn tablespace>(mut self, tablespace: S) -> Self { self.tablespace = Some(tablespace.as_ref().to_os_string()); self } /// Show the commands being sent to the server #[must_use] pub fn echo(mut self) -> Self { self.echo = true; self } /// Encoding for the database #[must_use] pub fn encoding>(mut self, encoding: S) -> Self { self.encoding = Some(encoding.as_ref().to_os_string()); self } /// Locale settings for the database #[must_use] pub fn locale>(mut self, locale: S) -> Self { self.locale = Some(locale.as_ref().to_os_string()); self } /// `LC_COLLATE` setting for the database #[must_use] pub fn lc_collate>(mut self, lc_collate: S) -> Self { self.lc_collate = Some(lc_collate.as_ref().to_os_string()); self } /// `LC_CTYPE` 
setting for the database #[must_use] pub fn lc_ctype>(mut self, lc_ctype: S) -> Self { self.lc_ctype = Some(lc_ctype.as_ref().to_os_string()); self } /// ICU locale setting for the database #[must_use] pub fn icu_locale>(mut self, icu_locale: S) -> Self { self.icu_locale = Some(icu_locale.as_ref().to_os_string()); self } /// ICU rules setting for the database #[must_use] pub fn icu_rules>(mut self, icu_rules: S) -> Self { self.icu_rules = Some(icu_rules.as_ref().to_os_string()); self } /// Locale provider for the database's default collation #[must_use] pub fn locale_provider>(mut self, locale_provider: S) -> Self { self.locale_provider = Some(locale_provider.as_ref().to_os_string()); self } /// Database user to own the new database #[must_use] pub fn owner>(mut self, owner: S) -> Self { self.owner = Some(owner.as_ref().to_os_string()); self } /// Database creation strategy `wal_log` or `file_copy` #[must_use] pub fn strategy>(mut self, strategy: S) -> Self { self.strategy = Some(strategy.as_ref().to_os_string()); self } /// Template database to copy #[must_use] pub fn template>(mut self, template: S) -> Self { self.template = Some(template.as_ref().to_os_string()); self } /// Output version information, then exit #[must_use] pub fn version(mut self) -> Self { self.version = true; self } /// Show help, then exit #[must_use] pub fn help(mut self) -> Self { self.help = true; self } /// Database server host or socket directory #[must_use] pub fn host>(mut self, host: S) -> Self { self.host = Some(host.as_ref().to_os_string()); self } /// Database server port #[must_use] pub fn port(mut self, port: u16) -> Self { self.port = Some(port); self } /// User name to connect as #[must_use] pub fn username>(mut self, username: S) -> Self { self.username = Some(username.as_ref().to_os_string()); self } /// Never prompt for password #[must_use] pub fn no_password(mut self) -> Self { self.no_password = true; self } /// Force password prompt #[must_use] pub fn password(mut self) 
-> Self { self.password = true; self } /// user password #[must_use] pub fn pg_password>(mut self, pg_password: S) -> Self { self.pg_password = Some(pg_password.as_ref().to_os_string()); self } /// Alternate maintenance database #[must_use] pub fn maintenance_db>(mut self, db: S) -> Self { self.maintenance_db = Some(db.as_ref().to_os_string()); self } /// Database name #[must_use] pub fn dbname>(mut self, dbname: S) -> Self { self.dbname = Some(dbname.as_ref().to_os_string()); self } /// Database description #[must_use] pub fn description>(mut self, description: S) -> Self { self.description = Some(description.as_ref().to_os_string()); self } } impl CommandBuilder for CreateDbBuilder { /// Get the program name fn get_program(&self) -> &'static OsStr { "createdb".as_ref() } /// Location of the program binary fn get_program_dir(&self) -> &Option { &self.program_dir } /// Get the arguments for the command fn get_args(&self) -> Vec { let mut args: Vec = Vec::new(); if let Some(tablespace) = &self.tablespace { args.push("--tablespace".into()); args.push(tablespace.into()); } if self.echo { args.push("--echo".into()); } if let Some(encoding) = &self.encoding { args.push("--encoding".into()); args.push(encoding.into()); } if let Some(locale) = &self.locale { args.push("--locale".into()); args.push(locale.into()); } if let Some(lc_collate) = &self.lc_collate { args.push("--lc-collate".into()); args.push(lc_collate.into()); } if let Some(lc_ctype) = &self.lc_ctype { args.push("--lc-ctype".into()); args.push(lc_ctype.into()); } if let Some(icu_locale) = &self.icu_locale { args.push("--icu-locale".into()); args.push(icu_locale.into()); } if let Some(icu_rules) = &self.icu_rules { args.push("--icu-rules".into()); args.push(icu_rules.into()); } if let Some(locale_provider) = &self.locale_provider { args.push("--locale-provider".into()); args.push(locale_provider.into()); } if let Some(owner) = &self.owner { args.push("--owner".into()); args.push(owner.into()); } if let 
Some(strategy) = &self.strategy { args.push("--strategy".into()); args.push(strategy.into()); } if let Some(template) = &self.template { args.push("--template".into()); args.push(template.into()); } if self.version { args.push("--version".into()); } if self.help { args.push("--help".into()); } if let Some(host) = &self.host { args.push("--host".into()); args.push(host.into()); } if let Some(port) = &self.port { args.push("--port".into()); args.push(port.to_string().into()); } if let Some(username) = &self.username { args.push("--username".into()); args.push(username.into()); } if self.no_password { args.push("--no-password".into()); } if self.password { args.push("--password".into()); } if let Some(maintenance_db) = &self.maintenance_db { args.push("--maintenance-db".into()); args.push(maintenance_db.into()); } if let Some(dbname) = &self.dbname { args.push(dbname.into()); } if let Some(description) = &self.description { args.push(description.into()); } args } /// Get the environment variables for the command fn get_envs(&self) -> Vec<(OsString, OsString)> { let mut envs: Vec<(OsString, OsString)> = self.envs.clone(); if let Some(password) = &self.pg_password { envs.push(("PGPASSWORD".into(), password.into())); } envs } /// Set an environment variable for the command fn env>(mut self, key: S, value: S) -> Self { self.envs .push((key.as_ref().to_os_string(), value.as_ref().to_os_string())); self } } #[cfg(test)] mod tests { use super::*; use crate::TestSettings; use crate::TestSocketSettings; use crate::traits::CommandToString; use test_log::test; #[test] fn test_builder_new() { let command = CreateDbBuilder::new().program_dir(".").build(); assert_eq!( PathBuf::from(".").join("createdb"), PathBuf::from(command.to_command_string().replace('"', "")) ); } #[test] fn test_builder_from() { let command = CreateDbBuilder::from(&TestSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGPASSWORD="password" "./createdb" "#; #[cfg(target_os = 
"windows")] let command_prefix = r#"".\\createdb" "#; assert_eq!( format!( r#"{command_prefix}"--host" "localhost" "--port" "5432" "--username" "postgres""# ), command.to_command_string() ); } #[test] fn test_builder() { let command = CreateDbBuilder::new() .env("PGDATABASE", "database") .tablespace("pg_default") .echo() .encoding("UTF8") .locale("en_US.UTF-8") .lc_collate("en_US.UTF-8") .lc_ctype("en_US.UTF-8") .icu_locale("en_US") .icu_rules("standard") .locale_provider("icu") .owner("postgres") .strategy("wal_log") .template("template0") .version() .help() .host("localhost") .port(5432) .username("postgres") .no_password() .password() .pg_password("password") .maintenance_db("postgres") .dbname("testdb") .description("Test Database") .build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGDATABASE="database" PGPASSWORD="password" "#; #[cfg(target_os = "windows")] let command_prefix = String::new(); assert_eq!( format!( r#"{command_prefix}"createdb" "--tablespace" "pg_default" "--echo" "--encoding" "UTF8" "--locale" "en_US.UTF-8" "--lc-collate" "en_US.UTF-8" "--lc-ctype" "en_US.UTF-8" "--icu-locale" "en_US" "--icu-rules" "standard" "--locale-provider" "icu" "--owner" "postgres" "--strategy" "wal_log" "--template" "template0" "--version" "--help" "--host" "localhost" "--port" "5432" "--username" "postgres" "--no-password" "--password" "--maintenance-db" "postgres" "testdb" "Test Database""# ), command.to_command_string() ); } #[test] fn test_builder_from_socket() { let command = CreateDbBuilder::from(&TestSocketSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGPASSWORD="password" "./createdb" "#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\createdb" "#; assert_eq!( format!( r#"{command_prefix}"--host" "/tmp/pg_socket" "--port" "5432" "--username" "postgres""# ), command.to_command_string() ); } } ================================================ FILE: postgresql_commands/src/createuser.rs 
================================================ use crate::Settings; use crate::traits::CommandBuilder; use std::convert::AsRef; use std::ffi::{OsStr, OsString}; use std::path::PathBuf; /// `createuser` creates a new `PostgreSQL` role. #[derive(Clone, Debug, Default)] pub struct CreateUserBuilder { program_dir: Option, envs: Vec<(OsString, OsString)>, with_admin: Option, connection_limit: Option, createdb: bool, no_createdb: bool, echo: bool, member_of: Option, inherit: bool, no_inherit: bool, login: bool, no_login: bool, with_member: Option, pwprompt: bool, createrole: bool, no_createrole: bool, superuser: bool, no_superuser: bool, valid_until: Option, version: bool, interactive: bool, bypassrls: bool, no_bypassrls: bool, replication: bool, no_replication: bool, help: bool, host: Option, port: Option, username: Option, no_password: bool, password: bool, pg_password: Option, } impl CreateUserBuilder { /// Create a new [`CreateUserBuilder`] #[must_use] pub fn new() -> Self { Self::default() } /// Create a new [`CreateUserBuilder`] from [Settings] pub fn from(settings: &dyn Settings) -> Self { let mut builder = Self::new() .program_dir(settings.get_binary_dir()) .host(settings.get_host()) .port(settings.get_port()) .username(settings.get_username()) .pg_password(settings.get_password()); if let Some(socket_dir) = settings.get_socket_dir() { builder = builder.host(socket_dir.to_string_lossy().to_string()); } builder } /// Location of the program binary #[must_use] pub fn program_dir>(mut self, path: P) -> Self { self.program_dir = Some(path.into()); self } /// ROLE will be a member of new role with admin option #[must_use] pub fn with_admin>(mut self, role: S) -> Self { self.with_admin = Some(role.as_ref().to_os_string()); self } /// Connection limit for role (default: no limit) #[must_use] pub fn connection_limit(mut self, limit: u32) -> Self { self.connection_limit = Some(limit); self } /// Role can create new databases #[must_use] pub fn createdb(mut self) -> Self 
{ self.createdb = true; self } /// Role cannot create databases (default) #[must_use] pub fn no_createdb(mut self) -> Self { self.no_createdb = true; self } /// Show the commands being sent to the server #[must_use] pub fn echo(mut self) -> Self { self.echo = true; self } /// New role will be a member of ROLE #[must_use] pub fn member_of>(mut self, role: S) -> Self { self.member_of = Some(role.as_ref().to_os_string()); self } /// Role inherits privileges of roles it is a member of (default) #[must_use] pub fn inherit(mut self) -> Self { self.inherit = true; self } /// Role does not inherit privileges #[must_use] pub fn no_inherit(mut self) -> Self { self.no_inherit = true; self } /// Role can login (default) #[must_use] pub fn login(mut self) -> Self { self.login = true; self } /// Role cannot login #[must_use] pub fn no_login(mut self) -> Self { self.no_login = true; self } /// ROLE will be a member of new role #[must_use] pub fn with_member>(mut self, role: S) -> Self { self.with_member = Some(role.as_ref().to_os_string()); self } /// Assign a password to new role #[must_use] pub fn pwprompt(mut self) -> Self { self.pwprompt = true; self } /// Role can create new roles #[must_use] pub fn createrole(mut self) -> Self { self.createrole = true; self } /// Role cannot create roles (default) #[must_use] pub fn no_createrole(mut self) -> Self { self.no_createrole = true; self } /// Role will be superuser #[must_use] pub fn superuser(mut self) -> Self { self.superuser = true; self } /// Role will not be superuser (default) #[must_use] pub fn no_superuser(mut self) -> Self { self.no_superuser = true; self } /// Password expiration date and time for role #[must_use] pub fn valid_until>(mut self, timestamp: S) -> Self { self.valid_until = Some(timestamp.as_ref().to_os_string()); self } /// Output version information, then exit #[must_use] pub fn version(mut self) -> Self { self.version = true; self } /// Prompt for missing role name and attributes rather than using 
defaults #[must_use] pub fn interactive(mut self) -> Self { self.interactive = true; self } /// Role can bypass row-level security (RLS) policy #[must_use] pub fn bypassrls(mut self) -> Self { self.bypassrls = true; self } /// Role cannot bypass row-level security (RLS) policy (default) #[must_use] pub fn no_bypassrls(mut self) -> Self { self.no_bypassrls = true; self } /// Role can initiate replication #[must_use] pub fn replication(mut self) -> Self { self.replication = true; self } /// Role cannot initiate replication (default) #[must_use] pub fn no_replication(mut self) -> Self { self.no_replication = true; self } /// Show this help, then exit #[must_use] pub fn help(mut self) -> Self { self.help = true; self } /// Database server host or socket directory #[must_use] pub fn host>(mut self, host: S) -> Self { self.host = Some(host.as_ref().to_os_string()); self } /// Database server port #[must_use] pub fn port(mut self, port: u16) -> Self { self.port = Some(port); self } /// User name to connect as (not the one to create) #[must_use] pub fn username>(mut self, username: S) -> Self { self.username = Some(username.as_ref().to_os_string()); self } /// Never prompt for password #[must_use] pub fn no_password(mut self) -> Self { self.no_password = true; self } /// Force password prompt #[must_use] pub fn password(mut self) -> Self { self.password = true; self } /// user password #[must_use] pub fn pg_password>(mut self, pg_password: S) -> Self { self.pg_password = Some(pg_password.as_ref().to_os_string()); self } } impl CommandBuilder for CreateUserBuilder { /// Get the program name fn get_program(&self) -> &'static OsStr { "createuser".as_ref() } /// Location of the program binary fn get_program_dir(&self) -> &Option { &self.program_dir } /// Get the arguments for the command fn get_args(&self) -> Vec { let mut args: Vec = Vec::new(); if let Some(role) = &self.with_admin { args.push("--with-admin".into()); args.push(role.into()); } if let Some(limit) = 
&self.connection_limit { args.push("--connection-limit".into()); args.push(limit.to_string().into()); } if self.createdb { args.push("--createdb".into()); } if self.no_createdb { args.push("--no-createdb".into()); } if self.echo { args.push("--echo".into()); } if let Some(role) = &self.member_of { args.push("--member-of".into()); args.push(role.into()); } if self.inherit { args.push("--inherit".into()); } if self.no_inherit { args.push("--no-inherit".into()); } if self.login { args.push("--login".into()); } if self.no_login { args.push("--no-login".into()); } if let Some(role) = &self.with_member { args.push("--with-member".into()); args.push(role.into()); } if self.pwprompt { args.push("--pwprompt".into()); } if self.createrole { args.push("--createrole".into()); } if self.no_createrole { args.push("--no-createrole".into()); } if self.superuser { args.push("--superuser".into()); } if self.no_superuser { args.push("--no-superuser".into()); } if let Some(timestamp) = &self.valid_until { args.push("--valid-until".into()); args.push(timestamp.into()); } if self.version { args.push("--version".into()); } if self.interactive { args.push("--interactive".into()); } if self.bypassrls { args.push("--bypassrls".into()); } if self.no_bypassrls { args.push("--no-bypassrls".into()); } if self.replication { args.push("--replication".into()); } if self.no_replication { args.push("--no-replication".into()); } if self.help { args.push("--help".into()); } if let Some(host) = &self.host { args.push("--host".into()); args.push(host.into()); } if let Some(port) = &self.port { args.push("--port".into()); args.push(port.to_string().into()); } if let Some(username) = &self.username { args.push("--username".into()); args.push(username.into()); } if self.no_password { args.push("--no-password".into()); } if self.password { args.push("--password".into()); } args } /// Get the environment variables for the command fn get_envs(&self) -> Vec<(OsString, OsString)> { let mut envs: Vec<(OsString, 
OsString)> = self.envs.clone(); if let Some(password) = &self.pg_password { envs.push(("PGPASSWORD".into(), password.into())); } envs } /// Set an environment variable for the command fn env>(mut self, key: S, value: S) -> Self { self.envs .push((key.as_ref().to_os_string(), value.as_ref().to_os_string())); self } } #[cfg(test)] mod tests { use super::*; use crate::TestSettings; use crate::TestSocketSettings; use crate::traits::CommandToString; use test_log::test; #[test] fn test_builder_new() { let command = CreateUserBuilder::new().program_dir(".").build(); assert_eq!( PathBuf::from(".").join("createuser"), PathBuf::from(command.to_command_string().replace('"', "")) ); } #[test] fn test_builder_from() { let command = CreateUserBuilder::from(&TestSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGPASSWORD="password" "./createuser" "#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\createuser" "#; assert_eq!( format!( r#"{command_prefix}"--host" "localhost" "--port" "5432" "--username" "postgres""# ), command.to_command_string() ); } #[test] fn test_builder() { let command = CreateUserBuilder::new() .env("PGDATABASE", "database") .with_admin("admin") .connection_limit(10) .createdb() .no_createdb() .echo() .member_of("member") .inherit() .no_inherit() .login() .no_login() .with_member("member") .pwprompt() .createrole() .no_createrole() .superuser() .no_superuser() .valid_until("2021-12-31") .version() .interactive() .bypassrls() .no_bypassrls() .replication() .no_replication() .help() .host("localhost") .port(5432) .username("username") .no_password() .password() .pg_password("password") .build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGDATABASE="database" PGPASSWORD="password" "#; #[cfg(target_os = "windows")] let command_prefix = String::new(); assert_eq!( format!( r#"{command_prefix}"createuser" "--with-admin" "admin" "--connection-limit" "10" "--createdb" "--no-createdb" "--echo" "--member-of" 
"member" "--inherit" "--no-inherit" "--login" "--no-login" "--with-member" "member" "--pwprompt" "--createrole" "--no-createrole" "--superuser" "--no-superuser" "--valid-until" "2021-12-31" "--version" "--interactive" "--bypassrls" "--no-bypassrls" "--replication" "--no-replication" "--help" "--host" "localhost" "--port" "5432" "--username" "username" "--no-password" "--password""# ), command.to_command_string() ); } #[test] fn test_builder_from_socket() { let command = CreateUserBuilder::from(&TestSocketSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGPASSWORD="password" "./createuser" "#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\createuser" "#; assert_eq!( format!( r#"{command_prefix}"--host" "/tmp/pg_socket" "--port" "5432" "--username" "postgres""# ), command.to_command_string() ); } } ================================================ FILE: postgresql_commands/src/dropdb.rs ================================================ use crate::Settings; use crate::traits::CommandBuilder; use std::ffi::{OsStr, OsString}; use std::path::PathBuf; /// `dropdb` removes a `PostgreSQL` database. 
#[derive(Clone, Debug, Default)] pub struct DropDbBuilder { program_dir: Option, envs: Vec<(OsString, OsString)>, echo: bool, force: bool, interactive: bool, version: bool, if_exists: bool, help: bool, host: Option, port: Option, username: Option, no_password: bool, password: bool, pg_password: Option, maintenance_db: Option, dbname: Option, } impl DropDbBuilder { /// Create a new [`DropDbBuilder`] #[must_use] pub fn new() -> Self { Self::default() } /// Create a new [`DropDbBuilder`] from [Settings] pub fn from(settings: &dyn Settings) -> Self { let mut builder = Self::new() .program_dir(settings.get_binary_dir()) .host(settings.get_host()) .port(settings.get_port()) .username(settings.get_username()) .pg_password(settings.get_password()); if let Some(socket_dir) = settings.get_socket_dir() { builder = builder.host(socket_dir.to_string_lossy().to_string()); } builder } /// Location of the program binary #[must_use] pub fn program_dir>(mut self, path: P) -> Self { self.program_dir = Some(path.into()); self } /// Show the commands being sent to the server #[must_use] pub fn echo(mut self) -> Self { self.echo = true; self } /// Try to terminate other connections before dropping #[must_use] pub fn force(mut self) -> Self { self.force = true; self } /// Prompt before deleting anything #[must_use] pub fn interactive(mut self) -> Self { self.interactive = true; self } /// Output version information, then exit #[must_use] pub fn version(mut self) -> Self { self.version = true; self } /// Don't report error if database doesn't exist #[must_use] pub fn if_exists(mut self) -> Self { self.if_exists = true; self } /// Show help, then exit #[must_use] pub fn help(mut self) -> Self { self.help = true; self } /// Database server host or socket directory #[must_use] pub fn host>(mut self, host: S) -> Self { self.host = Some(host.as_ref().to_os_string()); self } /// Database server port #[must_use] pub fn port(mut self, port: u16) -> Self { self.port = Some(port); self } /// User 
name to connect as #[must_use] pub fn username>(mut self, username: S) -> Self { self.username = Some(username.as_ref().to_os_string()); self } /// Never prompt for password #[must_use] pub fn no_password(mut self) -> Self { self.no_password = true; self } /// Force password prompt #[must_use] pub fn password(mut self) -> Self { self.password = true; self } /// user password #[must_use] pub fn pg_password>(mut self, pg_password: S) -> Self { self.pg_password = Some(pg_password.as_ref().to_os_string()); self } /// Alternate maintenance database #[must_use] pub fn maintenance_db>(mut self, db: S) -> Self { self.maintenance_db = Some(db.as_ref().to_os_string()); self } /// Database name #[must_use] pub fn dbname>(mut self, dbname: S) -> Self { self.dbname = Some(dbname.as_ref().to_os_string()); self } } impl CommandBuilder for DropDbBuilder { /// Get the program name fn get_program(&self) -> &'static OsStr { "dropdb".as_ref() } /// Location of the program binary fn get_program_dir(&self) -> &Option { &self.program_dir } /// Get the arguments for the command fn get_args(&self) -> Vec { let mut args: Vec = Vec::new(); if self.echo { args.push("--echo".into()); } if self.force { args.push("--force".into()); } if self.interactive { args.push("--interactive".into()); } if self.version { args.push("--version".into()); } if self.if_exists { args.push("--if-exists".into()); } if self.help { args.push("--help".into()); } if let Some(host) = &self.host { args.push("--host".into()); args.push(host.into()); } if let Some(port) = &self.port { args.push("--port".into()); args.push(port.to_string().into()); } if let Some(username) = &self.username { args.push("--username".into()); args.push(username.into()); } if self.no_password { args.push("--no-password".into()); } if self.password { args.push("--password".into()); } if let Some(db) = &self.maintenance_db { args.push("--maintenance-db".into()); args.push(db.into()); } if let Some(dbname) = &self.dbname { args.push(dbname.into()); 
} args } /// Get the environment variables for the command fn get_envs(&self) -> Vec<(OsString, OsString)> { let mut envs: Vec<(OsString, OsString)> = self.envs.clone(); if let Some(password) = &self.pg_password { envs.push(("PGPASSWORD".into(), password.into())); } envs } /// Set an environment variable for the command fn env>(mut self, key: S, value: S) -> Self { self.envs .push((key.as_ref().to_os_string(), value.as_ref().to_os_string())); self } } #[cfg(test)] mod tests { use super::*; use crate::TestSettings; use crate::TestSocketSettings; use crate::traits::CommandToString; use test_log::test; #[test] fn test_builder_new() { let command = DropDbBuilder::new().program_dir(".").build(); assert_eq!( PathBuf::from(".").join("dropdb"), PathBuf::from(command.to_command_string().replace('"', "")) ); } #[test] fn test_builder_from() { let command = DropDbBuilder::from(&TestSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGPASSWORD="password" "./dropdb" "#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\dropdb" "#; assert_eq!( format!( r#"{command_prefix}"--host" "localhost" "--port" "5432" "--username" "postgres""# ), command.to_command_string() ); } #[test] fn test_builder() { let command = DropDbBuilder::new() .env("PGDATABASE", "database") .echo() .force() .interactive() .version() .if_exists() .help() .host("localhost") .port(5432) .username("postgres") .no_password() .password() .pg_password("password") .maintenance_db("postgres") .dbname("dbname") .build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGDATABASE="database" PGPASSWORD="password" "#; #[cfg(target_os = "windows")] let command_prefix = String::new(); assert_eq!( format!( r#"{command_prefix}"dropdb" "--echo" "--force" "--interactive" "--version" "--if-exists" "--help" "--host" "localhost" "--port" "5432" "--username" "postgres" "--no-password" "--password" "--maintenance-db" "postgres" "dbname""# ), command.to_command_string() ); } #[test] fn 
test_builder_from_socket() {
        let command = DropDbBuilder::from(&TestSocketSettings).build();
        #[cfg(not(target_os = "windows"))]
        let command_prefix = r#"PGPASSWORD="password" "./dropdb" "#;
        #[cfg(target_os = "windows")]
        let command_prefix = r#"".\\dropdb" "#;
        assert_eq!(
            format!(
                r#"{command_prefix}"--host" "/tmp/pg_socket" "--port" "5432" "--username" "postgres""#
            ),
            command.to_command_string()
        );
    }
}

================================================
FILE: postgresql_commands/src/dropuser.rs
================================================
use crate::Settings;
use crate::traits::CommandBuilder;
use std::convert::AsRef;
use std::ffi::{OsStr, OsString};
use std::path::PathBuf;

/// `dropuser` removes a `PostgreSQL` role.
#[derive(Clone, Debug, Default)]
pub struct DropUserBuilder {
    program_dir: Option<PathBuf>,
    envs: Vec<(OsString, OsString)>,
    echo: bool,
    interactive: bool,
    version: bool,
    if_exists: bool,
    help: bool,
    host: Option<OsString>,
    port: Option<u16>,
    username: Option<OsString>,
    no_password: bool,
    password: bool,
    pg_password: Option<OsString>,
}

impl DropUserBuilder {
    /// Create a new [`DropUserBuilder`]
    #[must_use]
    pub fn new() -> Self {
        Self::default()
    }

    /// Create a new [`DropUserBuilder`] from [Settings]
    pub fn from(settings: &dyn Settings) -> Self {
        let mut builder = Self::new()
            .program_dir(settings.get_binary_dir())
            .host(settings.get_host())
            .port(settings.get_port())
            .username(settings.get_username())
            .pg_password(settings.get_password());
        // A socket directory, when configured, takes precedence over the TCP host.
        if let Some(socket_dir) = settings.get_socket_dir() {
            builder = builder.host(socket_dir.to_string_lossy().to_string());
        }
        builder
    }

    /// Location of the program binary
    #[must_use]
    pub fn program_dir<P: Into<PathBuf>>(mut self, path: P) -> Self {
        self.program_dir = Some(path.into());
        self
    }

    /// Show the commands being sent to the server
    #[must_use]
    pub fn echo(mut self) -> Self {
        self.echo = true;
        self
    }

    /// Prompt before deleting anything, and prompt for role name if not specified
    #[must_use]
    pub fn interactive(mut self) -> Self {
        self.interactive = true;
        self
    }

    /// Output version information, then exit
    #[must_use]
    pub fn version(mut self) -> Self {
        self.version = true;
        self
    }

    /// Don't report error if user doesn't exist
    #[must_use]
    pub fn if_exists(mut self) -> Self {
        self.if_exists = true;
        self
    }

    /// Show help, then exit
    #[must_use]
    pub fn help(mut self) -> Self {
        self.help = true;
        self
    }

    /// Database server host or socket directory
    #[must_use]
    pub fn host<S: AsRef<OsStr>>(mut self, host: S) -> Self {
        self.host = Some(host.as_ref().to_os_string());
        self
    }

    /// Database server port
    #[must_use]
    pub fn port(mut self, port: u16) -> Self {
        self.port = Some(port);
        self
    }

    /// User name to connect as (not the one to drop)
    #[must_use]
    pub fn username<S: AsRef<OsStr>>(mut self, username: S) -> Self {
        self.username = Some(username.as_ref().to_os_string());
        self
    }

    /// Never prompt for password
    #[must_use]
    pub fn no_password(mut self) -> Self {
        self.no_password = true;
        self
    }

    /// Force password prompt
    #[must_use]
    pub fn password(mut self) -> Self {
        self.password = true;
        self
    }

    /// user password
    #[must_use]
    pub fn pg_password<S: AsRef<OsStr>>(mut self, pg_password: S) -> Self {
        self.pg_password = Some(pg_password.as_ref().to_os_string());
        self
    }
}

impl CommandBuilder for DropUserBuilder {
    /// Get the program name
    fn get_program(&self) -> &'static OsStr {
        "dropuser".as_ref()
    }

    /// Location of the program binary
    fn get_program_dir(&self) -> &Option<PathBuf> {
        &self.program_dir
    }

    /// Get the arguments for the command
    fn get_args(&self) -> Vec<OsString> {
        let mut args: Vec<OsString> = Vec::new();

        if self.echo {
            args.push("--echo".into());
        }
        if self.interactive {
            args.push("--interactive".into());
        }
        if self.version {
            args.push("--version".into());
        }
        if self.if_exists {
            args.push("--if-exists".into());
        }
        if self.help {
            args.push("--help".into());
        }
        if let Some(host) = &self.host {
            args.push("--host".into());
            args.push(host.into());
        }
        if let Some(port) = &self.port {
            args.push("--port".into());
            args.push(port.to_string().into());
        }
        if let Some(username) = &self.username {
            args.push("--username".into());
            args.push(username.into());
        }
        if self.no_password {
            args.push("--no-password".into());
        }
        if self.password {
            args.push("--password".into());
        }

        args
    }

    /// Get the environment variables for the command
    fn get_envs(&self) -> Vec<(OsString, OsString)> {
        let mut envs: Vec<(OsString, OsString)> = self.envs.clone();

        // The password is passed via PGPASSWORD rather than a CLI argument.
        if let Some(password) = &self.pg_password {
            envs.push(("PGPASSWORD".into(), password.into()));
        }

        envs
    }

    /// Set an environment variable for the command
    fn env<S: AsRef<OsStr>>(mut self, key: S, value: S) -> Self {
        self.envs
            .push((key.as_ref().to_os_string(), value.as_ref().to_os_string()));
        self
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::TestSettings;
    use crate::TestSocketSettings;
    use crate::traits::CommandToString;
    use test_log::test;

    #[test]
    fn test_builder_new() {
        let command = DropUserBuilder::new().program_dir(".").build();
        assert_eq!(
            PathBuf::from(".").join("dropuser"),
            PathBuf::from(command.to_command_string().replace('"', ""))
        );
    }

    #[test]
    fn test_builder_from() {
        let command = DropUserBuilder::from(&TestSettings).build();
        #[cfg(not(target_os = "windows"))]
        let command_prefix = r#"PGPASSWORD="password" "./dropuser" "#;
        #[cfg(target_os = "windows")]
        let command_prefix = r#"".\\dropuser" "#;

        assert_eq!(
            format!(
                r#"{command_prefix}"--host" "localhost" "--port" "5432" "--username" "postgres""#
            ),
            command.to_command_string()
        );
    }

    #[test]
    fn test_builder() {
        let command = DropUserBuilder::new()
            .env("PGDATABASE", "database")
            .echo()
            .interactive()
            .version()
            .if_exists()
            .help()
            .host("localhost")
            .port(5432)
            .username("postgres")
            .no_password()
            .password()
            .pg_password("password")
            .build();
        #[cfg(not(target_os = "windows"))]
        let command_prefix = r#"PGDATABASE="database" PGPASSWORD="password" "#;
        #[cfg(target_os = "windows")]
        let command_prefix = String::new();

        assert_eq!(
            format!(
                r#"{command_prefix}"dropuser" "--echo" "--interactive" "--version" "--if-exists" "--help" "--host" "localhost" "--port" "5432" "--username" "postgres" "--no-password" "--password""#
            ),
            command.to_command_string()
        );
    }

    #[test]
    fn test_builder_from_socket() {
        let command = DropUserBuilder::from(&TestSocketSettings).build();
        #[cfg(not(target_os = "windows"))]
        let command_prefix = r#"PGPASSWORD="password" "./dropuser" "#;
        #[cfg(target_os = "windows")]
        let command_prefix = r#"".\\dropuser" "#;

        assert_eq!(
            format!(
                r#"{command_prefix}"--host" "/tmp/pg_socket" "--port" "5432" "--username" "postgres""#
            ),
            command.to_command_string()
        );
    }
}

================================================
FILE: postgresql_commands/src/ecpg.rs
================================================
use crate::Settings;
use crate::traits::CommandBuilder;
use std::convert::AsRef;
use std::ffi::{OsStr, OsString};
use std::path::PathBuf;

/// `ecpg` is the `PostgreSQL` embedded SQL preprocessor for C programs.
#[derive(Clone, Debug, Default)]
pub struct EcpgBuilder {
    program_dir: Option<PathBuf>,
    envs: Vec<(OsString, OsString)>,
    c: bool,
    compatibility_mode: Option<OsString>,
    symbol: Option<OsString>,
    header_file: bool,
    system_include_files: bool,
    directory: Option<OsString>,
    outfile: Option<OsString>,
    runtime_behavior: Option<OsString>,
    regression: bool,
    autocommit: bool,
    version: bool,
    help: bool,
}

impl EcpgBuilder {
    /// Create a new [`EcpgBuilder`]
    #[must_use]
    pub fn new() -> Self {
        Self::default()
    }

    /// Create a new [`EcpgBuilder`] from [Settings]
    pub fn from(settings: &dyn Settings) -> Self {
        Self::new().program_dir(settings.get_binary_dir())
    }

    /// Location of the program binary
    #[must_use]
    pub fn program_dir<P: Into<PathBuf>>(mut self, path: P) -> Self {
        self.program_dir = Some(path.into());
        self
    }

    /// Automatically generate C code from embedded SQL code
    #[must_use]
    pub fn c(mut self) -> Self {
        self.c = true;
        self
    }

    /// Set compatibility mode
    #[must_use]
    pub fn compatibility_mode<S: AsRef<OsStr>>(mut self, compatibility_mode: S) -> Self {
        self.compatibility_mode = Some(compatibility_mode.as_ref().to_os_string());
        self
    }

    /// Define SYMBOL
    #[must_use]
    pub fn symbol<S: AsRef<OsStr>>(mut self, symbol: S) -> Self {
        self.symbol = Some(symbol.as_ref().to_os_string());
        self
    }
header file #[must_use] pub fn header_file(mut self) -> Self { self.header_file = true; self.c() } /// Parse system include files as well #[must_use] pub fn system_include_files(mut self) -> Self { self.system_include_files = true; self } /// Search DIRECTORY for include files #[must_use] pub fn directory>(mut self, directory: S) -> Self { self.directory = Some(directory.as_ref().to_os_string()); self } /// Write result to OUTFILE #[must_use] pub fn outfile>(mut self, outfile: S) -> Self { self.outfile = Some(outfile.as_ref().to_os_string()); self } /// Specify run-time behavior #[must_use] pub fn runtime_behavior>(mut self, runtime_behavior: S) -> Self { self.runtime_behavior = Some(runtime_behavior.as_ref().to_os_string()); self } /// Run in regression testing mode #[must_use] pub fn regression(mut self) -> Self { self.regression = true; self } /// Turn on autocommit of transactions #[must_use] pub fn autocommit(mut self) -> Self { self.autocommit = true; self } /// Output version information, then exit #[must_use] pub fn version(mut self) -> Self { self.version = true; self } /// Show help, then exit #[must_use] pub fn help(mut self) -> Self { self.help = true; self } } impl CommandBuilder for EcpgBuilder { /// Get the program name fn get_program(&self) -> &'static OsStr { "ecpg".as_ref() } /// Location of the program binary fn get_program_dir(&self) -> &Option { &self.program_dir } /// Get the arguments for the command fn get_args(&self) -> Vec { let mut args: Vec = Vec::new(); if self.c { args.push("-c".into()); } if let Some(mode) = &self.compatibility_mode { args.push("-C".into()); args.push(mode.into()); } if let Some(symbol) = &self.symbol { args.push("-D".into()); args.push(symbol.into()); } if self.header_file { args.push("-h".into()); } if self.system_include_files { args.push("-i".into()); } if let Some(directory) = &self.directory { args.push("-I".into()); args.push(directory.into()); } if let Some(outfile) = &self.outfile { args.push("-o".into()); 
args.push(outfile.into()); } if let Some(behavior) = &self.runtime_behavior { args.push("-r".into()); args.push(behavior.into()); } if self.regression { args.push("--regression".into()); } if self.autocommit { args.push("-t".into()); } if self.version { args.push("--version".into()); } if self.help { args.push("--help".into()); } args } /// Get the environment variables for the command fn get_envs(&self) -> Vec<(OsString, OsString)> { self.envs.clone() } /// Set an environment variable for the command fn env>(mut self, key: S, value: S) -> Self { self.envs .push((key.as_ref().to_os_string(), value.as_ref().to_os_string())); self } } #[cfg(test)] mod tests { use super::*; use crate::TestSettings; use crate::traits::CommandToString; use test_log::test; #[test] fn test_builder_new() { let command = EcpgBuilder::new().program_dir(".").build(); assert_eq!( PathBuf::from(".").join("ecpg"), PathBuf::from(command.to_command_string().replace('"', "")) ); } #[test] fn test_builder_from() { let command = EcpgBuilder::from(&TestSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#""./ecpg""#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\ecpg""#; assert_eq!(format!("{command_prefix}"), command.to_command_string()); } #[test] fn test_builder() { let command = EcpgBuilder::new() .env("PGDATABASE", "database") .c() .compatibility_mode("mode") .symbol("symbol") .header_file() .system_include_files() .directory("directory") .outfile("outfile") .runtime_behavior("behavior") .regression() .autocommit() .version() .help() .build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGDATABASE="database" "#; #[cfg(target_os = "windows")] let command_prefix = String::new(); assert_eq!( format!( r#"{command_prefix}"ecpg" "-c" "-C" "mode" "-D" "symbol" "-h" "-i" "-I" "directory" "-o" "outfile" "-r" "behavior" "--regression" "-t" "--version" "--help""# ), command.to_command_string() ); } } ================================================ FILE: 
postgresql_commands/src/error.rs ================================================ /// `PostgreSQL` command result type pub type Result = core::result::Result; /// `PostgreSQL` command errors #[derive(Debug, thiserror::Error)] pub enum Error { /// Error when a command fails #[error("Command error: stdout={stdout}; stderr={stderr}")] CommandError { stdout: String, stderr: String }, /// Error when IO operations fail #[error("{0}")] IoError(String), /// Error when a command fails to execute before the timeout is reached #[error("{0}")] TimeoutError(String), } /// Convert [standard IO errors](std::io::Error) to a [embedded errors](Error::IoError) impl From for Error { fn from(error: std::io::Error) -> Self { Error::IoError(error.to_string()) } } #[cfg(feature = "tokio")] /// Convert [elapsed time errors](tokio::time::error::Elapsed) to [embedded errors](Error::TimeoutError) impl From for Error { fn from(error: tokio::time::error::Elapsed) -> Self { Error::TimeoutError(error.to_string()) } } /// These are relatively low value tests; they are here to reduce the coverage gap and /// ensure that the error conversions are working as expected. 
#[cfg(test)] mod test { use super::*; #[test] fn test_from_io_error() { let io_error = std::io::Error::other("test"); let error = Error::from(io_error); assert_eq!(error.to_string(), "test"); } #[cfg(feature = "tokio")] #[tokio::test] async fn test_from_elapsed_error() { let result = tokio::time::timeout(std::time::Duration::from_nanos(1), async { tokio::time::sleep(std::time::Duration::from_secs(1)).await; }) .await; assert!(result.is_err()); if let Err(elapsed_error) = result { let error = Error::from(elapsed_error); assert_eq!(error.to_string(), "deadline has elapsed"); } } } ================================================ FILE: postgresql_commands/src/initdb.rs ================================================ use crate::Settings; use crate::traits::CommandBuilder; use std::convert::AsRef; use std::ffi::{OsStr, OsString}; use std::path::PathBuf; /// `initdb` initializes a `PostgreSQL` database cluster. #[derive(Clone, Debug, Default)] pub struct InitDbBuilder { program_dir: Option, envs: Vec<(OsString, OsString)>, auth: Option, auth_host: Option, auth_local: Option, pgdata: Option, encoding: Option, allow_group_access: bool, icu_locale: Option, icu_rules: Option, data_checksums: bool, locale: Option, lc_collate: Option, lc_ctype: Option, lc_messages: Option, lc_monetary: Option, lc_numeric: Option, lc_time: Option, no_locale: bool, locale_provider: Option, pwfile: Option, text_search_config: Option, username: Option, pwprompt: bool, waldir: Option, wal_segsize: Option, set: Option, debug: bool, discard_caches: bool, directory: Option, no_clean: bool, no_sync: bool, no_instructions: bool, show: bool, sync_only: bool, version: bool, help: bool, } impl InitDbBuilder { /// Create a new [`InitDbBuilder`] #[must_use] pub fn new() -> Self { Self::default() } /// Create a new [`InitDbBuilder`] from [Settings] pub fn from(settings: &dyn Settings) -> Self { Self::new() .program_dir(settings.get_binary_dir()) .username(settings.get_username()) } /// Location of the 
program binary #[must_use] pub fn program_dir>(mut self, path: P) -> Self { self.program_dir = Some(path.into()); self } /// Set the default authentication method for local connections #[must_use] pub fn auth>(mut self, auth: S) -> Self { self.auth = Some(auth.as_ref().to_os_string()); self } /// Set the default authentication method for local TCP/IP connections #[must_use] pub fn auth_host>(mut self, auth_host: S) -> Self { self.auth_host = Some(auth_host.as_ref().to_os_string()); self } /// Set the default authentication method for local-socket connections #[must_use] pub fn auth_local>(mut self, auth_local: S) -> Self { self.auth_local = Some(auth_local.as_ref().to_os_string()); self } /// Set the location for this database cluster #[must_use] pub fn pgdata>(mut self, pgdata: P) -> Self { self.pgdata = Some(pgdata.into()); self } /// Set the default encoding for new databases #[must_use] pub fn encoding>(mut self, encoding: S) -> Self { self.encoding = Some(encoding.as_ref().to_os_string()); self } /// Allow group read/execute on data directory #[must_use] pub fn allow_group_access(mut self) -> Self { self.allow_group_access = true; self } /// Set the ICU locale ID for new databases #[must_use] pub fn icu_locale>(mut self, icu_locale: S) -> Self { self.icu_locale = Some(icu_locale.as_ref().to_os_string()); self } /// Set additional ICU collation rules for new databases #[must_use] pub fn icu_rules>(mut self, icu_rules: S) -> Self { self.icu_rules = Some(icu_rules.as_ref().to_os_string()); self } /// Use data page checksums #[must_use] pub fn data_checksums(mut self) -> Self { self.data_checksums = true; self } /// Set the default locale for new databases #[must_use] pub fn locale>(mut self, locale: S) -> Self { self.locale = Some(locale.as_ref().to_os_string()); self } /// Set the default locale in the respective category for new databases #[must_use] pub fn lc_collate>(mut self, lc_collate: S) -> Self { self.lc_collate = 
Some(lc_collate.as_ref().to_os_string()); self } /// Set the default locale in the respective category for new databases #[must_use] pub fn lc_ctype>(mut self, lc_ctype: S) -> Self { self.lc_ctype = Some(lc_ctype.as_ref().to_os_string()); self } /// Set the default locale in the respective category for new databases #[must_use] pub fn lc_messages>(mut self, lc_messages: S) -> Self { self.lc_messages = Some(lc_messages.as_ref().to_os_string()); self } /// Set the default locale in the respective category for new databases #[must_use] pub fn lc_monetary>(mut self, lc_monetary: S) -> Self { self.lc_monetary = Some(lc_monetary.as_ref().to_os_string()); self } /// Set the default locale in the respective category for new databases #[must_use] pub fn lc_numeric>(mut self, lc_numeric: S) -> Self { self.lc_numeric = Some(lc_numeric.as_ref().to_os_string()); self } /// Set the default locale in the respective category for new databases #[must_use] pub fn lc_time>(mut self, lc_time: S) -> Self { self.lc_time = Some(lc_time.as_ref().to_os_string()); self } /// Equivalent to --locale=C #[must_use] pub fn no_locale(mut self) -> Self { self.no_locale = true; self } /// Set the default locale provider for new databases #[must_use] pub fn locale_provider>(mut self, locale_provider: S) -> Self { self.locale_provider = Some(locale_provider.as_ref().to_os_string()); self } /// Read password for the new superuser from file #[must_use] pub fn pwfile>(mut self, pwfile: P) -> Self { self.pwfile = Some(pwfile.into()); self } /// Set the default text search configuration #[must_use] pub fn text_search_config>(mut self, text_search_config: S) -> Self { self.text_search_config = Some(text_search_config.as_ref().to_os_string()); self } /// Set the database superuser name #[must_use] pub fn username>(mut self, username: S) -> Self { self.username = Some(username.as_ref().to_os_string()); self } /// Prompt for a password for the new superuser #[must_use] pub fn pwprompt(mut self) -> Self { 
self.pwprompt = true; self } /// Set the location for the write-ahead log directory #[must_use] pub fn waldir>(mut self, waldir: S) -> Self { self.waldir = Some(waldir.as_ref().to_os_string()); self } /// Set the size of WAL segments, in megabytes #[must_use] pub fn wal_segsize>(mut self, wal_segsize: S) -> Self { self.wal_segsize = Some(wal_segsize.as_ref().to_os_string()); self } /// Override default setting for server parameter #[must_use] pub fn set>(mut self, set: S) -> Self { self.set = Some(set.as_ref().to_os_string()); self } /// Generate lots of debugging output #[must_use] pub fn debug(mut self) -> Self { self.debug = true; self } /// Set `debug_discard_caches=1` #[must_use] pub fn discard_caches(mut self) -> Self { self.discard_caches = true; self } /// Set where to find the input files #[must_use] pub fn directory>(mut self, directory: S) -> Self { self.directory = Some(directory.as_ref().to_os_string()); self } /// Do not clean up after errors #[must_use] pub fn no_clean(mut self) -> Self { self.no_clean = true; self } /// Do not wait for changes to be written safely to disk #[must_use] pub fn no_sync(mut self) -> Self { self.no_sync = true; self } /// Do not print instructions for next steps #[must_use] pub fn no_instructions(mut self) -> Self { self.no_instructions = true; self } /// Show internal settings #[must_use] pub fn show(mut self) -> Self { self.show = true; self } /// Only sync database files to disk, then exit #[must_use] pub fn sync_only(mut self) -> Self { self.sync_only = true; self } /// Output version information, then exit #[must_use] pub fn version(mut self) -> Self { self.version = true; self } /// Show help, then exit #[must_use] pub fn help(mut self) -> Self { self.help = true; self } } impl CommandBuilder for InitDbBuilder { /// Get the program name fn get_program(&self) -> &'static OsStr { "initdb".as_ref() } /// Location of the program binary fn get_program_dir(&self) -> &Option { &self.program_dir } /// Get the arguments for 
the command #[expect(clippy::too_many_lines)] fn get_args(&self) -> Vec { let mut args: Vec = Vec::new(); if let Some(auth) = &self.auth { args.push("--auth".into()); args.push(auth.into()); } if let Some(auth_host) = &self.auth_host { args.push("--auth-host".into()); args.push(auth_host.into()); } if let Some(auth_local) = &self.auth_local { args.push("--auth-local".into()); args.push(auth_local.into()); } if let Some(pgdata) = &self.pgdata { args.push("--pgdata".into()); args.push(pgdata.into()); } if let Some(encoding) = &self.encoding { args.push("--encoding".into()); args.push(encoding.into()); } if self.allow_group_access { args.push("--allow-group-access".into()); } if let Some(icu_locale) = &self.icu_locale { args.push("--icu-locale".into()); args.push(icu_locale.into()); } if let Some(icu_rules) = &self.icu_rules { args.push("--icu-rules".into()); args.push(icu_rules.into()); } if self.data_checksums { args.push("--data-checksums".into()); } if let Some(locale) = &self.locale { args.push("--locale".into()); args.push(locale.into()); } if let Some(lc_collate) = &self.lc_collate { args.push("--lc-collate".into()); args.push(lc_collate.into()); } if let Some(lc_ctype) = &self.lc_ctype { args.push("--lc-ctype".into()); args.push(lc_ctype.into()); } if let Some(lc_messages) = &self.lc_messages { args.push("--lc-messages".into()); args.push(lc_messages.into()); } if let Some(lc_monetary) = &self.lc_monetary { args.push("--lc-monetary".into()); args.push(lc_monetary.into()); } if let Some(lc_numeric) = &self.lc_numeric { args.push("--lc-numeric".into()); args.push(lc_numeric.into()); } if let Some(lc_time) = &self.lc_time { args.push("--lc-time".into()); args.push(lc_time.into()); } if self.no_locale { args.push("--no-locale".into()); } if let Some(locale_provider) = &self.locale_provider { args.push("--locale-provider".into()); args.push(locale_provider.into()); } if let Some(pwfile) = &self.pwfile { args.push("--pwfile".into()); args.push(pwfile.into()); } if 
let Some(text_search_config) = &self.text_search_config { args.push("--text-search-config".into()); args.push(text_search_config.into()); } if let Some(username) = &self.username { args.push("--username".into()); args.push(username.into()); } if self.pwprompt { args.push("--pwprompt".into()); } if let Some(waldir) = &self.waldir { args.push("--waldir".into()); args.push(waldir.into()); } if let Some(wal_segsize) = &self.wal_segsize { args.push("--wal-segsize".into()); args.push(wal_segsize.into()); } if let Some(set) = &self.set { args.push("--set".into()); args.push(set.into()); } if self.debug { args.push("--debug".into()); } if self.discard_caches { args.push("--discard-caches".into()); } if let Some(directory) = &self.directory { args.push("--directory".into()); args.push(directory.into()); } if self.no_clean { args.push("--no-clean".into()); } if self.no_sync { args.push("--no-sync".into()); } if self.no_instructions { args.push("--no-instructions".into()); } if self.show { args.push("--show".into()); } if self.sync_only { args.push("--sync-only".into()); } if self.version { args.push("--version".into()); } if self.help { args.push("--help".into()); } args } /// Get the environment variables for the command fn get_envs(&self) -> Vec<(OsString, OsString)> { self.envs.clone() } /// Set an environment variable for the command fn env>(mut self, key: S, value: S) -> Self { self.envs .push((key.as_ref().to_os_string(), value.as_ref().to_os_string())); self } } #[cfg(test)] mod tests { use super::*; use crate::TestSettings; use crate::traits::CommandToString; use test_log::test; #[test] fn test_builder_new() { let command = InitDbBuilder::new().program_dir(".").build(); assert_eq!( PathBuf::from(".").join("initdb"), PathBuf::from(command.to_command_string().replace('"', "")) ); } #[test] fn test_builder_from() { let command = InitDbBuilder::from(&TestSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#""./initdb" "#; #[cfg(target_os = 
"windows")] let command_prefix = r#"".\\initdb" "#; assert_eq!( format!(r#"{command_prefix}"--username" "postgres""#), command.to_command_string() ); } #[test] fn test_builder() { let command = InitDbBuilder::new() .env("PGDATABASE", "database") .auth("md5") .auth_host("md5") .auth_local("md5") .pgdata("pgdata") .encoding("UTF8") .allow_group_access() .icu_locale("en_US") .icu_rules("phonebook") .data_checksums() .locale("en_US") .lc_collate("en_US") .lc_ctype("en_US") .lc_messages("en_US") .lc_monetary("en_US") .lc_numeric("en_US") .lc_time("en_US") .no_locale() .locale_provider("icu") .pwfile(".pwfile") .text_search_config("english") .username("postgres") .pwprompt() .waldir("waldir") .wal_segsize("1") .set("timezone=UTC") .debug() .discard_caches() .directory("directory") .no_clean() .no_sync() .no_instructions() .show() .sync_only() .version() .help() .build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGDATABASE="database" "#; #[cfg(target_os = "windows")] let command_prefix = String::new(); assert_eq!( format!( r#"{command_prefix}"initdb" "--auth" "md5" "--auth-host" "md5" "--auth-local" "md5" "--pgdata" "pgdata" "--encoding" "UTF8" "--allow-group-access" "--icu-locale" "en_US" "--icu-rules" "phonebook" "--data-checksums" "--locale" "en_US" "--lc-collate" "en_US" "--lc-ctype" "en_US" "--lc-messages" "en_US" "--lc-monetary" "en_US" "--lc-numeric" "en_US" "--lc-time" "en_US" "--no-locale" "--locale-provider" "icu" "--pwfile" ".pwfile" "--text-search-config" "english" "--username" "postgres" "--pwprompt" "--waldir" "waldir" "--wal-segsize" "1" "--set" "timezone=UTC" "--debug" "--discard-caches" "--directory" "directory" "--no-clean" "--no-sync" "--no-instructions" "--show" "--sync-only" "--version" "--help""# ), command.to_command_string() ); } } ================================================ FILE: postgresql_commands/src/lib.rs ================================================ //! Command builders for interacting with `PostgreSQL` via CLI. //! 
//! The commands are implemented as builders, which can be used to construct a //! [standard Command](std::process::Command) or [tokio Command](tokio::process::Command). pub mod clusterdb; pub mod createdb; pub mod createuser; pub mod dropdb; pub mod dropuser; pub mod ecpg; pub mod error; pub mod initdb; pub mod oid2name; pub mod pg_amcheck; pub mod pg_archivecleanup; pub mod pg_basebackup; pub mod pg_checksums; pub mod pg_config; pub mod pg_controldata; pub mod pg_ctl; pub mod pg_dump; pub mod pg_dumpall; pub mod pg_isready; pub mod pg_receivewal; pub mod pg_recvlogical; pub mod pg_resetwal; pub mod pg_restore; pub mod pg_rewind; pub mod pg_test_fsync; pub mod pg_test_timing; pub mod pg_upgrade; pub mod pg_verifybackup; pub mod pg_waldump; pub mod pgbench; pub mod postgres; pub mod psql; pub mod reindexdb; pub mod traits; pub mod vacuumdb; pub mod vacuumlo; pub use error::{Error, Result}; #[cfg(test)] pub use traits::TestSettings; #[cfg(test)] pub use traits::TestSocketSettings; pub use traits::{AsyncCommandExecutor, CommandBuilder, CommandExecutor, Settings}; ================================================ FILE: postgresql_commands/src/oid2name.rs ================================================ use crate::Settings; use crate::traits::CommandBuilder; use std::convert::AsRef; use std::ffi::{OsStr, OsString}; use std::path::PathBuf; /// `oid2name` helps to examine the file structure used by `PostgreSQL`. 
#[derive(Clone, Debug, Default)] pub struct Oid2NameBuilder { program_dir: Option, envs: Vec<(OsString, OsString)>, filenode: Option, indexes: bool, oid: Option, quiet: bool, tablespaces: bool, system_objects: bool, table: Option, version: bool, extended: bool, help: bool, dbname: Option, host: Option, port: Option, username: Option, } impl Oid2NameBuilder { /// Create a new [`Oid2NameBuilder`] #[must_use] pub fn new() -> Self { Self::default() } /// Create a new [`Oid2NameBuilder`] from [Settings] pub fn from(settings: &dyn Settings) -> Self { let mut builder = Self::new() .program_dir(settings.get_binary_dir()) .host(settings.get_host()) .port(settings.get_port()) .username(settings.get_username()); if let Some(socket_dir) = settings.get_socket_dir() { builder = builder.host(socket_dir.to_string_lossy().to_string()); } builder } /// Location of the program binary #[must_use] pub fn program_dir>(mut self, path: P) -> Self { self.program_dir = Some(path.into()); self } /// show info for table with given file node #[must_use] pub fn filenode>(mut self, filenode: S) -> Self { self.filenode = Some(filenode.as_ref().to_os_string()); self } /// show indexes and sequences too #[must_use] pub fn indexes(mut self) -> Self { self.indexes = true; self } /// show info for table with given OID #[must_use] pub fn oid>(mut self, oid: S) -> Self { self.oid = Some(oid.as_ref().to_os_string()); self } /// quiet (don't show headers) #[must_use] pub fn quiet(mut self) -> Self { self.quiet = true; self } /// show all tablespaces #[must_use] pub fn tablespaces(mut self) -> Self { self.tablespaces = true; self } /// show system objects too #[must_use] pub fn system_objects(mut self) -> Self { self.system_objects = true; self } /// show info for named table #[must_use] pub fn table>(mut self, table: S) -> Self { self.table = Some(table.as_ref().to_os_string()); self } /// output version information, then exit #[must_use] pub fn version(mut self) -> Self { self.version = true; self } /// 
extended (show additional columns) #[must_use] pub fn extended(mut self) -> Self { self.extended = true; self } /// show help, then exit #[must_use] pub fn help(mut self) -> Self { self.help = true; self } /// database to connect to #[must_use] pub fn dbname>(mut self, dbname: S) -> Self { self.dbname = Some(dbname.as_ref().to_os_string()); self } /// database server host or socket directory #[must_use] pub fn host>(mut self, host: S) -> Self { self.host = Some(host.as_ref().to_os_string()); self } /// database server port number #[must_use] pub fn port(mut self, port: u16) -> Self { self.port = Some(port); self } /// connect as specified database user #[must_use] pub fn username>(mut self, username: S) -> Self { self.username = Some(username.as_ref().to_os_string()); self } } impl CommandBuilder for Oid2NameBuilder { /// Get the program name fn get_program(&self) -> &'static OsStr { "oid2name".as_ref() } /// Location of the program binary fn get_program_dir(&self) -> &Option { &self.program_dir } /// Get the arguments for the command fn get_args(&self) -> Vec { let mut args: Vec = Vec::new(); if let Some(filenode) = &self.filenode { args.push("--filenode".into()); args.push(filenode.into()); } if self.indexes { args.push("--indexes".into()); } if let Some(oid) = &self.oid { args.push("--oid".into()); args.push(oid.into()); } if self.quiet { args.push("--quiet".into()); } if self.tablespaces { args.push("--tablespaces".into()); } if self.system_objects { args.push("--system-objects".into()); } if let Some(table) = &self.table { args.push("--table".into()); args.push(table.into()); } if self.version { args.push("--version".into()); } if self.extended { args.push("--extended".into()); } if self.help { args.push("--help".into()); } if let Some(dbname) = &self.dbname { args.push("--dbname".into()); args.push(dbname.into()); } if let Some(host) = &self.host { args.push("--host".into()); args.push(host.into()); } if let Some(port) = &self.port { 
args.push("--port".into()); args.push(port.to_string().into()); } if let Some(username) = &self.username { args.push("--username".into()); args.push(username.into()); } args } /// Get the environment variables for the command fn get_envs(&self) -> Vec<(OsString, OsString)> { self.envs.clone() } /// Set an environment variable for the command fn env>(mut self, key: S, value: S) -> Self { self.envs .push((key.as_ref().to_os_string(), value.as_ref().to_os_string())); self } } #[cfg(test)] mod tests { use super::*; use crate::TestSettings; use crate::TestSocketSettings; use crate::traits::CommandToString; use test_log::test; #[test] fn test_builder_new() { let command = Oid2NameBuilder::new().program_dir(".").build(); assert_eq!( PathBuf::from(".").join("oid2name"), PathBuf::from(command.to_command_string().replace('"', "")) ); } #[test] fn test_builder_from() { let command = Oid2NameBuilder::from(&TestSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#""./oid2name" "#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\oid2name" "#; assert_eq!( format!( r#"{command_prefix}"--host" "localhost" "--port" "5432" "--username" "postgres""# ), command.to_command_string() ); } #[test] fn test_builder() { let command = Oid2NameBuilder::new() .env("PGDATABASE", "database") .filenode("filenode") .indexes() .oid("oid") .quiet() .tablespaces() .system_objects() .table("table") .version() .extended() .help() .dbname("dbname") .host("localhost") .port(5432) .username("username") .build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGDATABASE="database" "#; #[cfg(target_os = "windows")] let command_prefix = String::new(); assert_eq!( format!( r#"{command_prefix}"oid2name" "--filenode" "filenode" "--indexes" "--oid" "oid" "--quiet" "--tablespaces" "--system-objects" "--table" "table" "--version" "--extended" "--help" "--dbname" "dbname" "--host" "localhost" "--port" "5432" "--username" "username""# ), command.to_command_string() ); } 
#[test] fn test_builder_from_socket() { let command = Oid2NameBuilder::from(&TestSocketSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#""./oid2name" "#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\oid2name" "#; assert_eq!( format!( r#"{command_prefix}"--host" "/tmp/pg_socket" "--port" "5432" "--username" "postgres""# ), command.to_command_string() ); } } ================================================ FILE: postgresql_commands/src/pg_amcheck.rs ================================================ use crate::Settings; use crate::traits::CommandBuilder; use std::convert::AsRef; use std::ffi::{OsStr, OsString}; use std::path::PathBuf; /// `pg_amcheck` checks objects in a `PostgreSQL` database for corruption. #[derive(Clone, Debug, Default)] pub struct PgAmCheckBuilder { program_dir: Option, envs: Vec<(OsString, OsString)>, all: bool, database: Option, exclude_database: Option, index: Option, exclude_index: Option, relation: Option, exclude_relation: Option, schema: Option, exclude_schema: Option, table: Option, exclude_table: Option, no_dependent_indexes: bool, no_dependent_toast: bool, no_strict_names: bool, exclude_toast_pointers: bool, on_error_stop: bool, skip: Option, start_block: Option, end_block: Option, heap_all_indexed: bool, parent_check: bool, root_descend: bool, host: Option, port: Option, username: Option, no_password: bool, password: bool, pg_password: Option, maintenance_db: Option, echo: bool, jobs: Option, progress: bool, verbose: bool, version: bool, install_missing: bool, help: bool, } impl PgAmCheckBuilder { /// Create a new [`PgAmCheckBuilder`] #[must_use] pub fn new() -> Self { Self::default() } /// Create a new [`PgAmCheckBuilder`] from [Settings] pub fn from(settings: &dyn Settings) -> Self { let mut builder = Self::new() .program_dir(settings.get_binary_dir()) .host(settings.get_host()) .port(settings.get_port()) .username(settings.get_username()) .pg_password(settings.get_password()); if let 
Some(socket_dir) = settings.get_socket_dir() { builder = builder.host(socket_dir.to_string_lossy().to_string()); } builder } /// Location of the program binary #[must_use] pub fn program_dir>(mut self, path: P) -> Self { self.program_dir = Some(path.into()); self } /// check all databases #[must_use] pub fn all(mut self) -> Self { self.all = true; self } /// check matching database(s) #[must_use] pub fn database>(mut self, database: S) -> Self { self.database = Some(database.as_ref().to_os_string()); self } /// do NOT check matching database(s) #[must_use] pub fn exclude_database>(mut self, exclude_database: S) -> Self { self.exclude_database = Some(exclude_database.as_ref().to_os_string()); self } /// check matching index(es) #[must_use] pub fn index>(mut self, index: S) -> Self { self.index = Some(index.as_ref().to_os_string()); self } /// do NOT check matching index(es) #[must_use] pub fn exclude_index>(mut self, exclude_index: S) -> Self { self.exclude_index = Some(exclude_index.as_ref().to_os_string()); self } /// check matching relation(s) #[must_use] pub fn relation>(mut self, relation: S) -> Self { self.relation = Some(relation.as_ref().to_os_string()); self } /// do NOT check matching relation(s) #[must_use] pub fn exclude_relation>(mut self, exclude_relation: S) -> Self { self.exclude_relation = Some(exclude_relation.as_ref().to_os_string()); self } /// check matching schema(s) #[must_use] pub fn schema>(mut self, schema: S) -> Self { self.schema = Some(schema.as_ref().to_os_string()); self } /// do NOT check matching schema(s) #[must_use] pub fn exclude_schema>(mut self, exclude_schema: S) -> Self { self.exclude_schema = Some(exclude_schema.as_ref().to_os_string()); self } /// check matching table(s) #[must_use] pub fn table>(mut self, table: S) -> Self { self.table = Some(table.as_ref().to_os_string()); self } /// do NOT check matching table(s) #[must_use] pub fn exclude_table>(mut self, exclude_table: S) -> Self { self.exclude_table = 
Some(exclude_table.as_ref().to_os_string()); self } /// do NOT expand list of relations to include indexes #[must_use] pub fn no_dependent_indexes(mut self) -> Self { self.no_dependent_indexes = true; self } /// do NOT expand list of relations to include TOAST tables #[must_use] pub fn no_dependent_toast(mut self) -> Self { self.no_dependent_toast = true; self } /// do NOT require patterns to match objects #[must_use] pub fn no_strict_names(mut self) -> Self { self.no_strict_names = true; self } /// do NOT follow relation TOAST pointers #[must_use] pub fn exclude_toast_pointers(mut self) -> Self { self.exclude_toast_pointers = true; self } /// stop checking at end of first corrupt page #[must_use] pub fn on_error_stop(mut self) -> Self { self.on_error_stop = true; self } /// do NOT check "all-frozen" or "all-visible" blocks #[must_use] pub fn skip>(mut self, skip: S) -> Self { self.skip = Some(skip.as_ref().to_os_string()); self } /// begin checking table(s) at the given block number #[must_use] pub fn start_block>(mut self, start_block: S) -> Self { self.start_block = Some(start_block.as_ref().to_os_string()); self } /// check table(s) only up to the given block number #[must_use] pub fn end_block>(mut self, end_block: S) -> Self { self.end_block = Some(end_block.as_ref().to_os_string()); self } /// check that all heap tuples are found within indexes #[must_use] pub fn heap_all_indexed(mut self) -> Self { self.heap_all_indexed = true; self } /// check index parent/child relationships #[must_use] pub fn parent_check(mut self) -> Self { self.parent_check = true; self } /// search from root page to refind tuples #[must_use] pub fn root_descend(mut self) -> Self { self.root_descend = true; self } /// database server host or socket directory #[must_use] pub fn host>(mut self, host: S) -> Self { self.host = Some(host.as_ref().to_os_string()); self } /// database server port #[must_use] pub fn port(mut self, port: u16) -> Self { self.port = Some(port); self } /// user 
name to connect as #[must_use] pub fn username>(mut self, username: S) -> Self { self.username = Some(username.as_ref().to_os_string()); self } /// never prompt for password #[must_use] pub fn no_password(mut self) -> Self { self.no_password = true; self } /// force password prompt #[must_use] pub fn password(mut self) -> Self { self.password = true; self } /// user password #[must_use] pub fn pg_password>(mut self, pg_password: S) -> Self { self.pg_password = Some(pg_password.as_ref().to_os_string()); self } /// alternate maintenance database #[must_use] pub fn maintenance_db>(mut self, maintenance_db: S) -> Self { self.maintenance_db = Some(maintenance_db.as_ref().to_os_string()); self } /// show the commands being sent to the server #[must_use] pub fn echo(mut self) -> Self { self.echo = true; self } /// use this many concurrent connections to the server #[must_use] pub fn jobs>(mut self, jobs: S) -> Self { self.jobs = Some(jobs.as_ref().to_os_string()); self } /// show progress information #[must_use] pub fn progress(mut self) -> Self { self.progress = true; self } /// write a lot of output #[must_use] pub fn verbose(mut self) -> Self { self.verbose = true; self } /// output version information, then exit #[must_use] pub fn version(mut self) -> Self { self.version = true; self } /// install missing extensions #[must_use] pub fn install_missing(mut self) -> Self { self.install_missing = true; self } /// show help, then exit #[must_use] pub fn help(mut self) -> Self { self.help = true; self } } impl CommandBuilder for PgAmCheckBuilder { /// Get the program name fn get_program(&self) -> &'static OsStr { "pg_amcheck".as_ref() } /// Location of the program binary fn get_program_dir(&self) -> &Option { &self.program_dir } /// Get the arguments for the command #[expect(clippy::too_many_lines)] fn get_args(&self) -> Vec { let mut args: Vec = Vec::new(); if self.all { args.push("--all".into()); } if let Some(database) = &self.database { args.push("--database".into()); 
args.push(database.into()); } if let Some(exclude_database) = &self.exclude_database { args.push("--exclude-database".into()); args.push(exclude_database.into()); } if let Some(index) = &self.index { args.push("--index".into()); args.push(index.into()); } if let Some(exclude_index) = &self.exclude_index { args.push("--exclude-index".into()); args.push(exclude_index.into()); } if let Some(relation) = &self.relation { args.push("--relation".into()); args.push(relation.into()); } if let Some(exclude_relation) = &self.exclude_relation { args.push("--exclude-relation".into()); args.push(exclude_relation.into()); } if let Some(schema) = &self.schema { args.push("--schema".into()); args.push(schema.into()); } if let Some(exclude_schema) = &self.exclude_schema { args.push("--exclude-schema".into()); args.push(exclude_schema.into()); } if let Some(table) = &self.table { args.push("--table".into()); args.push(table.into()); } if let Some(exclude_table) = &self.exclude_table { args.push("--exclude-table".into()); args.push(exclude_table.into()); } if self.no_dependent_indexes { args.push("--no-dependent-indexes".into()); } if self.no_dependent_toast { args.push("--no-dependent-toast".into()); } if self.no_strict_names { args.push("--no-strict-names".into()); } if self.exclude_toast_pointers { args.push("--exclude-toast-pointers".into()); } if self.on_error_stop { args.push("--on-error-stop".into()); } if let Some(skip) = &self.skip { args.push("--skip".into()); args.push(skip.into()); } if let Some(start_block) = &self.start_block { args.push("--startblock".into()); args.push(start_block.into()); } if let Some(end_block) = &self.end_block { args.push("--endblock".into()); args.push(end_block.into()); } if self.heap_all_indexed { args.push("--heapallindexed".into()); } if self.parent_check { args.push("--parent-check".into()); } if self.root_descend { args.push("--rootdescend".into()); } if let Some(host) = &self.host { args.push("--host".into()); args.push(host.into()); } if 
let Some(port) = &self.port { args.push("--port".into()); args.push(port.to_string().into()); } if let Some(username) = &self.username { args.push("--username".into()); args.push(username.into()); } if self.no_password { args.push("--no-password".into()); } if self.password { args.push("--password".into()); } if let Some(maintenance_db) = &self.maintenance_db { args.push("--maintenance-db".into()); args.push(maintenance_db.into()); } if self.echo { args.push("--echo".into()); } if let Some(jobs) = &self.jobs { args.push("--jobs".into()); args.push(jobs.into()); } if self.progress { args.push("--progress".into()); } if self.verbose { args.push("--verbose".into()); } if self.version { args.push("--version".into()); } if self.install_missing { args.push("--install-missing".into()); } if self.help { args.push("--help".into()); } args } /// Get the environment variables for the command fn get_envs(&self) -> Vec<(OsString, OsString)> { let mut envs: Vec<(OsString, OsString)> = self.envs.clone(); if let Some(password) = &self.pg_password { envs.push(("PGPASSWORD".into(), password.into())); } envs } /// Set an environment variable for the command fn env>(mut self, key: S, value: S) -> Self { self.envs .push((key.as_ref().to_os_string(), value.as_ref().to_os_string())); self } } #[cfg(test)] mod tests { use super::*; use crate::TestSettings; use crate::TestSocketSettings; use crate::traits::CommandToString; use test_log::test; #[test] fn test_builder_new() { let command = PgAmCheckBuilder::new().program_dir(".").build(); assert_eq!( PathBuf::from(".").join("pg_amcheck"), PathBuf::from(command.to_command_string().replace('"', "")) ); } #[test] fn test_builder_from() { let command = PgAmCheckBuilder::from(&TestSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGPASSWORD="password" "./pg_amcheck" "#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\pg_amcheck" "#; assert_eq!( format!( r#"{command_prefix}"--host" "localhost" "--port" "5432" 
"--username" "postgres""# ), command.to_command_string() ); } #[test] fn test_builder_from_socket() { let command = PgAmCheckBuilder::from(&TestSocketSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGPASSWORD="password" "./pg_amcheck" "#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\pg_amcheck" "#; assert_eq!( format!( r#"{command_prefix}"--host" "/tmp/pg_socket" "--port" "5432" "--username" "postgres""# ), command.to_command_string() ); } #[test] fn test_builder() { let command = PgAmCheckBuilder::new() .env("PGDATABASE", "database") .all() .database("database") .exclude_database("exclude_database") .index("index") .exclude_index("exclude_index") .relation("relation") .exclude_relation("exclude_relation") .schema("schema") .exclude_schema("exclude_schema") .table("table") .exclude_table("exclude_table") .no_dependent_indexes() .no_dependent_toast() .no_strict_names() .exclude_toast_pointers() .on_error_stop() .skip("skip") .start_block("start_block") .end_block("end_block") .heap_all_indexed() .parent_check() .root_descend() .host("localhost") .port(5432) .username("username") .no_password() .password() .pg_password("password") .maintenance_db("maintenance_db") .echo() .jobs("jobs") .progress() .verbose() .version() .install_missing() .help() .build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGDATABASE="database" PGPASSWORD="password" "#; #[cfg(target_os = "windows")] let command_prefix = String::new(); assert_eq!( format!( r#"{command_prefix}"pg_amcheck" "--all" "--database" "database" "--exclude-database" "exclude_database" "--index" "index" "--exclude-index" "exclude_index" "--relation" "relation" "--exclude-relation" "exclude_relation" "--schema" "schema" "--exclude-schema" "exclude_schema" "--table" "table" "--exclude-table" "exclude_table" "--no-dependent-indexes" "--no-dependent-toast" "--no-strict-names" "--exclude-toast-pointers" "--on-error-stop" "--skip" "skip" "--startblock" "start_block" 
"--endblock" "end_block" "--heapallindexed" "--parent-check" "--rootdescend" "--host" "localhost" "--port" "5432" "--username" "username" "--no-password" "--password" "--maintenance-db" "maintenance_db" "--echo" "--jobs" "jobs" "--progress" "--verbose" "--version" "--install-missing" "--help""# ), command.to_command_string() ); } } ================================================ FILE: postgresql_commands/src/pg_archivecleanup.rs ================================================ use crate::Settings; use crate::traits::CommandBuilder; use std::convert::AsRef; use std::ffi::{OsStr, OsString}; use std::path::PathBuf; /// `pg_archivecleanup` removes older WAL files from `PostgreSQL` archives. #[derive(Clone, Debug, Default)] pub struct PgArchiveCleanupBuilder { program_dir: Option, envs: Vec<(OsString, OsString)>, debug: bool, dry_run: bool, version: bool, ext: Option, help: bool, archive_location: Option, oldest_kept_wal_file: Option, } impl PgArchiveCleanupBuilder { /// Create a new [`PgArchiveCleanupBuilder`] #[must_use] pub fn new() -> Self { Self::default() } /// Create a new [`PgArchiveCleanupBuilder`] from [Settings] pub fn from(settings: &dyn Settings) -> Self { Self::new().program_dir(settings.get_binary_dir()) } /// Location of the program binary #[must_use] pub fn program_dir>(mut self, path: P) -> Self { self.program_dir = Some(path.into()); self } /// generate debug output (verbose mode) #[must_use] pub fn debug(mut self) -> Self { self.debug = true; self } /// dry run, show the names of the files that would be removed #[must_use] pub fn dry_run(mut self) -> Self { self.dry_run = true; self } /// output version information, then exit #[must_use] pub fn version(mut self) -> Self { self.version = true; self } /// clean up files if they have this extension #[must_use] pub fn ext>(mut self, ext: S) -> Self { self.ext = Some(ext.as_ref().to_os_string()); self } /// show help, then exit #[must_use] pub fn help(mut self) -> Self { self.help = true; self } /// 
archive location #[must_use] pub fn archive_location>(mut self, archive_location: S) -> Self { self.archive_location = Some(archive_location.as_ref().to_os_string()); self } /// oldest kept WAL file #[must_use] pub fn oldest_kept_wal_file>(mut self, oldest_kept_wal_file: S) -> Self { self.oldest_kept_wal_file = Some(oldest_kept_wal_file.as_ref().to_os_string()); self } } impl CommandBuilder for PgArchiveCleanupBuilder { /// Get the program name fn get_program(&self) -> &'static OsStr { "pg_archivecleanup".as_ref() } /// Location of the program binary fn get_program_dir(&self) -> &Option { &self.program_dir } /// Get the arguments for the command fn get_args(&self) -> Vec { let mut args: Vec = Vec::new(); if self.debug { args.push("-d".into()); } if self.dry_run { args.push("-n".into()); } if self.version { args.push("--version".into()); } if let Some(ext) = &self.ext { args.push("-x".into()); args.push(ext.into()); } if self.help { args.push("--help".into()); } if let Some(archive_location) = &self.archive_location { args.push(archive_location.into()); } if let Some(oldest_kept_wal_file) = &self.oldest_kept_wal_file { args.push(oldest_kept_wal_file.into()); } args } /// Get the environment variables for the command fn get_envs(&self) -> Vec<(OsString, OsString)> { self.envs.clone() } /// Set an environment variable for the command fn env>(mut self, key: S, value: S) -> Self { self.envs .push((key.as_ref().to_os_string(), value.as_ref().to_os_string())); self } } #[cfg(test)] mod tests { use super::*; use crate::TestSettings; use crate::traits::CommandToString; use test_log::test; #[test] fn test_builder_new() { let command = PgArchiveCleanupBuilder::new().program_dir(".").build(); assert_eq!( PathBuf::from(".").join("pg_archivecleanup"), PathBuf::from(command.to_command_string().replace('"', "")) ); } #[test] fn test_builder_from() { let command = PgArchiveCleanupBuilder::from(&TestSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = 
r#""./pg_archivecleanup""#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\pg_archivecleanup""#; assert_eq!(format!("{command_prefix}"), command.to_command_string()); } #[test] fn test_builder() { let command = PgArchiveCleanupBuilder::new() .env("PGDATABASE", "database") .debug() .dry_run() .version() .ext("partial") .help() .archive_location("archive_location") .oldest_kept_wal_file("000000010000000000000001") .build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGDATABASE="database" "#; #[cfg(target_os = "windows")] let command_prefix = String::new(); assert_eq!( format!( r#"{command_prefix}"pg_archivecleanup" "-d" "-n" "--version" "-x" "partial" "--help" "archive_location" "000000010000000000000001""# ), command.to_command_string() ); } } ================================================ FILE: postgresql_commands/src/pg_basebackup.rs ================================================ use crate::Settings; use crate::traits::CommandBuilder; use std::convert::AsRef; use std::ffi::{OsStr, OsString}; use std::path::PathBuf; /// `pg_basebackup` takes a base backup of a running `PostgreSQL` server. 
#[derive(Clone, Debug, Default)] pub struct PgBaseBackupBuilder { program_dir: Option, envs: Vec<(OsString, OsString)>, pgdata: Option, format: Option, max_rate: Option, write_recovery_conf: bool, target: Option, tablespace_mapping: Option, waldir: Option, wal_method: Option, gzip: bool, compress: Option, checkpoint: Option, create_slot: bool, label: Option, no_clean: bool, no_sync: bool, progress: bool, slot: Option, verbose: bool, version: bool, manifest_checksums: Option, manifest_force_encode: bool, no_estimate_size: bool, no_manifest: bool, no_slot: bool, no_verify_checksums: bool, help: bool, dbname: Option, host: Option, port: Option, status_interval: Option, username: Option, no_password: bool, password: bool, pg_password: Option, } impl PgBaseBackupBuilder { /// Create a new [`PgBaseBackupBuilder`] #[must_use] pub fn new() -> Self { Self::default() } /// Create a new [`PgBaseBackupBuilder`] from [Settings] pub fn from(settings: &dyn Settings) -> Self { let mut builder = Self::new() .program_dir(settings.get_binary_dir()) .host(settings.get_host()) .port(settings.get_port()) .username(settings.get_username()) .pg_password(settings.get_password()); if let Some(socket_dir) = settings.get_socket_dir() { builder = builder.host(socket_dir.to_string_lossy().to_string()); } builder } /// Location of the program binary #[must_use] pub fn program_dir>(mut self, path: P) -> Self { self.program_dir = Some(path.into()); self } /// receive base backup into directory #[must_use] pub fn pgdata>(mut self, pgdata: P) -> Self { self.pgdata = Some(pgdata.into()); self } /// output format (plain (default), tar) #[must_use] pub fn format>(mut self, format: S) -> Self { self.format = Some(format.as_ref().to_os_string()); self } /// maximum transfer rate to transfer data directory (in kB/s, or use suffix "k" or "M") #[must_use] pub fn max_rate>(mut self, max_rate: S) -> Self { self.max_rate = Some(max_rate.as_ref().to_os_string()); self } /// write configuration for replication 
#[must_use] pub fn write_recovery_conf(mut self) -> Self { self.write_recovery_conf = true; self } /// backup target (if other than client) #[must_use] pub fn target>(mut self, target: S) -> Self { self.target = Some(target.as_ref().to_os_string()); self } /// relocate tablespace in OLDDIR to NEWDIR #[must_use] pub fn tablespace_mapping>(mut self, tablespace_mapping: S) -> Self { self.tablespace_mapping = Some(tablespace_mapping.as_ref().to_os_string()); self } /// location for the write-ahead log directory #[must_use] pub fn waldir>(mut self, waldir: S) -> Self { self.waldir = Some(waldir.as_ref().to_os_string()); self } /// include required WAL files with specified method #[must_use] pub fn wal_method>(mut self, wal_method: S) -> Self { self.wal_method = Some(wal_method.as_ref().to_os_string()); self } /// compress tar output #[must_use] pub fn gzip(mut self) -> Self { self.gzip = true; self } /// compress on client or server as specified #[must_use] pub fn compress>(mut self, compress: S) -> Self { self.compress = Some(compress.as_ref().to_os_string()); self } /// set fast or spread checkpointing #[must_use] pub fn checkpoint>(mut self, checkpoint: S) -> Self { self.checkpoint = Some(checkpoint.as_ref().to_os_string()); self } /// create replication slot #[must_use] pub fn create_slot(mut self) -> Self { self.create_slot = true; self } /// set backup label #[must_use] pub fn label>(mut self, label: S) -> Self { self.label = Some(label.as_ref().to_os_string()); self } /// do not clean up after errors #[must_use] pub fn no_clean(mut self) -> Self { self.no_clean = true; self } /// do not wait for changes to be written safely to disk #[must_use] pub fn no_sync(mut self) -> Self { self.no_sync = true; self } /// show progress information #[must_use] pub fn progress(mut self) -> Self { self.progress = true; self } /// replication slot to use #[must_use] pub fn slot>(mut self, slot: S) -> Self { self.slot = Some(slot.as_ref().to_os_string()); self } /// output verbose 
messages #[must_use] pub fn verbose(mut self) -> Self { self.verbose = true; self } /// output version information, then exit #[must_use] pub fn version(mut self) -> Self { self.version = true; self } /// use algorithm for manifest checksums #[must_use] pub fn manifest_checksums>(mut self, manifest_checksums: S) -> Self { self.manifest_checksums = Some(manifest_checksums.as_ref().to_os_string()); self } /// hex encode all file names in manifest #[must_use] pub fn manifest_force_encode(mut self) -> Self { self.manifest_force_encode = true; self } /// do not estimate backup size in server side #[must_use] pub fn no_estimate_size(mut self) -> Self { self.no_estimate_size = true; self } /// suppress generation of backup manifest #[must_use] pub fn no_manifest(mut self) -> Self { self.no_manifest = true; self } /// prevent creation of temporary replication slot #[must_use] pub fn no_slot(mut self) -> Self { self.no_slot = true; self } /// do not verify checksums #[must_use] pub fn no_verify_checksums(mut self) -> Self { self.no_verify_checksums = true; self } /// show this help, then exit #[must_use] pub fn help(mut self) -> Self { self.help = true; self } /// connection string #[must_use] pub fn dbname>(mut self, dbname: S) -> Self { self.dbname = Some(dbname.as_ref().to_os_string()); self } /// database server host or socket directory #[must_use] pub fn host>(mut self, host: S) -> Self { self.host = Some(host.as_ref().to_os_string()); self } /// database server port number #[must_use] pub fn port(mut self, port: u16) -> Self { self.port = Some(port); self } /// time between status packets sent to server (in seconds) #[must_use] pub fn status_interval>(mut self, status_interval: S) -> Self { self.status_interval = Some(status_interval.as_ref().to_os_string()); self } /// connect as specified database user #[must_use] pub fn username>(mut self, username: S) -> Self { self.username = Some(username.as_ref().to_os_string()); self } /// never prompt for password #[must_use] 
pub fn no_password(mut self) -> Self { self.no_password = true; self } /// force password prompt (should happen automatically) #[must_use] pub fn password(mut self) -> Self { self.password = true; self } /// user password #[must_use] pub fn pg_password>(mut self, pg_password: S) -> Self { self.pg_password = Some(pg_password.as_ref().to_os_string()); self } } impl CommandBuilder for PgBaseBackupBuilder { /// Get the program name fn get_program(&self) -> &'static OsStr { "pg_basebackup".as_ref() } /// Location of the program binary fn get_program_dir(&self) -> &Option { &self.program_dir } /// Get the arguments for the command #[expect(clippy::too_many_lines)] fn get_args(&self) -> Vec { let mut args: Vec = Vec::new(); if let Some(pgdata) = &self.pgdata { args.push("--pgdata".into()); args.push(pgdata.into()); } if let Some(format) = &self.format { args.push("--format".into()); args.push(format.into()); } if let Some(max_rate) = &self.max_rate { args.push("--max-rate".into()); args.push(max_rate.into()); } if self.write_recovery_conf { args.push("--write-recovery-conf".into()); } if let Some(target) = &self.target { args.push("--target".into()); args.push(target.into()); } if let Some(tablespace_mapping) = &self.tablespace_mapping { args.push("--tablespace-mapping".into()); args.push(tablespace_mapping.into()); } if let Some(waldir) = &self.waldir { args.push("--waldir".into()); args.push(waldir.into()); } if let Some(wal_method) = &self.wal_method { args.push("--wal-method".into()); args.push(wal_method.into()); } if self.gzip { args.push("--gzip".into()); } if let Some(compress) = &self.compress { args.push("--compress".into()); args.push(compress.into()); } if let Some(checkpoint) = &self.checkpoint { args.push("--checkpoint".into()); args.push(checkpoint.into()); } if self.create_slot { args.push("--create-slot".into()); } if let Some(label) = &self.label { args.push("--label".into()); args.push(label.into()); } if self.no_clean { args.push("--no-clean".into()); 
} if self.no_sync { args.push("--no-sync".into()); } if self.progress { args.push("--progress".into()); } if let Some(slot) = &self.slot { args.push("--slot".into()); args.push(slot.into()); } if self.verbose { args.push("--verbose".into()); } if self.version { args.push("--version".into()); } if let Some(manifest_checksums) = &self.manifest_checksums { args.push("--manifest-checksums".into()); args.push(manifest_checksums.into()); } if self.manifest_force_encode { args.push("--manifest-force-encode".into()); } if self.no_estimate_size { args.push("--no-estimate-size".into()); } if self.no_manifest { args.push("--no-manifest".into()); } if self.no_slot { args.push("--no-slot".into()); } if self.no_verify_checksums { args.push("--no-verify-checksums".into()); } if self.help { args.push("--help".into()); } if let Some(dbname) = &self.dbname { args.push("--dbname".into()); args.push(dbname.into()); } if let Some(host) = &self.host { args.push("--host".into()); args.push(host.into()); } if let Some(port) = &self.port { args.push("--port".into()); args.push(port.to_string().into()); } if let Some(status_interval) = &self.status_interval { args.push("--status-interval".into()); args.push(status_interval.into()); } if let Some(username) = &self.username { args.push("--username".into()); args.push(username.into()); } if self.no_password { args.push("--no-password".into()); } if self.password { args.push("--password".into()); } args } /// Get the environment variables for the command fn get_envs(&self) -> Vec<(OsString, OsString)> { let mut envs: Vec<(OsString, OsString)> = self.envs.clone(); if let Some(password) = &self.pg_password { envs.push(("PGPASSWORD".into(), password.into())); } envs } /// Set an environment variable for the command fn env>(mut self, key: S, value: S) -> Self { self.envs .push((key.as_ref().to_os_string(), value.as_ref().to_os_string())); self } } #[cfg(test)] mod tests { use super::*; use crate::TestSettings; use crate::TestSocketSettings; use 
crate::traits::CommandToString; use test_log::test; #[test] fn test_builder_new() { let command = PgBaseBackupBuilder::new().program_dir(".").build(); assert_eq!( PathBuf::from(".").join("pg_basebackup"), PathBuf::from(command.to_command_string().replace('"', "")) ); } #[test] fn test_builder_from() { let command = PgBaseBackupBuilder::from(&TestSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGPASSWORD="password" "./pg_basebackup" "#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\pg_basebackup" "#; assert_eq!( format!( r#"{command_prefix}"--host" "localhost" "--port" "5432" "--username" "postgres""# ), command.to_command_string() ); } #[test] fn test_builder_from_socket() { let command = PgBaseBackupBuilder::from(&TestSocketSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGPASSWORD="password" "./pg_basebackup" "#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\pg_basebackup" "#; assert_eq!( format!( r#"{command_prefix}"--host" "/tmp/pg_socket" "--port" "5432" "--username" "postgres""# ), command.to_command_string() ); } #[test] fn test_builder() { let command = PgBaseBackupBuilder::new() .env("PGDATABASE", "database") .pgdata("pgdata") .format("plain") .max_rate("100M") .write_recovery_conf() .target("localhost") .tablespace_mapping("tablespace_mapping") .waldir("waldir") .wal_method("stream") .gzip() .compress("client") .checkpoint("fast") .create_slot() .label("my_backup") .no_clean() .no_sync() .progress() .slot("my_slot") .verbose() .version() .manifest_checksums("sha256") .manifest_force_encode() .no_estimate_size() .no_manifest() .no_slot() .no_verify_checksums() .help() .dbname("postgres") .host("localhost") .port(5432) .status_interval("10") .username("postgres") .no_password() .password() .pg_password("password") .build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGDATABASE="database" PGPASSWORD="password" "#; #[cfg(target_os = "windows")] let 
command_prefix = String::new(); assert_eq!( format!( r#"{command_prefix}"pg_basebackup" "--pgdata" "pgdata" "--format" "plain" "--max-rate" "100M" "--write-recovery-conf" "--target" "localhost" "--tablespace-mapping" "tablespace_mapping" "--waldir" "waldir" "--wal-method" "stream" "--gzip" "--compress" "client" "--checkpoint" "fast" "--create-slot" "--label" "my_backup" "--no-clean" "--no-sync" "--progress" "--slot" "my_slot" "--verbose" "--version" "--manifest-checksums" "sha256" "--manifest-force-encode" "--no-estimate-size" "--no-manifest" "--no-slot" "--no-verify-checksums" "--help" "--dbname" "postgres" "--host" "localhost" "--port" "5432" "--status-interval" "10" "--username" "postgres" "--no-password" "--password""# ), command.to_command_string() ); } } ================================================ FILE: postgresql_commands/src/pg_checksums.rs ================================================ use crate::Settings; use crate::traits::CommandBuilder; use std::convert::AsRef; use std::ffi::{OsStr, OsString}; use std::path::PathBuf; /// `pg_checksums` enables, disables, or verifies data checksums in a `PostgreSQL` database cluster. 
#[derive(Clone, Debug, Default)]
pub struct PgChecksumsBuilder {
    program_dir: Option<PathBuf>,
    envs: Vec<(OsString, OsString)>,
    pgdata: Option<PathBuf>,
    check: bool,
    disable: bool,
    enable: bool,
    filenode: Option<OsString>,
    no_sync: bool,
    progress: bool,
    verbose: bool,
    version: bool,
    help: bool,
}

impl PgChecksumsBuilder {
    /// Create a new [`PgChecksumsBuilder`]
    #[must_use]
    pub fn new() -> Self {
        Self::default()
    }

    /// Create a new [`PgChecksumsBuilder`] from [Settings]
    pub fn from(settings: &dyn Settings) -> Self {
        Self::new().program_dir(settings.get_binary_dir())
    }

    /// Location of the program binary
    #[must_use]
    pub fn program_dir<P: Into<PathBuf>>(mut self, path: P) -> Self {
        self.program_dir = Some(path.into());
        self
    }

    /// data directory
    #[must_use]
    pub fn pgdata<P: Into<PathBuf>>(mut self, pgdata: P) -> Self {
        self.pgdata = Some(pgdata.into());
        self
    }

    /// check data checksums (default)
    #[must_use]
    pub fn check(mut self) -> Self {
        self.check = true;
        self
    }

    /// disable data checksums
    #[must_use]
    pub fn disable(mut self) -> Self {
        self.disable = true;
        self
    }

    /// enable data checksums
    #[must_use]
    pub fn enable(mut self) -> Self {
        self.enable = true;
        self
    }

    /// check only relation with specified filenode
    #[must_use]
    pub fn filenode<S: AsRef<OsStr>>(mut self, filenode: S) -> Self {
        self.filenode = Some(filenode.as_ref().to_os_string());
        self
    }

    /// do not wait for changes to be written safely to disk
    #[must_use]
    pub fn no_sync(mut self) -> Self {
        self.no_sync = true;
        self
    }

    /// show progress information
    #[must_use]
    pub fn progress(mut self) -> Self {
        self.progress = true;
        self
    }

    /// output verbose messages
    #[must_use]
    pub fn verbose(mut self) -> Self {
        self.verbose = true;
        self
    }

    /// output version information, then exit
    #[must_use]
    pub fn version(mut self) -> Self {
        self.version = true;
        self
    }

    /// show help, then exit
    #[must_use]
    pub fn help(mut self) -> Self {
        self.help = true;
        self
    }
}

impl CommandBuilder for PgChecksumsBuilder {
    /// Get the program name
    fn get_program(&self) -> &'static OsStr {
        "pg_checksums".as_ref()
    }

    /// Location of the program binary
    fn get_program_dir(&self) -> &Option<PathBuf> {
        &self.program_dir
    }

    /// Get the arguments for the command
    fn get_args(&self) -> Vec<OsString> {
        let mut args: Vec<OsString> = Vec::new();

        if let Some(pgdata) = &self.pgdata {
            args.push("--pgdata".into());
            args.push(pgdata.into());
        }

        if self.check {
            args.push("--check".into());
        }

        if self.disable {
            args.push("--disable".into());
        }

        if self.enable {
            args.push("--enable".into());
        }

        if let Some(filenode) = &self.filenode {
            args.push("--filenode".into());
            args.push(filenode.into());
        }

        if self.no_sync {
            args.push("--no-sync".into());
        }

        if self.progress {
            args.push("--progress".into());
        }

        if self.verbose {
            args.push("--verbose".into());
        }

        if self.version {
            args.push("--version".into());
        }

        if self.help {
            args.push("--help".into());
        }

        args
    }

    /// Get the environment variables for the command
    fn get_envs(&self) -> Vec<(OsString, OsString)> {
        self.envs.clone()
    }

    /// Set an environment variable for the command
    fn env<S: AsRef<OsStr>>(mut self, key: S, value: S) -> Self {
        self.envs
            .push((key.as_ref().to_os_string(), value.as_ref().to_os_string()));
        self
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::TestSettings;
    use crate::traits::CommandToString;
    use test_log::test;

    #[test]
    fn test_builder_new() {
        let command = PgChecksumsBuilder::new().program_dir(".").build();
        assert_eq!(
            PathBuf::from(".").join("pg_checksums"),
            PathBuf::from(command.to_command_string().replace('"', ""))
        );
    }

    #[test]
    fn test_builder_from() {
        let command = PgChecksumsBuilder::from(&TestSettings).build();
        #[cfg(not(target_os = "windows"))]
        let command_prefix = r#""./pg_checksums""#;
        #[cfg(target_os = "windows")]
        let command_prefix = r#"".\\pg_checksums""#;
        assert_eq!(format!("{command_prefix}"), command.to_command_string());
    }

    #[test]
    fn test_builder() {
        let command = PgChecksumsBuilder::new()
            .env("PGDATABASE", "database")
            .pgdata("pgdata")
            .check()
            .disable()
            .enable()
            .filenode("12345")
            .no_sync()
            .progress()
            .verbose()
            .version()
            .help()
            .build();
        #[cfg(not(target_os = "windows"))]
let command_prefix = r#"PGDATABASE="database" "#; #[cfg(target_os = "windows")] let command_prefix = String::new(); assert_eq!( format!( r#"{command_prefix}"pg_checksums" "--pgdata" "pgdata" "--check" "--disable" "--enable" "--filenode" "12345" "--no-sync" "--progress" "--verbose" "--version" "--help""# ), command.to_command_string() ); } } ================================================ FILE: postgresql_commands/src/pg_config.rs ================================================ use crate::Settings; use crate::traits::CommandBuilder; use std::convert::AsRef; use std::ffi::{OsStr, OsString}; use std::path::PathBuf; /// `pg_config` provides information about the installed version of `PostgreSQL`. #[derive(Clone, Debug, Default)] pub struct PgConfigBuilder { program_dir: Option, envs: Vec<(OsString, OsString)>, bindir: bool, docdir: bool, htmldir: bool, includedir: bool, pkgincludedir: bool, includedir_server: bool, libdir: bool, pkglibdir: bool, localedir: bool, mandir: bool, sharedir: bool, sysconfdir: bool, pgxs: bool, configure: bool, cc: bool, cppflags: bool, cflags: bool, cflags_sl: bool, ldflags: bool, ldflags_ex: bool, ldflags_sl: bool, libs: bool, version: bool, help: bool, } impl PgConfigBuilder { /// Create a new [`PgConfigBuilder`] #[must_use] pub fn new() -> Self { Self::default() } /// Create a new [`PgConfigBuilder`] from [Settings] pub fn from(settings: &dyn Settings) -> Self { Self::new().program_dir(settings.get_binary_dir()) } /// Location of the program binary #[must_use] pub fn program_dir>(mut self, path: P) -> Self { self.program_dir = Some(path.into()); self } /// Set the bindir #[must_use] pub fn bindir(mut self) -> Self { self.bindir = true; self } /// Set the docdir #[must_use] pub fn docdir(mut self) -> Self { self.docdir = true; self } /// Set the htmldir #[must_use] pub fn htmldir(mut self) -> Self { self.htmldir = true; self } /// Set the includedir #[must_use] pub fn includedir(mut self) -> Self { self.includedir = true; self } /// Set 
the pkgincludedir #[must_use] pub fn pkgincludedir(mut self) -> Self { self.pkgincludedir = true; self } /// Set the `includedir_server` #[must_use] pub fn includedir_server(mut self) -> Self { self.includedir_server = true; self } /// Set the libdir #[must_use] pub fn libdir(mut self) -> Self { self.libdir = true; self } /// Set the pkglibdir #[must_use] pub fn pkglibdir(mut self) -> Self { self.pkglibdir = true; self } /// Set the localedir #[must_use] pub fn localedir(mut self) -> Self { self.localedir = true; self } /// Set the mandir #[must_use] pub fn mandir(mut self) -> Self { self.mandir = true; self } /// Set the sharedir #[must_use] pub fn sharedir(mut self) -> Self { self.sharedir = true; self } /// Set the sysconfdir #[must_use] pub fn sysconfdir(mut self) -> Self { self.sysconfdir = true; self } /// Set the pgxs #[must_use] pub fn pgxs(mut self) -> Self { self.pgxs = true; self } /// Set the configure flag #[must_use] pub fn configure(mut self) -> Self { self.configure = true; self } /// Set the cc flag #[must_use] pub fn cc(mut self) -> Self { self.cc = true; self } /// Set the cppflags flag #[must_use] pub fn cppflags(mut self) -> Self { self.cppflags = true; self } /// Set the cflags flag #[must_use] pub fn cflags(mut self) -> Self { self.cflags = true; self } /// Set the `cflags_sl` flag #[must_use] pub fn cflags_sl(mut self) -> Self { self.cflags_sl = true; self } /// Set the ldflags flag #[must_use] pub fn ldflags(mut self) -> Self { self.ldflags = true; self } /// Set the `ldflags_ex` flag #[must_use] pub fn ldflags_ex(mut self) -> Self { self.ldflags_ex = true; self } /// Set the `ldflags_sl` flag #[must_use] pub fn ldflags_sl(mut self) -> Self { self.ldflags_sl = true; self } /// Set the libs flag #[must_use] pub fn libs(mut self) -> Self { self.libs = true; self } /// output version information, then exit #[must_use] pub fn version(mut self) -> Self { self.version = true; self } /// show help, then exit #[must_use] pub fn help(mut self) -> 
Self { self.help = true; self } } impl CommandBuilder for PgConfigBuilder { /// Get the program name fn get_program(&self) -> &'static OsStr { "pg_config".as_ref() } /// Location of the program binary fn get_program_dir(&self) -> &Option { &self.program_dir } /// Get the arguments for the command fn get_args(&self) -> Vec { let mut args: Vec = Vec::new(); if self.bindir { args.push("--bindir".into()); } if self.docdir { args.push("--docdir".into()); } if self.htmldir { args.push("--htmldir".into()); } if self.includedir { args.push("--includedir".into()); } if self.pkgincludedir { args.push("--pkgincludedir".into()); } if self.includedir_server { args.push("--includedir-server".into()); } if self.libdir { args.push("--libdir".into()); } if self.pkglibdir { args.push("--pkglibdir".into()); } if self.localedir { args.push("--localedir".into()); } if self.mandir { args.push("--mandir".into()); } if self.sharedir { args.push("--sharedir".into()); } if self.sysconfdir { args.push("--sysconfdir".into()); } if self.pgxs { args.push("--pgxs".into()); } if self.configure { args.push("--configure".into()); } if self.cc { args.push("--cc".into()); } if self.cppflags { args.push("--cppflags".into()); } if self.cflags { args.push("--cflags".into()); } if self.cflags_sl { args.push("--cflags_sl".into()); } if self.ldflags { args.push("--ldflags".into()); } if self.ldflags_ex { args.push("--ldflags_ex".into()); } if self.ldflags_sl { args.push("--ldflags_sl".into()); } if self.libs { args.push("--libs".into()); } if self.version { args.push("--version".into()); } if self.help { args.push("--help".into()); } args } /// Get the environment variables for the command fn get_envs(&self) -> Vec<(OsString, OsString)> { self.envs.clone() } /// Set an environment variable for the command fn env>(mut self, key: S, value: S) -> Self { self.envs .push((key.as_ref().to_os_string(), value.as_ref().to_os_string())); self } } #[cfg(test)] mod tests { use super::*; use crate::TestSettings; use 
crate::traits::CommandToString; use test_log::test; #[test] fn test_builder_new() { let command = PgConfigBuilder::new().program_dir(".").build(); assert_eq!( PathBuf::from(".").join("pg_config"), PathBuf::from(command.to_command_string().replace('"', "")) ); } #[test] fn test_builder_from() { let command = PgConfigBuilder::from(&TestSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#""./pg_config""#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\pg_config""#; assert_eq!(format!("{command_prefix}"), command.to_command_string()); } #[test] fn test_builder() { let command = PgConfigBuilder::new() .env("PGDATABASE", "database") .bindir() .docdir() .htmldir() .includedir() .pkgincludedir() .includedir_server() .libdir() .pkglibdir() .localedir() .mandir() .sharedir() .sysconfdir() .pgxs() .configure() .cc() .cppflags() .cflags() .cflags_sl() .ldflags() .ldflags_ex() .ldflags_sl() .libs() .version() .help() .build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGDATABASE="database" "#; #[cfg(target_os = "windows")] let command_prefix = String::new(); assert_eq!( format!( r#"{command_prefix}"pg_config" "--bindir" "--docdir" "--htmldir" "--includedir" "--pkgincludedir" "--includedir-server" "--libdir" "--pkglibdir" "--localedir" "--mandir" "--sharedir" "--sysconfdir" "--pgxs" "--configure" "--cc" "--cppflags" "--cflags" "--cflags_sl" "--ldflags" "--ldflags_ex" "--ldflags_sl" "--libs" "--version" "--help""# ), command.to_command_string() ); } } ================================================ FILE: postgresql_commands/src/pg_controldata.rs ================================================ use crate::Settings; use crate::traits::CommandBuilder; use std::ffi::{OsStr, OsString}; use std::path::PathBuf; /// `pg_controldata` displays control information of a `PostgreSQL` database cluster. 
#[derive(Clone, Debug, Default)] pub struct PgControlDataBuilder { program_dir: Option, envs: Vec<(OsString, OsString)>, pgdata: Option, version: bool, help: bool, } impl PgControlDataBuilder { /// Create a new [`PgControlDataBuilder`] #[must_use] pub fn new() -> Self { Self::default() } /// Create a new [`PgControlDataBuilder`] from [Settings] pub fn from(settings: &dyn Settings) -> Self { Self::new().program_dir(settings.get_binary_dir()) } /// Location of the program binary #[must_use] pub fn program_dir>(mut self, path: P) -> Self { self.program_dir = Some(path.into()); self } /// Set the data directory #[must_use] pub fn pgdata>(mut self, pgdata: P) -> Self { self.pgdata = Some(pgdata.into()); self } /// output version information, then exit #[must_use] pub fn version(mut self) -> Self { self.version = true; self } /// show help, then exit #[must_use] pub fn help(mut self) -> Self { self.help = true; self } } impl CommandBuilder for PgControlDataBuilder { /// Get the program name fn get_program(&self) -> &'static OsStr { "pg_controldata".as_ref() } /// Location of the program binary fn get_program_dir(&self) -> &Option { &self.program_dir } /// Get the arguments for the command fn get_args(&self) -> Vec { let mut args: Vec = Vec::new(); if let Some(pgdata) = &self.pgdata { args.push("--pgdata".into()); args.push(pgdata.into()); } if self.version { args.push("--version".into()); } if self.help { args.push("--help".into()); } args } /// Get the environment variables for the command fn get_envs(&self) -> Vec<(OsString, OsString)> { self.envs.clone() } /// Set an environment variable for the command fn env>(mut self, key: S, value: S) -> Self { self.envs .push((key.as_ref().to_os_string(), value.as_ref().to_os_string())); self } } #[cfg(test)] mod tests { use super::*; use crate::TestSettings; use crate::traits::CommandToString; use test_log::test; #[test] fn test_builder_new() { let command = PgControlDataBuilder::new().program_dir(".").build(); assert_eq!( 
PathBuf::from(".").join("pg_controldata"), PathBuf::from(command.to_command_string().replace('"', "")) ); } #[test] fn test_builder_from() { let command = PgControlDataBuilder::from(&TestSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#""./pg_controldata""#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\pg_controldata""#; assert_eq!(format!("{command_prefix}"), command.to_command_string()); } #[test] fn test_builder() { let command = PgControlDataBuilder::new() .env("PGDATABASE", "database") .pgdata("pgdata") .version() .help() .build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGDATABASE="database" "#; #[cfg(target_os = "windows")] let command_prefix = String::new(); assert_eq!( format!(r#"{command_prefix}"pg_controldata" "--pgdata" "pgdata" "--version" "--help""#), command.to_command_string() ); } } ================================================ FILE: postgresql_commands/src/pg_ctl.rs ================================================ use crate::Settings; use crate::traits::CommandBuilder; use std::convert::AsRef; use std::ffi::{OsStr, OsString}; use std::fmt::Display; use std::path::PathBuf; /// `pg_ctl` is a utility to initialize, start, stop, or control a `PostgreSQL` server. 
#[derive(Clone, Debug, Default)] pub struct PgCtlBuilder { program_dir: Option, envs: Vec<(OsString, OsString)>, mode: Option, pgdata: Option, silent: bool, timeout: Option, version: bool, wait: bool, no_wait: bool, help: bool, core_files: bool, log: Option, options: Vec, path_to_postgres: Option, shutdown_mode: Option, signal: Option, pid: Option, } #[derive(Clone, Debug)] pub enum Mode { InitDb, Kill, LogRotate, Promote, Restart, Reload, Start, Stop, Status, } impl Display for Mode { fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Mode::InitDb => write!(formatter, "initdb"), Mode::Kill => write!(formatter, "kill"), Mode::LogRotate => write!(formatter, "logrotate"), Mode::Promote => write!(formatter, "promote"), Mode::Restart => write!(formatter, "restart"), Mode::Reload => write!(formatter, "reload"), Mode::Start => write!(formatter, "start"), Mode::Stop => write!(formatter, "stop"), Mode::Status => write!(formatter, "status"), } } } #[derive(Clone, Debug)] pub enum ShutdownMode { Smart, Fast, Immediate, } impl Display for ShutdownMode { fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { ShutdownMode::Smart => write!(formatter, "smart"), ShutdownMode::Fast => write!(formatter, "fast"), ShutdownMode::Immediate => write!(formatter, "immediate"), } } } impl PgCtlBuilder { /// Create a new [`PgCtlBuilder`] #[must_use] pub fn new() -> Self { Self::default() } /// Create a new [`PgCtlBuilder`] from [Settings] pub fn from(settings: &dyn Settings) -> Self { Self::new().program_dir(settings.get_binary_dir()) } /// Location of the program binary #[must_use] pub fn program_dir>(mut self, path: P) -> Self { self.program_dir = Some(path.into()); self } /// mode #[must_use] pub fn mode(mut self, mode: Mode) -> Self { self.mode = Some(mode); self } /// location of the database storage area #[must_use] pub fn pgdata>(mut self, pgdata: P) -> Self { self.pgdata = Some(pgdata.into()); self } /// only 
print errors, no informational messages #[must_use] pub fn silent(mut self) -> Self { self.silent = true; self } /// seconds to wait when using -w option #[must_use] pub fn timeout(mut self, timeout: u16) -> Self { self.timeout = Some(timeout); self } /// output version information, then exit #[must_use] pub fn version(mut self) -> Self { self.version = true; self } /// wait until operation completes (default) #[must_use] pub fn wait(mut self) -> Self { self.wait = true; self } /// do not wait until operation completes #[must_use] pub fn no_wait(mut self) -> Self { self.no_wait = true; self } /// show help, then exit #[must_use] pub fn help(mut self) -> Self { self.help = true; self } /// allow postgres to produce core files #[must_use] pub fn core_files(mut self) -> Self { self.core_files = true; self } /// write (or append) server log to FILENAME #[must_use] pub fn log>(mut self, log: P) -> Self { self.log = Some(log.into()); self } /// command line options to pass to postgres (`PostgreSQL` server executable) or initdb #[must_use] pub fn options>(mut self, options: &[S]) -> Self { self.options = options.iter().map(|s| s.as_ref().to_os_string()).collect(); self } /// normally not necessary #[must_use] pub fn path_to_postgres>(mut self, path_to_postgres: S) -> Self { self.path_to_postgres = Some(path_to_postgres.as_ref().to_os_string()); self } /// MODE can be "smart", "fast", or "immediate" #[must_use] pub fn shutdown_mode(mut self, shutdown_mode: ShutdownMode) -> Self { self.shutdown_mode = Some(shutdown_mode); self } /// SIGNALNAME #[must_use] pub fn signal>(mut self, signal: S) -> Self { self.signal = Some(signal.as_ref().to_os_string()); self } /// PID #[must_use] pub fn pid>(mut self, pid: S) -> Self { self.pid = Some(pid.as_ref().to_os_string()); self } } impl CommandBuilder for PgCtlBuilder { /// Get the program name fn get_program(&self) -> &'static OsStr { "pg_ctl".as_ref() } /// Location of the program binary fn get_program_dir(&self) -> &Option { 
&self.program_dir } /// Get the arguments for the command fn get_args(&self) -> Vec { let mut args: Vec = Vec::new(); if let Some(mode) = &self.mode { args.push(mode.to_string().into()); } if let Some(pgdata) = &self.pgdata { args.push("--pgdata".into()); args.push(pgdata.into()); } if self.silent { args.push("--silent".into()); } if let Some(timeout) = &self.timeout { args.push("--timeout".into()); args.push(timeout.to_string().into()); } if self.version { args.push("--version".into()); } if self.wait { args.push("--wait".into()); } if self.no_wait { args.push("--no-wait".into()); } if self.help { args.push("--help".into()); } if self.core_files { args.push("--core-files".into()); } if let Some(log) = &self.log { args.push("--log".into()); args.push(log.into()); } for option in &self.options { args.push("-o".into()); args.push(option.into()); } if let Some(path_to_postgres) = &self.path_to_postgres { args.push("-p".into()); args.push(path_to_postgres.into()); } if let Some(shutdown_mode) = &self.shutdown_mode { args.push("--mode".into()); args.push(shutdown_mode.to_string().into()); } if let Some(signal) = &self.signal { args.push(signal.into()); } if let Some(pid) = &self.pid { args.push(pid.into()); } args } /// Get the environment variables for the command fn get_envs(&self) -> Vec<(OsString, OsString)> { self.envs.clone() } /// Set an environment variable for the command fn env>(mut self, key: S, value: S) -> Self { self.envs .push((key.as_ref().to_os_string(), value.as_ref().to_os_string())); self } } #[cfg(test)] mod tests { use super::*; use crate::TestSettings; use crate::traits::CommandToString; use test_log::test; #[test] fn test_display_mode() { assert_eq!("initdb", Mode::InitDb.to_string()); assert_eq!("kill", Mode::Kill.to_string()); assert_eq!("logrotate", Mode::LogRotate.to_string()); assert_eq!("promote", Mode::Promote.to_string()); assert_eq!("restart", Mode::Restart.to_string()); assert_eq!("reload", Mode::Reload.to_string()); assert_eq!("start", 
Mode::Start.to_string()); assert_eq!("stop", Mode::Stop.to_string()); assert_eq!("status", Mode::Status.to_string()); } #[test] fn test_display_shutdown_mode() { assert_eq!("smart", ShutdownMode::Smart.to_string()); assert_eq!("fast", ShutdownMode::Fast.to_string()); assert_eq!("immediate", ShutdownMode::Immediate.to_string()); } #[test] fn test_builder_new() { let command = PgCtlBuilder::new().program_dir(".").build(); assert_eq!( PathBuf::from(".").join("pg_ctl"), PathBuf::from(command.to_command_string().replace('"', "")) ); } #[test] fn test_builder_from() { let command = PgCtlBuilder::from(&TestSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#""./pg_ctl""#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\pg_ctl""#; assert_eq!(format!("{command_prefix}"), command.to_command_string()); } #[test] fn test_builder() { let command = PgCtlBuilder::new() .env("PGDATABASE", "database") .mode(Mode::Start) .pgdata("pgdata") .silent() .timeout(60) .version() .wait() .no_wait() .help() .core_files() .log("log") .options(&["-c log_connections=on"]) .path_to_postgres("path_to_postgres") .shutdown_mode(ShutdownMode::Smart) .signal("HUP") .pid("12345") .build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGDATABASE="database" "#; #[cfg(target_os = "windows")] let command_prefix = String::new(); assert_eq!( format!( r#"{command_prefix}"pg_ctl" "start" "--pgdata" "pgdata" "--silent" "--timeout" "60" "--version" "--wait" "--no-wait" "--help" "--core-files" "--log" "log" "-o" "-c log_connections=on" "-p" "path_to_postgres" "--mode" "smart" "HUP" "12345""# ), command.to_command_string() ); } } ================================================ FILE: postgresql_commands/src/pg_dump.rs ================================================ use crate::Settings; use crate::traits::CommandBuilder; use std::convert::AsRef; use std::ffi::{OsStr, OsString}; use std::path::PathBuf; /// `pg_dump` dumps a database as a text file or to other 
formats. #[derive(Clone, Debug, Default)] pub struct PgDumpBuilder { program_dir: Option, envs: Vec<(OsString, OsString)>, data_only: bool, large_objects: bool, no_large_objects: bool, clean: bool, create: bool, extension: Option, encoding: Option, file: Option, format: Option, jobs: Option, schema: Option, exclude_schema: Option, no_owner: bool, no_reconnect: bool, schema_only: bool, superuser: Option, table: Option, exclude_table: Option, verbose: bool, version: bool, no_privileges: bool, compress: Option, binary_upgrade: bool, column_inserts: bool, attribute_inserts: bool, disable_dollar_quoting: bool, disable_triggers: bool, enable_row_security: bool, exclude_table_data_and_children: Option, extra_float_digits: Option, if_exists: bool, include_foreign_data: Option, inserts: bool, load_via_partition_root: bool, lock_wait_timeout: Option, no_comments: bool, no_publications: bool, no_security_labels: bool, no_subscriptions: bool, no_table_access_method: bool, no_tablespaces: bool, no_toast_compression: bool, no_unlogged_table_data: bool, on_conflict_do_nothing: bool, quote_all_identifiers: bool, rows_per_insert: Option, section: Option, serializable_deferrable: bool, snapshot: Option, strict_names: bool, table_and_children: Option, use_set_session_authorization: bool, help: bool, dbname: Option, host: Option, port: Option, username: Option, no_password: bool, password: bool, pg_password: Option, role: Option, } impl PgDumpBuilder { /// Create a new [`PgDumpBuilder`] #[must_use] pub fn new() -> Self { Self::default() } /// Create a new [`PgDumpBuilder`] from [Settings] pub fn from(settings: &dyn Settings) -> Self { let mut builder = Self::new() .program_dir(settings.get_binary_dir()) .host(settings.get_host()) .port(settings.get_port()) .username(settings.get_username()) .pg_password(settings.get_password()); if let Some(socket_dir) = settings.get_socket_dir() { builder = builder.host(socket_dir.to_string_lossy().to_string()); } builder } /// Location of the 
program binary #[must_use] pub fn program_dir>(mut self, path: P) -> Self { self.program_dir = Some(path.into()); self } /// Dump only the data, not the schema #[must_use] pub fn data_only(mut self) -> Self { self.data_only = true; self } /// Dump large objects in binary format #[must_use] pub fn large_objects(mut self) -> Self { self.large_objects = true; self } /// Do not dump large objects #[must_use] pub fn no_large_objects(mut self) -> Self { self.no_large_objects = true; self } /// Output commands to clean (drop) database objects prior to outputting the commands for creating them #[must_use] pub fn clean(mut self) -> Self { self.clean = true; self } /// Output commands to create the database objects (data definition) #[must_use] pub fn create(mut self) -> Self { self.create = true; self } /// Dump data for the named extension #[must_use] pub fn extension>(mut self, extension: S) -> Self { self.extension = Some(extension.as_ref().to_os_string()); self } /// Dump data in encoding ENCODING #[must_use] pub fn encoding>(mut self, encoding: S) -> Self { self.encoding = Some(encoding.as_ref().to_os_string()); self } /// Set the output file or directory name #[must_use] pub fn file>(mut self, file: S) -> Self { self.file = Some(file.as_ref().to_os_string()); self } /// Set the output file format (custom, directory, tar, plain text (default)) #[must_use] pub fn format>(mut self, format: S) -> Self { self.format = Some(format.as_ref().to_os_string()); self } /// Use this many parallel jobs to dump #[must_use] pub fn jobs>(mut self, jobs: S) -> Self { self.jobs = Some(jobs.as_ref().to_os_string()); self } /// Dump data for the named schema(s) only #[must_use] pub fn schema>(mut self, schema: S) -> Self { self.schema = Some(schema.as_ref().to_os_string()); self } /// Do not output commands to set ownership of objects to match the original database #[must_use] pub fn exclude_schema>(mut self, exclude_schema: S) -> Self { self.exclude_schema = 
Some(exclude_schema.as_ref().to_os_string()); self } /// Do not output commands to set ownership of objects to match the original database #[must_use] pub fn no_owner(mut self) -> Self { self.no_owner = true; self } /// Do not reconnect to the database #[must_use] pub fn no_reconnect(mut self) -> Self { self.no_reconnect = true; self } /// Dump only the schema, no data #[must_use] pub fn schema_only(mut self) -> Self { self.schema_only = true; self } /// Dump data as a superuser #[must_use] pub fn superuser>(mut self, superuser: S) -> Self { self.superuser = Some(superuser.as_ref().to_os_string()); self } /// Dump data for the named table(s) only #[must_use] pub fn table>(mut self, table: S) -> Self { self.table = Some(table.as_ref().to_os_string()); self } /// Do not output commands to create the table(s) containing the data #[must_use] pub fn exclude_table>(mut self, exclude_table: S) -> Self { self.exclude_table = Some(exclude_table.as_ref().to_os_string()); self } /// Enable verbose mode #[must_use] pub fn verbose(mut self) -> Self { self.verbose = true; self } /// Output version information, then exit #[must_use] pub fn version(mut self) -> Self { self.version = true; self } /// Do not output commands to set object privileges #[must_use] pub fn no_privileges(mut self) -> Self { self.no_privileges = true; self } /// Set the compress level to use #[must_use] pub fn compress>(mut self, compress: S) -> Self { self.compress = Some(compress.as_ref().to_os_string()); self } /// Dump data in a format suitable for binary upgrade #[must_use] pub fn binary_upgrade(mut self) -> Self { self.binary_upgrade = true; self } /// Dump data as INSERT commands with column names #[must_use] pub fn column_inserts(mut self) -> Self { self.column_inserts = true; self } /// Dump data as INSERT commands with column names #[must_use] pub fn attribute_inserts(mut self) -> Self { self.attribute_inserts = true; self } /// Disable dollar quoting, use SQL standard quoting #[must_use] pub fn 
disable_dollar_quoting(mut self) -> Self {
        self.disable_dollar_quoting = true;
        self
    }

    /// Disable triggers during data-only restore
    #[must_use]
    pub fn disable_triggers(mut self) -> Self {
        self.disable_triggers = true;
        self
    }

    /// Dump data with row security enabled
    #[must_use]
    pub fn enable_row_security(mut self) -> Self {
        self.enable_row_security = true;
        self
    }

    /// Dump data for the named table(s) but exclude data for their child tables
    #[must_use]
    pub fn exclude_table_data_and_children<S: AsRef<OsStr>>(
        mut self,
        exclude_table_data_and_children: S,
    ) -> Self {
        self.exclude_table_data_and_children =
            Some(exclude_table_data_and_children.as_ref().to_os_string());
        self
    }

    /// Set the number of digits displayed for floating-point values
    #[must_use]
    pub fn extra_float_digits<S: AsRef<OsStr>>(mut self, extra_float_digits: S) -> Self {
        self.extra_float_digits = Some(extra_float_digits.as_ref().to_os_string());
        self
    }

    /// Use IF EXISTS when dropping objects
    #[must_use]
    pub fn if_exists(mut self) -> Self {
        self.if_exists = true;
        self
    }

    /// Include foreign-data wrappers in the dump
    #[must_use]
    pub fn include_foreign_data<S: AsRef<OsStr>>(mut self, include_foreign_data: S) -> Self {
        self.include_foreign_data = Some(include_foreign_data.as_ref().to_os_string());
        self
    }

    /// Dump data as INSERT commands
    #[must_use]
    pub fn inserts(mut self) -> Self {
        self.inserts = true;
        self
    }

    /// Load data via the partition root table
    #[must_use]
    pub fn load_via_partition_root(mut self) -> Self {
        self.load_via_partition_root = true;
        self
    }

    /// Fail after waiting TIMEOUT for a table lock
    #[must_use]
    pub fn lock_wait_timeout(mut self, lock_wait_timeout: u16) -> Self {
        self.lock_wait_timeout = Some(lock_wait_timeout);
        self
    }

    /// Do not output comments
    #[must_use]
    pub fn no_comments(mut self) -> Self {
        self.no_comments = true;
        self
    }

    /// Do not output publications
    #[must_use]
    pub fn no_publications(mut self) -> Self {
        self.no_publications = true;
        self
    }

    /// Do not output security labels
    #[must_use]
    pub fn no_security_labels(mut self) -> Self {
        self.no_security_labels = true;
        self
    }

    /// Do not output subscriptions
    #[must_use]
    pub fn no_subscriptions(mut self) -> Self {
        self.no_subscriptions = true;
        self
    }

    /// Do not output table access method
    #[must_use]
    pub fn no_table_access_method(mut self) -> Self {
        self.no_table_access_method = true;
        self
    }

    /// Do not output tablespace assignments
    #[must_use]
    pub fn no_tablespaces(mut self) -> Self {
        self.no_tablespaces = true;
        self
    }

    /// Do not output TOAST table compression
    #[must_use]
    pub fn no_toast_compression(mut self) -> Self {
        self.no_toast_compression = true;
        self
    }

    /// Do not output unlogged table data
    #[must_use]
    pub fn no_unlogged_table_data(mut self) -> Self {
        self.no_unlogged_table_data = true;
        self
    }

    /// Use ON CONFLICT DO NOTHING for INSERTs
    #[must_use]
    pub fn on_conflict_do_nothing(mut self) -> Self {
        self.on_conflict_do_nothing = true;
        self
    }

    /// Quote all identifiers, even if not key words
    #[must_use]
    pub fn quote_all_identifiers(mut self) -> Self {
        self.quote_all_identifiers = true;
        self
    }

    /// Set the number of rows per INSERT
    #[must_use]
    pub fn rows_per_insert(mut self, rows_per_insert: u64) -> Self {
        self.rows_per_insert = Some(rows_per_insert);
        self
    }

    /// Dump data for the named section(s) only
    #[must_use]
    pub fn section<S: AsRef<OsStr>>(mut self, section: S) -> Self {
        self.section = Some(section.as_ref().to_os_string());
        self
    }

    /// Dump data as a serializable transaction
    #[must_use]
    pub fn serializable_deferrable(mut self) -> Self {
        self.serializable_deferrable = true;
        self
    }

    /// Use a snapshot with the specified name
    #[must_use]
    pub fn snapshot<S: AsRef<OsStr>>(mut self, snapshot: S) -> Self {
        self.snapshot = Some(snapshot.as_ref().to_os_string());
        self
    }

    /// Use strict SQL identifier syntax
    #[must_use]
    pub fn strict_names(mut self) -> Self {
        self.strict_names = true;
        self
    }

    /// Dump data for the named table(s) and their children
    #[must_use]
    pub fn table_and_children<S: AsRef<OsStr>>(mut self, table_and_children: S) -> Self {
        self.table_and_children = Some(table_and_children.as_ref().to_os_string());
        self
    }

    /// Use SET SESSION AUTHORIZATION commands instead of ALTER OWNER
    #[must_use]
    pub fn use_set_session_authorization(mut self) -> Self {
        self.use_set_session_authorization = true;
        self
    }

    /// Show help, then exit
    #[must_use]
    pub fn help(mut self) -> Self {
        self.help = true;
        self
    }

    /// database to connect to
    #[must_use]
    pub fn dbname<S: AsRef<OsStr>>(mut self, dbname: S) -> Self {
        self.dbname = Some(dbname.as_ref().to_os_string());
        self
    }

    /// database server host or socket directory
    #[must_use]
    pub fn host<S: AsRef<OsStr>>(mut self, host: S) -> Self {
        self.host = Some(host.as_ref().to_os_string());
        self
    }

    /// database server port
    #[must_use]
    pub fn port(mut self, port: u16) -> Self {
        self.port = Some(port);
        self
    }

    /// database user name
    #[must_use]
    pub fn username<S: AsRef<OsStr>>(mut self, username: S) -> Self {
        self.username = Some(username.as_ref().to_os_string());
        self
    }

    /// never prompt for password
    #[must_use]
    pub fn no_password(mut self) -> Self {
        self.no_password = true;
        self
    }

    /// force password prompt (should happen automatically)
    #[must_use]
    pub fn password(mut self) -> Self {
        self.password = true;
        self
    }

    /// user password; passed to the child process via the PGPASSWORD environment
    /// variable rather than a command-line argument (see `get_envs`)
    #[must_use]
    pub fn pg_password<S: AsRef<OsStr>>(mut self, pg_password: S) -> Self {
        self.pg_password = Some(pg_password.as_ref().to_os_string());
        self
    }

    /// Specifies a role name to be used to create the dump
    #[must_use]
    pub fn role<S: AsRef<OsStr>>(mut self, rolename: S) -> Self {
        self.role = Some(rolename.as_ref().to_os_string());
        self
    }
}

impl CommandBuilder for PgDumpBuilder {
    /// Get the program name
    fn get_program(&self) -> &'static OsStr {
        "pg_dump".as_ref()
    }

    /// Location of the program binary
    fn get_program_dir(&self) -> &Option<PathBuf> {
        &self.program_dir
    }

    /// Get the arguments for the command; each configured option is emitted as
    /// its long-form `pg_dump` flag, in the order the options are declared.
    #[expect(clippy::too_many_lines)]
    fn get_args(&self) -> Vec<OsString> {
        let mut args: Vec<OsString> = Vec::new();
        if self.data_only {
            args.push("--data-only".into());
        }
        if self.large_objects {
            args.push("--large-objects".into());
        }
        if self.no_large_objects {
            args.push("--no-large-objects".into());
        }
        if self.clean {
            args.push("--clean".into());
        }
        if self.create {
            args.push("--create".into());
        }
        if let Some(extension) = &self.extension {
            args.push("--extension".into());
            args.push(extension.into());
        }
        if let Some(encoding) = &self.encoding {
            args.push("--encoding".into());
            args.push(encoding.into());
        }
        if let Some(file) = &self.file {
            args.push("--file".into());
            args.push(file.into());
        }
        if let Some(format) = &self.format {
            args.push("--format".into());
            args.push(format.into());
        }
        if let Some(jobs) = &self.jobs {
            args.push("--jobs".into());
            args.push(jobs.into());
        }
        if let Some(schema) = &self.schema {
            args.push("--schema".into());
            args.push(schema.into());
        }
        if let Some(exclude_schema) = &self.exclude_schema {
            args.push("--exclude-schema".into());
            args.push(exclude_schema.into());
        }
        if self.no_owner {
            args.push("--no-owner".into());
        }
        if self.no_reconnect {
            args.push("--no-reconnect".into());
        }
        if self.schema_only {
            args.push("--schema-only".into());
        }
        if let Some(superuser) = &self.superuser {
            args.push("--superuser".into());
            args.push(superuser.into());
        }
        if let Some(table) = &self.table {
            args.push("--table".into());
            args.push(table.into());
        }
        if let Some(exclude_table) = &self.exclude_table {
            args.push("--exclude-table".into());
            args.push(exclude_table.into());
        }
        if self.verbose {
            args.push("--verbose".into());
        }
        if self.version {
            args.push("--version".into());
        }
        if self.no_privileges {
            args.push("--no-privileges".into());
        }
        if let Some(compress) = &self.compress {
            args.push("--compress".into());
            args.push(compress.into());
        }
        if self.binary_upgrade {
            args.push("--binary-upgrade".into());
        }
        if self.column_inserts {
            args.push("--column-inserts".into());
        }
        if self.attribute_inserts {
            args.push("--attribute-inserts".into());
        }
        if self.disable_dollar_quoting {
            args.push("--disable-dollar-quoting".into());
        }
        if self.disable_triggers {
            args.push("--disable-triggers".into());
        }
        if self.enable_row_security {
            args.push("--enable-row-security".into());
        }
        if let Some(exclude_table_data_and_children) = &self.exclude_table_data_and_children {
            args.push("--exclude-table-data-and-children".into());
            args.push(exclude_table_data_and_children.into());
        }
        if let Some(extra_float_digits) = &self.extra_float_digits {
            args.push("--extra-float-digits".into());
            args.push(extra_float_digits.into());
        }
        if self.if_exists {
            args.push("--if-exists".into());
        }
        if let Some(include_foreign_data) = &self.include_foreign_data {
            args.push("--include-foreign-data".into());
            args.push(include_foreign_data.into());
        }
        if self.inserts {
            args.push("--inserts".into());
        }
        if self.load_via_partition_root {
            args.push("--load-via-partition-root".into());
        }
        if let Some(lock_wait_timeout) = &self.lock_wait_timeout {
            args.push("--lock-wait-timeout".into());
            args.push(lock_wait_timeout.to_string().into());
        }
        if self.no_comments {
            args.push("--no-comments".into());
        }
        if self.no_publications {
            args.push("--no-publications".into());
        }
        if self.no_security_labels {
            args.push("--no-security-labels".into());
        }
        if self.no_subscriptions {
            args.push("--no-subscriptions".into());
        }
        if self.no_table_access_method {
            args.push("--no-table-access-method".into());
        }
        if self.no_tablespaces {
            args.push("--no-tablespaces".into());
        }
        if self.no_toast_compression {
            args.push("--no-toast-compression".into());
        }
        if self.no_unlogged_table_data {
            args.push("--no-unlogged-table-data".into());
        }
        if self.on_conflict_do_nothing {
            args.push("--on-conflict-do-nothing".into());
        }
        if self.quote_all_identifiers {
            args.push("--quote-all-identifiers".into());
        }
        if let Some(rows_per_insert) = &self.rows_per_insert {
            args.push("--rows-per-insert".into());
            args.push(rows_per_insert.to_string().into());
        }
        if let Some(section) = &self.section {
            args.push("--section".into());
            args.push(section.into());
        }
        if self.serializable_deferrable {
            args.push("--serializable-deferrable".into());
        }
        if let Some(snapshot) = &self.snapshot {
            args.push("--snapshot".into());
            args.push(snapshot.into());
        }
        if self.strict_names {
            args.push("--strict-names".into());
        }
        if let Some(table_and_children) = &self.table_and_children {
            args.push("--table-and-children".into());
            args.push(table_and_children.into());
        }
        if self.use_set_session_authorization {
            args.push("--use-set-session-authorization".into());
        }
        if self.help {
            args.push("--help".into());
        }
        if let Some(dbname) = &self.dbname {
            args.push("--dbname".into());
            args.push(dbname.into());
        }
        if let Some(host) = &self.host {
            args.push("--host".into());
            args.push(host.into());
        }
        if let Some(port) = &self.port {
            args.push("--port".into());
            args.push(port.to_string().into());
        }
        if let Some(username) = &self.username {
            args.push("--username".into());
            args.push(username.into());
        }
        if self.no_password {
            args.push("--no-password".into());
        }
        if self.password {
            args.push("--password".into());
        }
        if let Some(role) = &self.role {
            args.push("--role".into());
            args.push(role.into());
        }
        args
    }

    /// Get the environment variables for the command; the password (if any) is
    /// supplied via PGPASSWORD so it never appears on the command line
    fn get_envs(&self) -> Vec<(OsString, OsString)> {
        let mut envs: Vec<(OsString, OsString)> = self.envs.clone();
        if let Some(password) = &self.pg_password {
            envs.push(("PGPASSWORD".into(), password.into()));
        }
        envs
    }

    /// Set an environment variable for the command
    fn env<S: AsRef<OsStr>>(mut self, key: S, value: S) -> Self {
        self.envs
            .push((key.as_ref().to_os_string(), value.as_ref().to_os_string()));
        self
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::TestSettings;
    use crate::TestSocketSettings;
    use crate::traits::CommandToString;
    use test_log::test;

    #[test]
    fn test_builder_new() {
        let command = PgDumpBuilder::new().program_dir(".").build();
        assert_eq!(
            PathBuf::from(".").join("pg_dump"),
            PathBuf::from(command.to_command_string().replace('"', ""))
        );
    }

    #[test]
    fn test_builder_from() {
        let command = PgDumpBuilder::from(&TestSettings).build();
        #[cfg(not(target_os = "windows"))]
        let command_prefix = r#"PGPASSWORD="password" "./pg_dump" "#;
        #[cfg(target_os = "windows")]
        let command_prefix = r#"".\\pg_dump" "#;
        assert_eq!(
            format!(
                r#"{command_prefix}"--host" "localhost" "--port" "5432" "--username" "postgres""#
            ),
            command.to_command_string()
        );
    }

    #[test]
    fn test_builder_from_socket() {
        let command = PgDumpBuilder::from(&TestSocketSettings).build();
        #[cfg(not(target_os = "windows"))]
        let command_prefix = r#"PGPASSWORD="password" "./pg_dump" "#;
        #[cfg(target_os = "windows")]
        let command_prefix = r#"".\\pg_dump" "#;
        assert_eq!(
            format!(
                r#"{command_prefix}"--host" "/tmp/pg_socket" "--port" "5432" "--username" "postgres""#
            ),
            command.to_command_string()
        );
    }

    #[test]
    fn test_builder() {
        let command = PgDumpBuilder::new()
            .env("PGDATABASE", "database")
            .data_only()
            .large_objects()
            .no_large_objects()
            .clean()
            .create()
            .extension("extension")
            .encoding("UTF8")
            .file("file")
            .format("format")
            .jobs("jobs")
            .schema("schema")
            .exclude_schema("exclude_schema")
            .no_owner()
            .no_reconnect()
            .schema_only()
            .superuser("superuser")
            .table("table")
            .exclude_table("exclude_table")
            .verbose()
            .version()
            .no_privileges()
            .compress("compress")
            .binary_upgrade()
            .column_inserts()
            .attribute_inserts()
            .disable_dollar_quoting()
            .disable_triggers()
            .enable_row_security()
            .exclude_table_data_and_children("exclude_table_data_and_children")
            .extra_float_digits("extra_float_digits")
            .if_exists()
            .include_foreign_data("include_foreign_data")
            .inserts()
            .load_via_partition_root()
            .lock_wait_timeout(10)
            .no_comments()
            .no_publications()
            .no_security_labels()
            .no_subscriptions()
            .no_table_access_method()
            .no_tablespaces()
            .no_toast_compression()
            .no_unlogged_table_data()
            .on_conflict_do_nothing()
            .quote_all_identifiers()
            .rows_per_insert(100)
            .section("section")
            .serializable_deferrable()
            .snapshot("snapshot")
            .strict_names()
            .table_and_children("table_and_children")
            .use_set_session_authorization()
            .help()
            .dbname("dbname")
            .host("localhost")
            .port(5432)
            .username("postgres")
            .no_password()
            .password()
            .pg_password("password")
.role("role") .build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGDATABASE="database" PGPASSWORD="password" "#; #[cfg(target_os = "windows")] let command_prefix = String::new(); assert_eq!( format!( r#"{command_prefix}"pg_dump" "--data-only" "--large-objects" "--no-large-objects" "--clean" "--create" "--extension" "extension" "--encoding" "UTF8" "--file" "file" "--format" "format" "--jobs" "jobs" "--schema" "schema" "--exclude-schema" "exclude_schema" "--no-owner" "--no-reconnect" "--schema-only" "--superuser" "superuser" "--table" "table" "--exclude-table" "exclude_table" "--verbose" "--version" "--no-privileges" "--compress" "compress" "--binary-upgrade" "--column-inserts" "--attribute-inserts" "--disable-dollar-quoting" "--disable-triggers" "--enable-row-security" "--exclude-table-data-and-children" "exclude_table_data_and_children" "--extra-float-digits" "extra_float_digits" "--if-exists" "--include-foreign-data" "include_foreign_data" "--inserts" "--load-via-partition-root" "--lock-wait-timeout" "10" "--no-comments" "--no-publications" "--no-security-labels" "--no-subscriptions" "--no-table-access-method" "--no-tablespaces" "--no-toast-compression" "--no-unlogged-table-data" "--on-conflict-do-nothing" "--quote-all-identifiers" "--rows-per-insert" "100" "--section" "section" "--serializable-deferrable" "--snapshot" "snapshot" "--strict-names" "--table-and-children" "table_and_children" "--use-set-session-authorization" "--help" "--dbname" "dbname" "--host" "localhost" "--port" "5432" "--username" "postgres" "--no-password" "--password" "--role" "role""# ), command.to_command_string() ); } } ================================================ FILE: postgresql_commands/src/pg_dumpall.rs ================================================ use crate::Settings; use crate::traits::CommandBuilder; use std::convert::AsRef; use std::ffi::{OsStr, OsString}; use std::path::PathBuf; /// `pg_dumpall` extracts a `PostgreSQL` database cluster into an SQL script 
file. #[derive(Clone, Debug, Default)] pub struct PgDumpAllBuilder { program_dir: Option, envs: Vec<(OsString, OsString)>, file: Option, verbose: bool, version: bool, lock_wait_timeout: Option, help: bool, data_only: bool, clean: bool, encoding: Option, globals_only: bool, no_owner: bool, roles_only: bool, schema_only: bool, superuser: Option, tablespaces_only: bool, no_privileges: bool, binary_upgrade: bool, column_inserts: bool, disable_dollar_quoting: bool, disable_triggers: bool, exclude_database: Option, extra_float_digits: Option, if_exists: bool, inserts: bool, load_via_partition_root: bool, no_comments: bool, no_publications: bool, no_role_passwords: bool, no_security_labels: bool, no_subscriptions: bool, no_sync: bool, no_table_access_method: bool, no_tablespaces: bool, no_toast_compression: bool, no_unlogged_table_data: bool, on_conflict_do_nothing: bool, quote_all_identifiers: bool, rows_per_insert: Option, use_set_session_authorization: bool, dbname: Option, host: Option, database: Option, port: Option, username: Option, no_password: bool, password: bool, pg_password: Option, role: Option, } impl PgDumpAllBuilder { /// Create a new [`PgDumpAllBuilder`] #[must_use] pub fn new() -> Self { Self::default() } /// Create a new [`PgDumpAllBuilder`] from [Settings] pub fn from(settings: &dyn Settings) -> Self { let mut builder = Self::new() .program_dir(settings.get_binary_dir()) .host(settings.get_host()) .port(settings.get_port()) .username(settings.get_username()) .pg_password(settings.get_password()); if let Some(socket_dir) = settings.get_socket_dir() { builder = builder.host(socket_dir.to_string_lossy().to_string()); } builder } /// Location of the program binary #[must_use] pub fn program_dir>(mut self, path: P) -> Self { self.program_dir = Some(path.into()); self } /// output file name #[must_use] pub fn file>(mut self, file: S) -> Self { self.file = Some(file.as_ref().to_os_string()); self } /// verbose mode #[must_use] pub fn verbose(mut self) -> Self 
{ self.verbose = true; self } /// output version information, then exit #[must_use] pub fn version(mut self) -> Self { self.version = true; self } /// fail after waiting TIMEOUT for a table lock #[must_use] pub fn lock_wait_timeout(mut self, lock_wait_timeout: u16) -> Self { self.lock_wait_timeout = Some(lock_wait_timeout); self } /// show help, then exit #[must_use] pub fn help(mut self) -> Self { self.help = true; self } /// dump only the data, not the schema #[must_use] pub fn data_only(mut self) -> Self { self.data_only = true; self } /// clean (drop) database objects before recreating them #[must_use] pub fn clean(mut self) -> Self { self.clean = true; self } /// encoding for the dump #[must_use] pub fn encoding>(mut self, encoding: S) -> Self { self.encoding = Some(encoding.as_ref().to_os_string()); self } /// dump only global objects, not database-specific objects #[must_use] pub fn globals_only(mut self) -> Self { self.globals_only = true; self } /// do not output commands to set object ownership #[must_use] pub fn no_owner(mut self) -> Self { self.no_owner = true; self } /// dump only the roles, not the role memberships or privileges #[must_use] pub fn roles_only(mut self) -> Self { self.roles_only = true; self } /// dump only the object definitions (schema), not data #[must_use] pub fn schema_only(mut self) -> Self { self.schema_only = true; self } /// superuser user name to use in the dump #[must_use] pub fn superuser>(mut self, superuser: S) -> Self { self.superuser = Some(superuser.as_ref().to_os_string()); self } /// dump only the tablespace definitions #[must_use] pub fn tablespaces_only(mut self) -> Self { self.tablespaces_only = true; self } /// do not dump object privileges (grant/revoke commands) #[must_use] pub fn no_privileges(mut self) -> Self { self.no_privileges = true; self } /// dump in a format suitable for binary upgrade #[must_use] pub fn binary_upgrade(mut self) -> Self { self.binary_upgrade = true; self } /// dump data as INSERT 
commands with column names #[must_use] pub fn column_inserts(mut self) -> Self { self.column_inserts = true; self } /// disable dollar quoting, use SQL standard quoting #[must_use] pub fn disable_dollar_quoting(mut self) -> Self { self.disable_dollar_quoting = true; self } /// disable triggers during data-only restore #[must_use] pub fn disable_triggers(mut self) -> Self { self.disable_triggers = true; self } /// exclude the named database from the dump #[must_use] pub fn exclude_database>(mut self, exclude_database: S) -> Self { self.exclude_database = Some(exclude_database.as_ref().to_os_string()); self } /// set the number of digits displayed for floating-point values #[must_use] pub fn extra_float_digits>(mut self, extra_float_digits: S) -> Self { self.extra_float_digits = Some(extra_float_digits.as_ref().to_os_string()); self } /// use IF EXISTS when dropping objects #[must_use] pub fn if_exists(mut self) -> Self { self.if_exists = true; self } /// dump data as proper INSERT commands #[must_use] pub fn inserts(mut self) -> Self { self.inserts = true; self } /// load data via the partition root table #[must_use] pub fn load_via_partition_root(mut self) -> Self { self.load_via_partition_root = true; self } /// do not dump comments #[must_use] pub fn no_comments(mut self) -> Self { self.no_comments = true; self } /// do not dump publications #[must_use] pub fn no_publications(mut self) -> Self { self.no_publications = true; self } /// do not dump passwords for roles #[must_use] pub fn no_role_passwords(mut self) -> Self { self.no_role_passwords = true; self } /// do not dump security labels #[must_use] pub fn no_security_labels(mut self) -> Self { self.no_security_labels = true; self } /// do not dump subscriptions #[must_use] pub fn no_subscriptions(mut self) -> Self { self.no_subscriptions = true; self } /// do not wait for changes to be written safely to disk #[must_use] pub fn no_sync(mut self) -> Self { self.no_sync = true; self } /// do not dump table 
access method information #[must_use] pub fn no_table_access_method(mut self) -> Self { self.no_table_access_method = true; self } /// do not dump tablespace assignments #[must_use] pub fn no_tablespaces(mut self) -> Self { self.no_tablespaces = true; self } /// do not dump TOAST compression information #[must_use] pub fn no_toast_compression(mut self) -> Self { self.no_toast_compression = true; self } /// do not dump unlogged table data #[must_use] pub fn no_unlogged_table_data(mut self) -> Self { self.no_unlogged_table_data = true; self } /// use ON CONFLICT DO NOTHING for INSERTs #[must_use] pub fn on_conflict_do_nothing(mut self) -> Self { self.on_conflict_do_nothing = true; self } /// quote all identifiers, even if not key words #[must_use] pub fn quote_all_identifiers(mut self) -> Self { self.quote_all_identifiers = true; self } /// set the number of rows per INSERT command #[must_use] pub fn rows_per_insert>(mut self, rows_per_insert: S) -> Self { self.rows_per_insert = Some(rows_per_insert.as_ref().to_os_string()); self } /// use SET SESSION AUTHORIZATION commands instead of ALTER OWNER #[must_use] pub fn use_set_session_authorization(mut self) -> Self { self.use_set_session_authorization = true; self } /// database name to connect to #[must_use] pub fn dbname>(mut self, dbname: S) -> Self { self.dbname = Some(dbname.as_ref().to_os_string()); self } /// database server host or socket directory #[must_use] pub fn host>(mut self, host: S) -> Self { self.host = Some(host.as_ref().to_os_string()); self } /// database name to connect to #[must_use] pub fn database>(mut self, database: S) -> Self { self.database = Some(database.as_ref().to_os_string()); self } /// database server port number #[must_use] pub fn port(mut self, port: u16) -> Self { self.port = Some(port); self } /// user name to connect as #[must_use] pub fn username>(mut self, username: S) -> Self { self.username = Some(username.as_ref().to_os_string()); self } /// never prompt for password 
#[must_use] pub fn no_password(mut self) -> Self { self.no_password = true; self } /// force password prompt #[must_use] pub fn password(mut self) -> Self { self.password = true; self } /// user password #[must_use] pub fn pg_password>(mut self, pg_password: S) -> Self { self.pg_password = Some(pg_password.as_ref().to_os_string()); self } /// role name to use in the dump #[must_use] pub fn role>(mut self, role: S) -> Self { self.role = Some(role.as_ref().to_os_string()); self } } impl CommandBuilder for PgDumpAllBuilder { /// Get the program name fn get_program(&self) -> &'static OsStr { "pg_dumpall".as_ref() } /// Location of the program binary fn get_program_dir(&self) -> &Option { &self.program_dir } /// Get the arguments for the command #[expect(clippy::too_many_lines)] fn get_args(&self) -> Vec { let mut args: Vec = Vec::new(); if let Some(file) = &self.file { args.push("--file".into()); args.push(file.into()); } if self.verbose { args.push("--verbose".into()); } if self.version { args.push("--version".into()); } if let Some(lock_wait_timeout) = &self.lock_wait_timeout { args.push("--lock-wait-timeout".into()); args.push(lock_wait_timeout.to_string().into()); } if self.help { args.push("--help".into()); } if self.data_only { args.push("--data-only".into()); } if self.clean { args.push("--clean".into()); } if let Some(encoding) = &self.encoding { args.push("--encoding".into()); args.push(encoding.into()); } if self.globals_only { args.push("--globals-only".into()); } if self.no_owner { args.push("--no-owner".into()); } if self.roles_only { args.push("--roles-only".into()); } if self.schema_only { args.push("--schema-only".into()); } if let Some(superuser) = &self.superuser { args.push("--superuser".into()); args.push(superuser.into()); } if self.tablespaces_only { args.push("--tablespaces-only".into()); } if self.no_privileges { args.push("--no-privileges".into()); } if self.binary_upgrade { args.push("--binary-upgrade".into()); } if self.column_inserts { 
args.push("--column-inserts".into()); } if self.disable_dollar_quoting { args.push("--disable-dollar-quoting".into()); } if self.disable_triggers { args.push("--disable-triggers".into()); } if let Some(exclude_database) = &self.exclude_database { args.push("--exclude-database".into()); args.push(exclude_database.into()); } if let Some(extra_float_digits) = &self.extra_float_digits { args.push("--extra-float-digits".into()); args.push(extra_float_digits.into()); } if self.if_exists { args.push("--if-exists".into()); } if self.inserts { args.push("--inserts".into()); } if self.load_via_partition_root { args.push("--load-via-partition-root".into()); } if self.no_comments { args.push("--no-comments".into()); } if self.no_publications { args.push("--no-publications".into()); } if self.no_role_passwords { args.push("--no-role-passwords".into()); } if self.no_security_labels { args.push("--no-security-labels".into()); } if self.no_subscriptions { args.push("--no-subscriptions".into()); } if self.no_sync { args.push("--no-sync".into()); } if self.no_table_access_method { args.push("--no-table-access-method".into()); } if self.no_tablespaces { args.push("--no-tablespaces".into()); } if self.no_toast_compression { args.push("--no-toast-compression".into()); } if self.no_unlogged_table_data { args.push("--no-unlogged-table-data".into()); } if self.on_conflict_do_nothing { args.push("--on-conflict-do-nothing".into()); } if self.quote_all_identifiers { args.push("--quote-all-identifiers".into()); } if let Some(rows_per_insert) = &self.rows_per_insert { args.push("--rows-per-insert".into()); args.push(rows_per_insert.into()); } if self.use_set_session_authorization { args.push("--use-set-session-authorization".into()); } if let Some(dbname) = &self.dbname { args.push("--dbname".into()); args.push(dbname.into()); } if let Some(host) = &self.host { args.push("--host".into()); args.push(host.into()); } if let Some(database) = &self.database { args.push("--database".into()); 
args.push(database.into()); } if let Some(port) = &self.port { args.push("--port".into()); args.push(port.to_string().into()); } if let Some(username) = &self.username { args.push("--username".into()); args.push(username.into()); } if self.no_password { args.push("--no-password".into()); } if self.password { args.push("--password".into()); } if let Some(role) = &self.role { args.push("--role".into()); args.push(role.into()); } args } /// Get the environment variables for the command fn get_envs(&self) -> Vec<(OsString, OsString)> { let mut envs: Vec<(OsString, OsString)> = self.envs.clone(); if let Some(password) = &self.pg_password { envs.push(("PGPASSWORD".into(), password.into())); } envs } /// Set an environment variable for the command fn env>(mut self, key: S, value: S) -> Self { self.envs .push((key.as_ref().to_os_string(), value.as_ref().to_os_string())); self } } #[cfg(test)] mod tests { use super::*; use crate::TestSettings; use crate::TestSocketSettings; use crate::traits::CommandToString; use test_log::test; #[test] fn test_builder_new() { let command = PgDumpAllBuilder::new().program_dir(".").build(); assert_eq!( PathBuf::from(".").join("pg_dumpall"), PathBuf::from(command.to_command_string().replace('"', "")) ); } #[test] fn test_builder_from() { let command = PgDumpAllBuilder::from(&TestSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGPASSWORD="password" "./pg_dumpall" "#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\pg_dumpall" "#; assert_eq!( format!( r#"{command_prefix}"--host" "localhost" "--port" "5432" "--username" "postgres""# ), command.to_command_string() ); } #[test] fn test_builder_from_socket() { let command = PgDumpAllBuilder::from(&TestSocketSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGPASSWORD="password" "./pg_dumpall" "#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\pg_dumpall" "#; assert_eq!( format!( r#"{command_prefix}"--host" 
"/tmp/pg_socket" "--port" "5432" "--username" "postgres""# ), command.to_command_string() ); } #[test] fn test_builder() { let command = PgDumpAllBuilder::new() .env("PGDATABASE", "database") .file("dump.sql") .verbose() .version() .lock_wait_timeout(10) .help() .data_only() .clean() .encoding("UTF8") .globals_only() .no_owner() .roles_only() .schema_only() .superuser("postgres") .tablespaces_only() .no_privileges() .binary_upgrade() .column_inserts() .disable_dollar_quoting() .disable_triggers() .exclude_database("exclude") .extra_float_digits("2") .if_exists() .inserts() .load_via_partition_root() .no_comments() .no_publications() .no_role_passwords() .no_security_labels() .no_subscriptions() .no_sync() .no_table_access_method() .no_tablespaces() .no_toast_compression() .no_unlogged_table_data() .on_conflict_do_nothing() .quote_all_identifiers() .rows_per_insert("1000") .use_set_session_authorization() .dbname("postgres") .host("localhost") .database("postgres") .port(5432) .username("postgres") .no_password() .password() .pg_password("password") .role("postgres") .build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGDATABASE="database" PGPASSWORD="password" "#; #[cfg(target_os = "windows")] let command_prefix = String::new(); assert_eq!( format!( r#"{command_prefix}"pg_dumpall" "--file" "dump.sql" "--verbose" "--version" "--lock-wait-timeout" "10" "--help" "--data-only" "--clean" "--encoding" "UTF8" "--globals-only" "--no-owner" "--roles-only" "--schema-only" "--superuser" "postgres" "--tablespaces-only" "--no-privileges" "--binary-upgrade" "--column-inserts" "--disable-dollar-quoting" "--disable-triggers" "--exclude-database" "exclude" "--extra-float-digits" "2" "--if-exists" "--inserts" "--load-via-partition-root" "--no-comments" "--no-publications" "--no-role-passwords" "--no-security-labels" "--no-subscriptions" "--no-sync" "--no-table-access-method" "--no-tablespaces" "--no-toast-compression" "--no-unlogged-table-data" 
"--on-conflict-do-nothing" "--quote-all-identifiers" "--rows-per-insert" "1000" "--use-set-session-authorization" "--dbname" "postgres" "--host" "localhost" "--database" "postgres" "--port" "5432" "--username" "postgres" "--no-password" "--password" "--role" "postgres""# ), command.to_command_string() ); } } ================================================ FILE: postgresql_commands/src/pg_isready.rs ================================================ use crate::Settings; use crate::traits::CommandBuilder; use std::convert::AsRef; use std::ffi::{OsStr, OsString}; use std::path::PathBuf; /// `pg_isready` issues a connection check to a `PostgreSQL` database. #[derive(Clone, Debug, Default)] pub struct PgIsReadyBuilder { program_dir: Option, envs: Vec<(OsString, OsString)>, dbname: Option, quiet: bool, version: bool, help: bool, host: Option, port: Option, timeout: Option, username: Option, } impl PgIsReadyBuilder { /// Create a new [`PgIsReadyBuilder`] #[must_use] pub fn new() -> Self { Self::default() } /// Create a new [`PgIsReadyBuilder`] from [Settings] pub fn from(settings: &dyn Settings) -> Self { let mut builder = Self::new() .program_dir(settings.get_binary_dir()) .host(settings.get_host()) .port(settings.get_port()) .username(settings.get_username()); if let Some(socket_dir) = settings.get_socket_dir() { builder = builder.host(socket_dir.to_string_lossy().to_string()); } builder } /// Location of the program binary #[must_use] pub fn program_dir>(mut self, path: P) -> Self { self.program_dir = Some(path.into()); self } /// Set the database name #[must_use] pub fn dbname>(mut self, dbname: S) -> Self { self.dbname = Some(dbname.as_ref().to_os_string()); self } /// Run quietly #[must_use] pub fn quiet(mut self) -> Self { self.quiet = true; self } /// Output version information, then exit #[must_use] pub fn version(mut self) -> Self { self.version = true; self } /// Show help, then exit #[must_use] pub fn help(mut self) -> Self { self.help = true; self } /// Set 
the database server host or socket directory #[must_use] pub fn host>(mut self, host: S) -> Self { self.host = Some(host.as_ref().to_os_string()); self } /// Set the database server port #[must_use] pub fn port(mut self, port: u16) -> Self { self.port = Some(port); self } /// Set the seconds to wait when attempting connection, 0 disables (default: 3) #[must_use] pub fn timeout(mut self, timeout: u16) -> Self { self.timeout = Some(timeout); self } /// Set the user name to connect as #[must_use] pub fn username>(mut self, username: S) -> Self { self.username = Some(username.as_ref().to_os_string()); self } } impl CommandBuilder for PgIsReadyBuilder { /// Get the program name fn get_program(&self) -> &'static OsStr { "pg_isready".as_ref() } /// Location of the program binary fn get_program_dir(&self) -> &Option { &self.program_dir } /// Get the arguments for the command fn get_args(&self) -> Vec { let mut args: Vec = Vec::new(); if let Some(dbname) = &self.dbname { args.push("--dbname".into()); args.push(dbname.into()); } if self.quiet { args.push("--quiet".into()); } if self.version { args.push("--version".into()); } if self.help { args.push("--help".into()); } if let Some(host) = &self.host { args.push("--host".into()); args.push(host.into()); } if let Some(port) = &self.port { args.push("--port".into()); args.push(port.to_string().into()); } if let Some(timeout) = &self.timeout { args.push("--timeout".into()); args.push(timeout.to_string().into()); } if let Some(username) = &self.username { args.push("--username".into()); args.push(username.into()); } args } /// Get the environment variables for the command fn get_envs(&self) -> Vec<(OsString, OsString)> { self.envs.clone() } /// Set an environment variable for the command fn env>(mut self, key: S, value: S) -> Self { self.envs .push((key.as_ref().to_os_string(), value.as_ref().to_os_string())); self } } #[cfg(test)] mod tests { use super::*; use crate::TestSettings; use crate::TestSocketSettings; use 
crate::traits::CommandToString; use test_log::test; #[test] fn test_builder_new() { let command = PgIsReadyBuilder::new().program_dir(".").build(); assert_eq!( PathBuf::from(".").join("pg_isready"), PathBuf::from(command.to_command_string().replace('"', "")) ); } #[test] fn test_builder_from() { let command = PgIsReadyBuilder::from(&TestSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#""./pg_isready" "#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\pg_isready" "#; assert_eq!( format!( r#"{command_prefix}"--host" "localhost" "--port" "5432" "--username" "postgres""# ), command.to_command_string() ); } #[test] fn test_builder_from_socket() { let command = PgIsReadyBuilder::from(&TestSocketSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#""./pg_isready" "#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\pg_isready" "#; assert_eq!( format!( r#"{command_prefix}"--host" "/tmp/pg_socket" "--port" "5432" "--username" "postgres""# ), command.to_command_string() ); } #[test] fn test_builder() { let command = PgIsReadyBuilder::new() .env("PGDATABASE", "database") .dbname("postgres") .quiet() .version() .help() .host("localhost") .port(5432) .timeout(3) .username("postgres") .build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGDATABASE="database" "#; #[cfg(target_os = "windows")] let command_prefix = String::new(); assert_eq!( format!( r#"{command_prefix}"pg_isready" "--dbname" "postgres" "--quiet" "--version" "--help" "--host" "localhost" "--port" "5432" "--timeout" "3" "--username" "postgres""# ), command.to_command_string() ); } } ================================================ FILE: postgresql_commands/src/pg_receivewal.rs ================================================ use crate::Settings; use crate::traits::CommandBuilder; use std::convert::AsRef; use std::ffi::{OsStr, OsString}; use std::path::PathBuf; /// `pg_receivewal` receives `PostgreSQL` streaming write-ahead logs. 
#[derive(Clone, Debug, Default)] pub struct PgReceiveWalBuilder { program_dir: Option, envs: Vec<(OsString, OsString)>, directory: Option, endpos: Option, if_not_exists: bool, no_loop: bool, no_sync: bool, status_interval: Option, slot: Option, synchronous: bool, verbose: bool, version: bool, compress: Option, help: bool, dbname: Option, host: Option, port: Option, username: Option, no_password: bool, password: bool, pg_password: Option, create_slot: bool, drop_slot: bool, } impl PgReceiveWalBuilder { /// Create a new [`PgReceiveWalBuilder`] #[must_use] pub fn new() -> Self { Self::default() } /// Create a new [`PgReceiveWalBuilder`] from [Settings] pub fn from(settings: &dyn Settings) -> Self { let mut builder = Self::new() .program_dir(settings.get_binary_dir()) .host(settings.get_host()) .port(settings.get_port()) .username(settings.get_username()) .pg_password(settings.get_password()); if let Some(socket_dir) = settings.get_socket_dir() { builder = builder.host(socket_dir.to_string_lossy().to_string()); } builder } /// Location of the program binary #[must_use] pub fn program_dir>(mut self, path: P) -> Self { self.program_dir = Some(path.into()); self } /// receive write-ahead log files into this directory #[must_use] pub fn directory>(mut self, directory: S) -> Self { self.directory = Some(directory.as_ref().to_os_string()); self } /// exit after receiving the specified LSN #[must_use] pub fn endpos>(mut self, endpos: S) -> Self { self.endpos = Some(endpos.as_ref().to_os_string()); self } /// do not error if slot already exists when creating a slot #[must_use] pub fn if_not_exists(mut self) -> Self { self.if_not_exists = true; self } /// do not loop on connection lost #[must_use] pub fn no_loop(mut self) -> Self { self.no_loop = true; self } /// do not wait for changes to be written safely to disk #[must_use] pub fn no_sync(mut self) -> Self { self.no_sync = true; self } /// time between status packets sent to server (default: 10) #[must_use] pub fn 
status_interval>(mut self, status_interval: S) -> Self { self.status_interval = Some(status_interval.as_ref().to_os_string()); self } /// replication slot to use #[must_use] pub fn slot>(mut self, slot: S) -> Self { self.slot = Some(slot.as_ref().to_os_string()); self } /// flush write-ahead log immediately after writing #[must_use] pub fn synchronous(mut self) -> Self { self.synchronous = true; self } /// output verbose messages #[must_use] pub fn verbose(mut self) -> Self { self.verbose = true; self } /// output version information, then exit #[must_use] pub fn version(mut self) -> Self { self.version = true; self } /// compress as specified #[must_use] pub fn compress>(mut self, compress: S) -> Self { self.compress = Some(compress.as_ref().to_os_string()); self } /// show help, then exit #[must_use] pub fn help(mut self) -> Self { self.help = true; self } /// connection string #[must_use] pub fn dbname>(mut self, dbname: S) -> Self { self.dbname = Some(dbname.as_ref().to_os_string()); self } /// database server host or socket directory #[must_use] pub fn host>(mut self, host: S) -> Self { self.host = Some(host.as_ref().to_os_string()); self } /// database server port number #[must_use] pub fn port(mut self, port: u16) -> Self { self.port = Some(port); self } /// connect as specified database user #[must_use] pub fn username>(mut self, username: S) -> Self { self.username = Some(username.as_ref().to_os_string()); self } /// never prompt for password #[must_use] pub fn no_password(mut self) -> Self { self.no_password = true; self } /// force password prompt (should happen automatically) #[must_use] pub fn password(mut self) -> Self { self.password = true; self } /// user password #[must_use] pub fn pg_password>(mut self, pg_password: S) -> Self { self.pg_password = Some(pg_password.as_ref().to_os_string()); self } /// create a new replication slot (for the slot's name see --slot) #[must_use] pub fn create_slot(mut self) -> Self { self.create_slot = true; self } 
/// drop the replication slot (for the slot's name see --slot) #[must_use] pub fn drop_slot(mut self) -> Self { self.drop_slot = true; self } } impl CommandBuilder for PgReceiveWalBuilder { /// Get the program name fn get_program(&self) -> &'static OsStr { "pg_receivewal".as_ref() } /// Location of the program binary fn get_program_dir(&self) -> &Option { &self.program_dir } /// Get the arguments for the command fn get_args(&self) -> Vec { let mut args: Vec = Vec::new(); if let Some(directory) = &self.directory { args.push("--directory".into()); args.push(directory.into()); } if let Some(endpos) = &self.endpos { args.push("--endpos".into()); args.push(endpos.into()); } if self.if_not_exists { args.push("--if-not-exists".into()); } if self.no_loop { args.push("--no-loop".into()); } if self.no_sync { args.push("--no-sync".into()); } if let Some(status_interval) = &self.status_interval { args.push("--status-interval".into()); args.push(status_interval.into()); } if let Some(slot) = &self.slot { args.push("--slot".into()); args.push(slot.into()); } if self.synchronous { args.push("--synchronous".into()); } if self.verbose { args.push("--verbose".into()); } if self.version { args.push("--version".into()); } if let Some(compress) = &self.compress { args.push("--compress".into()); args.push(compress.into()); } if self.help { args.push("--help".into()); } if let Some(dbname) = &self.dbname { args.push("--dbname".into()); args.push(dbname.into()); } if let Some(host) = &self.host { args.push("--host".into()); args.push(host.into()); } if let Some(port) = &self.port { args.push("--port".into()); args.push(port.to_string().into()); } if let Some(username) = &self.username { args.push("--username".into()); args.push(username.into()); } if self.no_password { args.push("--no-password".into()); } if self.password { args.push("--password".into()); } if self.create_slot { args.push("--create-slot".into()); } if self.drop_slot { args.push("--drop-slot".into()); } args } /// Get the 
environment variables for the command fn get_envs(&self) -> Vec<(OsString, OsString)> { let mut envs: Vec<(OsString, OsString)> = self.envs.clone(); if let Some(password) = &self.pg_password { envs.push(("PGPASSWORD".into(), password.into())); } envs } /// Set an environment variable for the command fn env>(mut self, key: S, value: S) -> Self { self.envs .push((key.as_ref().to_os_string(), value.as_ref().to_os_string())); self } } #[cfg(test)] mod tests { use super::*; use crate::TestSettings; use crate::TestSocketSettings; use crate::traits::CommandToString; use test_log::test; #[test] fn test_builder_new() { let command = PgReceiveWalBuilder::new().program_dir(".").build(); assert_eq!( PathBuf::from(".").join("pg_receivewal"), PathBuf::from(command.to_command_string().replace('"', "")) ); } #[test] fn test_builder_from() { let command = PgReceiveWalBuilder::from(&TestSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGPASSWORD="password" "./pg_receivewal" "#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\pg_receivewal" "#; assert_eq!( format!( r#"{command_prefix}"--host" "localhost" "--port" "5432" "--username" "postgres""# ), command.to_command_string() ); } #[test] fn test_builder_from_socket() { let command = PgReceiveWalBuilder::from(&TestSocketSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGPASSWORD="password" "./pg_receivewal" "#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\pg_receivewal" "#; assert_eq!( format!( r#"{command_prefix}"--host" "/tmp/pg_socket" "--port" "5432" "--username" "postgres""# ), command.to_command_string() ); } #[test] fn test_builder() { let command = PgReceiveWalBuilder::new() .env("PGDATABASE", "database") .directory("directory") .endpos("endpos") .if_not_exists() .no_loop() .no_sync() .status_interval("status_interval") .slot("slot") .synchronous() .verbose() .version() .compress("compress") .help() .dbname("dbname") .host("localhost") 
.port(5432) .username("username") .no_password() .password() .pg_password("password") .create_slot() .drop_slot() .build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGDATABASE="database" PGPASSWORD="password" "#; #[cfg(target_os = "windows")] let command_prefix = String::new(); assert_eq!( format!( r#"{command_prefix}"pg_receivewal" "--directory" "directory" "--endpos" "endpos" "--if-not-exists" "--no-loop" "--no-sync" "--status-interval" "status_interval" "--slot" "slot" "--synchronous" "--verbose" "--version" "--compress" "compress" "--help" "--dbname" "dbname" "--host" "localhost" "--port" "5432" "--username" "username" "--no-password" "--password" "--create-slot" "--drop-slot""# ), command.to_command_string() ); } } ================================================ FILE: postgresql_commands/src/pg_recvlogical.rs ================================================ use crate::Settings; use crate::traits::CommandBuilder; use std::convert::AsRef; use std::ffi::{OsStr, OsString}; use std::path::PathBuf; /// `pg_recvlogical` controls `PostgreSQL` logical decoding streams. 
#[derive(Clone, Debug, Default)] pub struct PgRecvLogicalBuilder { program_dir: Option, envs: Vec<(OsString, OsString)>, create_slot: bool, drop_slot: bool, start: bool, endpos: Option, file: Option, fsync_interval: Option, if_not_exists: bool, startpos: Option, no_loop: bool, option: Option, plugin: Option, status_interval: Option, slot: Option, two_phase: bool, verbose: bool, version: bool, help: bool, dbname: Option, host: Option, port: Option, username: Option, no_password: bool, password: bool, pg_password: Option, } impl PgRecvLogicalBuilder { /// Create a new [`PgRecvLogicalBuilder`] #[must_use] pub fn new() -> Self { Self::default() } /// Create a new [`PgRecvLogicalBuilder`] from [Settings] pub fn from(settings: &dyn Settings) -> Self { let mut builder = Self::new() .program_dir(settings.get_binary_dir()) .host(settings.get_host()) .port(settings.get_port()) .username(settings.get_username()) .pg_password(settings.get_password()); if let Some(socket_dir) = settings.get_socket_dir() { builder = builder.host(socket_dir.to_string_lossy().to_string()); } builder } /// Location of the program binary #[must_use] pub fn program_dir>(mut self, path: P) -> Self { self.program_dir = Some(path.into()); self } /// create a new replication slot #[must_use] pub fn create_slot(mut self) -> Self { self.create_slot = true; self } /// drop the replication slot #[must_use] pub fn drop_slot(mut self) -> Self { self.drop_slot = true; self } /// start streaming in a replication slot #[must_use] pub fn start(mut self) -> Self { self.start = true; self } /// exit after receiving the specified LSN #[must_use] pub fn endpos>(mut self, endpos: S) -> Self { self.endpos = Some(endpos.as_ref().to_os_string()); self } /// receive log into this file, - for stdout #[must_use] pub fn file>(mut self, file: S) -> Self { self.file = Some(file.as_ref().to_os_string()); self } /// time between fsyncs to the output file (default: 10) #[must_use] pub fn fsync_interval>(mut self, fsync_interval: 
S) -> Self { self.fsync_interval = Some(fsync_interval.as_ref().to_os_string()); self } /// do not error if slot already exists when creating a slot #[must_use] pub fn if_not_exists(mut self) -> Self { self.if_not_exists = true; self } /// where in an existing slot should the streaming start #[must_use] pub fn startpos>(mut self, startpos: S) -> Self { self.startpos = Some(startpos.as_ref().to_os_string()); self } /// do not loop on connection lost #[must_use] pub fn no_loop(mut self) -> Self { self.no_loop = true; self } /// pass option NAME with optional value VALUE to the output plugin #[must_use] pub fn option>(mut self, option: S) -> Self { self.option = Some(option.as_ref().to_os_string()); self } /// use output plugin PLUGIN (default: `test_decoding`) #[must_use] pub fn plugin>(mut self, plugin: S) -> Self { self.plugin = Some(plugin.as_ref().to_os_string()); self } /// time between status packets sent to server (default: 10) #[must_use] pub fn status_interval>(mut self, status_interval: S) -> Self { self.status_interval = Some(status_interval.as_ref().to_os_string()); self } /// name of the logical replication slot #[must_use] pub fn slot>(mut self, slot: S) -> Self { self.slot = Some(slot.as_ref().to_os_string()); self } /// enable decoding of prepared transactions when creating a slot #[must_use] pub fn two_phase(mut self) -> Self { self.two_phase = true; self } /// output verbose messages #[must_use] pub fn verbose(mut self) -> Self { self.verbose = true; self } /// output version information, then exit #[must_use] pub fn version(mut self) -> Self { self.version = true; self } /// show help, then exit #[must_use] pub fn help(mut self) -> Self { self.help = true; self } /// database to connect to #[must_use] pub fn dbname>(mut self, dbname: S) -> Self { self.dbname = Some(dbname.as_ref().to_os_string()); self } /// database server host or socket directory #[must_use] pub fn host>(mut self, host: S) -> Self { self.host = Some(host.as_ref().to_os_string()); 
self } /// database server port number #[must_use] pub fn port(mut self, port: u16) -> Self { self.port = Some(port); self } /// connect as specified database user #[must_use] pub fn username>(mut self, username: S) -> Self { self.username = Some(username.as_ref().to_os_string()); self } /// never prompt for password #[must_use] pub fn no_password(mut self) -> Self { self.no_password = true; self } /// force password prompt (should happen automatically) #[must_use] pub fn password(mut self) -> Self { self.password = true; self } /// user password #[must_use] pub fn pg_password>(mut self, pg_password: S) -> Self { self.pg_password = Some(pg_password.as_ref().to_os_string()); self } } impl CommandBuilder for PgRecvLogicalBuilder { /// Get the program name fn get_program(&self) -> &'static OsStr { "pg_recvlogical".as_ref() } /// Location of the program binary fn get_program_dir(&self) -> &Option { &self.program_dir } /// Get the arguments for the command fn get_args(&self) -> Vec { let mut args: Vec = Vec::new(); if self.create_slot { args.push("--create-slot".into()); } if self.drop_slot { args.push("--drop-slot".into()); } if self.start { args.push("--start".into()); } if let Some(endpos) = &self.endpos { args.push("--endpos".into()); args.push(endpos.into()); } if let Some(file) = &self.file { args.push("--file".into()); args.push(file.into()); } if let Some(fsync_interval) = &self.fsync_interval { args.push("--fsync-interval".into()); args.push(fsync_interval.into()); } if self.if_not_exists { args.push("--if-not-exists".into()); } if let Some(startpos) = &self.startpos { args.push("--startpos".into()); args.push(startpos.into()); } if self.no_loop { args.push("--no-loop".into()); } if let Some(option) = &self.option { args.push("--option".into()); args.push(option.into()); } if let Some(plugin) = &self.plugin { args.push("--plugin".into()); args.push(plugin.into()); } if let Some(status_interval) = &self.status_interval { args.push("--status-interval".into()); 
args.push(status_interval.into()); } if let Some(slot) = &self.slot { args.push("--slot".into()); args.push(slot.into()); } if self.two_phase { args.push("--two-phase".into()); } if self.verbose { args.push("--verbose".into()); } if self.version { args.push("--version".into()); } if self.help { args.push("--help".into()); } if let Some(dbname) = &self.dbname { args.push("--dbname".into()); args.push(dbname.into()); } if let Some(host) = &self.host { args.push("--host".into()); args.push(host.into()); } if let Some(port) = &self.port { args.push("--port".into()); args.push(port.to_string().into()); } if let Some(username) = &self.username { args.push("--username".into()); args.push(username.into()); } if self.no_password { args.push("--no-password".into()); } if self.password { args.push("--password".into()); } args } /// Get the environment variables for the command fn get_envs(&self) -> Vec<(OsString, OsString)> { let mut envs: Vec<(OsString, OsString)> = self.envs.clone(); if let Some(password) = &self.pg_password { envs.push(("PGPASSWORD".into(), password.into())); } envs } /// Set an environment variable for the command fn env>(mut self, key: S, value: S) -> Self { self.envs .push((key.as_ref().to_os_string(), value.as_ref().to_os_string())); self } } #[cfg(test)] mod tests { use super::*; use crate::TestSettings; use crate::TestSocketSettings; use crate::traits::CommandToString; use test_log::test; #[test] fn test_builder_new() { let command = PgRecvLogicalBuilder::new().program_dir(".").build(); assert_eq!( PathBuf::from(".").join("pg_recvlogical"), PathBuf::from(command.to_command_string().replace('"', "")) ); } #[test] fn test_builder_from() { let command = PgRecvLogicalBuilder::from(&TestSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGPASSWORD="password" "./pg_recvlogical" "#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\pg_recvlogical" "#; assert_eq!( format!( r#"{command_prefix}"--host" "localhost" "--port" 
"5432" "--username" "postgres""# ), command.to_command_string() ); } #[test] fn test_builder_from_socket() { let command = PgRecvLogicalBuilder::from(&TestSocketSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGPASSWORD="password" "./pg_recvlogical" "#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\pg_recvlogical" "#; assert_eq!( format!( r#"{command_prefix}"--host" "/tmp/pg_socket" "--port" "5432" "--username" "postgres""# ), command.to_command_string() ); } #[test] fn test_builder() { let command = PgRecvLogicalBuilder::new() .env("PGDATABASE", "database") .create_slot() .drop_slot() .start() .endpos("endpos") .file("file") .fsync_interval("fsync_interval") .if_not_exists() .startpos("startpos") .no_loop() .option("option") .plugin("plugin") .status_interval("status_interval") .slot("slot") .two_phase() .verbose() .version() .help() .dbname("dbname") .host("localhost") .port(5432) .username("username") .no_password() .password() .pg_password("password") .build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGDATABASE="database" PGPASSWORD="password" "#; #[cfg(target_os = "windows")] let command_prefix = String::new(); assert_eq!( format!( r#"{command_prefix}"pg_recvlogical" "--create-slot" "--drop-slot" "--start" "--endpos" "endpos" "--file" "file" "--fsync-interval" "fsync_interval" "--if-not-exists" "--startpos" "startpos" "--no-loop" "--option" "option" "--plugin" "plugin" "--status-interval" "status_interval" "--slot" "slot" "--two-phase" "--verbose" "--version" "--help" "--dbname" "dbname" "--host" "localhost" "--port" "5432" "--username" "username" "--no-password" "--password""# ), command.to_command_string() ); } } ================================================ FILE: postgresql_commands/src/pg_resetwal.rs ================================================ use crate::Settings; use crate::traits::CommandBuilder; use std::convert::AsRef; use std::ffi::{OsStr, OsString}; use std::path::PathBuf; /// 
`pg_resetwal` resets the `PostgreSQL` write-ahead log. #[derive(Clone, Debug, Default)] pub struct PgResetWalBuilder { program_dir: Option, envs: Vec<(OsString, OsString)>, commit_timestamp_ids: Option<(OsString, OsString)>, pgdata: Option, epoch: Option, force: bool, next_wal_file: Option, multixact_ids: Option<(OsString, OsString)>, dry_run: bool, next_oid: Option, multixact_offset: Option, oldest_transaction_id: Option, version: bool, next_transaction_id: Option, wal_segsize: Option, help: bool, } impl PgResetWalBuilder { /// Create a new [`PgResetWalBuilder`] #[must_use] pub fn new() -> Self { Self::default() } /// Create a new [`PgResetWalBuilder`] from [Settings] pub fn from(settings: &dyn Settings) -> Self { Self::new().program_dir(settings.get_binary_dir()) } /// Location of the program binary #[must_use] pub fn program_dir>(mut self, path: P) -> Self { self.program_dir = Some(path.into()); self } /// set oldest and newest transactions bearing commit timestamp (zero means no change) #[must_use] pub fn commit_timestamp_ids>(mut self, xid1: S, xid2: S) -> Self { self.commit_timestamp_ids = Some((xid1.as_ref().into(), xid2.as_ref().into())); self } /// data directory #[must_use] pub fn pgdata>(mut self, datadir: P) -> Self { self.pgdata = Some(datadir.into()); self } /// set next transaction ID epoch #[must_use] pub fn epoch>(mut self, xidepoch: S) -> Self { self.epoch = Some(xidepoch.as_ref().to_os_string()); self } /// force update to be done #[must_use] pub fn force(mut self) -> Self { self.force = true; self } /// set minimum starting location for new WAL #[must_use] pub fn next_wal_file>(mut self, walfile: S) -> Self { self.next_wal_file = Some(walfile.as_ref().to_os_string()); self } /// set next and oldest multitransaction ID #[must_use] pub fn multixact_ids>(mut self, mxid1: S, mxid2: S) -> Self { self.multixact_ids = Some((mxid1.as_ref().into(), mxid2.as_ref().into())); self } /// no update, just show what would be done #[must_use] pub fn dry_run(mut 
self) -> Self { self.dry_run = true; self } /// set next OID #[must_use] pub fn next_oid>(mut self, oid: S) -> Self { self.next_oid = Some(oid.as_ref().to_os_string()); self } /// set next multitransaction offset #[must_use] pub fn multixact_offset>(mut self, offset: S) -> Self { self.multixact_offset = Some(offset.as_ref().to_os_string()); self } /// set oldest transaction ID #[must_use] pub fn oldest_transaction_id>(mut self, xid: S) -> Self { self.oldest_transaction_id = Some(xid.as_ref().to_os_string()); self } /// output version information, then exit #[must_use] pub fn version(mut self) -> Self { self.version = true; self } /// set next transaction ID #[must_use] pub fn next_transaction_id>(mut self, xid: S) -> Self { self.next_transaction_id = Some(xid.as_ref().to_os_string()); self } /// size of WAL segments, in megabytes #[must_use] pub fn wal_segsize>(mut self, size: S) -> Self { self.wal_segsize = Some(size.as_ref().to_os_string()); self } /// show help, then exit #[must_use] pub fn help(mut self) -> Self { self.help = true; self } } impl CommandBuilder for PgResetWalBuilder { /// Get the program name fn get_program(&self) -> &'static OsStr { "pg_resetwal".as_ref() } /// Location of the program binary fn get_program_dir(&self) -> &Option { &self.program_dir } /// Get the arguments for the command fn get_args(&self) -> Vec { let mut args: Vec = Vec::new(); if let Some((xid1, xid2)) = &self.commit_timestamp_ids { args.push("--commit-timestamp-ids".into()); args.push(format!("{},{}", xid1.to_string_lossy(), xid2.to_string_lossy()).into()); } if let Some(datadir) = &self.pgdata { args.push("--pgdata".into()); args.push(datadir.into()); } if let Some(xidepoch) = &self.epoch { args.push("--epoch".into()); args.push(xidepoch.into()); } if self.force { args.push("--force".into()); } if let Some(walfile) = &self.next_wal_file { args.push("--next-wal-file".into()); args.push(walfile.into()); } if let Some((mxid1, mxid2)) = &self.multixact_ids { 
args.push("--multixact-ids".into()); args.push(format!("{},{}", mxid1.to_string_lossy(), mxid2.to_string_lossy()).into()); } if self.dry_run { args.push("--dry-run".into()); } if let Some(oid) = &self.next_oid { args.push("--next-oid".into()); args.push(oid.into()); } if let Some(offset) = &self.multixact_offset { args.push("--multixact-offset".into()); args.push(offset.into()); } if let Some(xid) = &self.oldest_transaction_id { args.push("--oldest-transaction-id".into()); args.push(xid.into()); } if self.version { args.push("--version".into()); } if let Some(xid) = &self.next_transaction_id { args.push("--next-transaction-id".into()); args.push(xid.into()); } if let Some(size) = &self.wal_segsize { args.push("--wal-segsize".into()); args.push(size.into()); } if self.help { args.push("--help".into()); } args } /// Get the environment variables for the command fn get_envs(&self) -> Vec<(OsString, OsString)> { self.envs.clone() } /// Set an environment variable for the command fn env>(mut self, key: S, value: S) -> Self { self.envs .push((key.as_ref().to_os_string(), value.as_ref().to_os_string())); self } } #[cfg(test)] mod tests { use super::*; use crate::TestSettings; use crate::traits::CommandToString; use test_log::test; #[test] fn test_builder_new() { let command = PgResetWalBuilder::new().program_dir(".").build(); assert_eq!( PathBuf::from(".").join("pg_resetwal"), PathBuf::from(command.to_command_string().replace('"', "")) ); } #[test] fn test_builder_from() { let command = PgResetWalBuilder::from(&TestSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#""./pg_resetwal""#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\pg_resetwal""#; assert_eq!(format!("{command_prefix}"), command.to_command_string()); } #[test] fn test_builder() { let command = PgResetWalBuilder::new() .env("PGDATABASE", "database") .commit_timestamp_ids("1", "2") .pgdata("pgdata") .epoch("epoch") .force() .next_wal_file("next_wal_file") 
.multixact_ids("3", "4") .dry_run() .next_oid("next_oid") .multixact_offset("multixact_offset") .oldest_transaction_id("oldest_transaction_id") .version() .next_transaction_id("next_transaction_id") .wal_segsize("wal_segsize") .help() .build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGDATABASE="database" "#; #[cfg(target_os = "windows")] let command_prefix = String::new(); assert_eq!( format!( r#"{command_prefix}"pg_resetwal" "--commit-timestamp-ids" "1,2" "--pgdata" "pgdata" "--epoch" "epoch" "--force" "--next-wal-file" "next_wal_file" "--multixact-ids" "3,4" "--dry-run" "--next-oid" "next_oid" "--multixact-offset" "multixact_offset" "--oldest-transaction-id" "oldest_transaction_id" "--version" "--next-transaction-id" "next_transaction_id" "--wal-segsize" "wal_segsize" "--help""# ), command.to_command_string() ); } } ================================================ FILE: postgresql_commands/src/pg_restore.rs ================================================ use crate::Settings; use crate::traits::CommandBuilder; use std::convert::AsRef; use std::ffi::{OsStr, OsString}; use std::path::PathBuf; /// `pg_restore` restores a `PostgreSQL` database from an archive created by `pg_dump`. 
#[derive(Clone, Debug, Default)] pub struct PgRestoreBuilder { program_dir: Option, envs: Vec<(OsString, OsString)>, dbname: Option, file: Option, format: Option, list: bool, verbose: bool, version: bool, help: bool, data_only: bool, clean: bool, create: bool, exit_on_error: bool, index: Option, jobs: Option, use_list: Option, schema: Option, exclude_schema: Option, no_owner: bool, function: Option, schema_only: bool, superuser: Option, table: Option, trigger: Option, no_privileges: bool, single_transaction: bool, disable_triggers: bool, enable_row_security: bool, if_exists: bool, no_comments: bool, no_data_for_failed_tables: bool, no_publications: bool, no_security_labels: bool, no_subscriptions: bool, no_table_access_method: bool, no_tablespaces: bool, section: Option, strict_names: bool, use_set_session_authorization: bool, host: Option, port: Option, username: Option, no_password: bool, password: bool, pg_password: Option, role: Option, } impl PgRestoreBuilder { /// Create a new [`PgRestoreBuilder`] #[must_use] pub fn new() -> Self { Self::default() } /// Create a new [`PgRestoreBuilder`] from [Settings] pub fn from(settings: &dyn Settings) -> Self { let mut builder = Self::new() .program_dir(settings.get_binary_dir()) .host(settings.get_host()) .port(settings.get_port()) .username(settings.get_username()) .pg_password(settings.get_password()); if let Some(socket_dir) = settings.get_socket_dir() { builder = builder.host(socket_dir.to_string_lossy().to_string()); } builder } /// Location of the program binary #[must_use] pub fn program_dir>(mut self, path: P) -> Self { self.program_dir = Some(path.into()); self } /// connect to database name #[must_use] pub fn dbname>(mut self, name: S) -> Self { self.dbname = Some(name.as_ref().to_os_string()); self } /// output file name (- for stdout) #[must_use] pub fn file>(mut self, filename: S) -> Self { self.file = Some(filename.as_ref().to_os_string()); self } /// backup file format (should be automatic) #[must_use] pub 
fn format>(mut self, format: S) -> Self { self.format = Some(format.as_ref().to_os_string()); self } /// print summarized TOC of the archive #[must_use] pub fn list(mut self) -> Self { self.list = true; self } /// verbose mode #[must_use] pub fn verbose(mut self) -> Self { self.verbose = true; self } /// output version information, then exit #[must_use] pub fn version(mut self) -> Self { self.version = true; self } /// show help, then exit #[must_use] pub fn help(mut self) -> Self { self.help = true; self } /// restore only the data, no schema #[must_use] pub fn data_only(mut self) -> Self { self.data_only = true; self } /// clean (drop) database objects before recreating #[must_use] pub fn clean(mut self) -> Self { self.clean = true; self } /// create the target database #[must_use] pub fn create(mut self) -> Self { self.create = true; self } /// exit on error, default is to continue #[must_use] pub fn exit_on_error(mut self) -> Self { self.exit_on_error = true; self } /// restore named index #[must_use] pub fn index>(mut self, name: S) -> Self { self.index = Some(name.as_ref().to_os_string()); self } /// use this many parallel jobs to restore #[must_use] pub fn jobs>(mut self, num: S) -> Self { self.jobs = Some(num.as_ref().to_os_string()); self } /// use table of contents from this file for selecting/ordering output #[must_use] pub fn use_list>(mut self, filename: S) -> Self { self.use_list = Some(filename.as_ref().to_os_string()); self } /// restore only objects in this schema #[must_use] pub fn schema>(mut self, name: S) -> Self { self.schema = Some(name.as_ref().to_os_string()); self } /// do not restore objects in this schema #[must_use] pub fn exclude_schema>(mut self, name: S) -> Self { self.exclude_schema = Some(name.as_ref().to_os_string()); self } /// skip restoration of object ownership #[must_use] pub fn no_owner(mut self) -> Self { self.no_owner = true; self } /// restore named function #[must_use] pub fn function>(mut self, name: S) -> Self { 
self.function = Some(name.as_ref().to_os_string()); self } /// restore only the schema, no data #[must_use] pub fn schema_only(mut self) -> Self { self.schema_only = true; self } /// superuser user name to use for disabling triggers #[must_use] pub fn superuser>(mut self, name: S) -> Self { self.superuser = Some(name.as_ref().to_os_string()); self } /// restore named relation (table, view, etc.) #[must_use] pub fn table>(mut self, name: S) -> Self { self.table = Some(name.as_ref().to_os_string()); self } /// restore named trigger #[must_use] pub fn trigger>(mut self, name: S) -> Self { self.trigger = Some(name.as_ref().to_os_string()); self } /// skip restoration of access privileges (grant/revoke) #[must_use] pub fn no_privileges(mut self) -> Self { self.no_privileges = true; self } /// restore as a single transaction #[must_use] pub fn single_transaction(mut self) -> Self { self.single_transaction = true; self } /// disable triggers during data-only restore #[must_use] pub fn disable_triggers(mut self) -> Self { self.disable_triggers = true; self } /// enable row security #[must_use] pub fn enable_row_security(mut self) -> Self { self.enable_row_security = true; self } /// use IF EXISTS when dropping objects #[must_use] pub fn if_exists(mut self) -> Self { self.if_exists = true; self } /// do not restore comments #[must_use] pub fn no_comments(mut self) -> Self { self.no_comments = true; self } /// do not restore data of tables that could not be created #[must_use] pub fn no_data_for_failed_tables(mut self) -> Self { self.no_data_for_failed_tables = true; self } /// do not restore publications #[must_use] pub fn no_publications(mut self) -> Self { self.no_publications = true; self } /// do not restore security labels #[must_use] pub fn no_security_labels(mut self) -> Self { self.no_security_labels = true; self } /// do not restore subscriptions #[must_use] pub fn no_subscriptions(mut self) -> Self { self.no_subscriptions = true; self } /// do not restore table 
access methods #[must_use] pub fn no_table_access_method(mut self) -> Self { self.no_table_access_method = true; self } /// do not restore tablespace assignments #[must_use] pub fn no_tablespaces(mut self) -> Self { self.no_tablespaces = true; self } /// restore named section (pre-data, data, or post-data) #[must_use] pub fn section>(mut self, section: S) -> Self { self.section = Some(section.as_ref().to_os_string()); self } /// require table and/or schema include patterns to match at least one entity each #[must_use] pub fn strict_names(mut self) -> Self { self.strict_names = true; self } /// use SET SESSION AUTHORIZATION commands instead of ALTER OWNER commands to set ownership #[must_use] pub fn use_set_session_authorization(mut self) -> Self { self.use_set_session_authorization = true; self } /// database server host or socket directory #[must_use] pub fn host>(mut self, hostname: S) -> Self { self.host = Some(hostname.as_ref().to_os_string()); self } /// database server port number #[must_use] pub fn port(mut self, port: u16) -> Self { self.port = Some(port); self } /// connect as specified database user #[must_use] pub fn username>(mut self, name: S) -> Self { self.username = Some(name.as_ref().to_os_string()); self } /// never prompt for password #[must_use] pub fn no_password(mut self) -> Self { self.no_password = true; self } /// force password prompt (should happen automatically) #[must_use] pub fn password(mut self) -> Self { self.password = true; self } /// user password #[must_use] pub fn pg_password>(mut self, pg_password: S) -> Self { self.pg_password = Some(pg_password.as_ref().to_os_string()); self } /// do SET ROLE before restore #[must_use] pub fn role>(mut self, rolename: S) -> Self { self.role = Some(rolename.as_ref().to_os_string()); self } } impl CommandBuilder for PgRestoreBuilder { /// Get the program name fn get_program(&self) -> &'static OsStr { "pg_restore".as_ref() } /// Location of the program binary fn get_program_dir(&self) -> 
&Option { &self.program_dir } /// Get the arguments for the command #[expect(clippy::too_many_lines)] fn get_args(&self) -> Vec { let mut args: Vec = Vec::new(); if let Some(name) = &self.dbname { args.push("--dbname".into()); args.push(name.into()); } if let Some(filename) = &self.file { args.push("--file".into()); args.push(filename.into()); } if let Some(format) = &self.format { args.push("--format".into()); args.push(format.into()); } if self.list { args.push("--list".into()); } if self.verbose { args.push("--verbose".into()); } if self.version { args.push("--version".into()); } if self.help { args.push("--help".into()); } if self.data_only { args.push("--data-only".into()); } if self.clean { args.push("--clean".into()); } if self.create { args.push("--create".into()); } if self.exit_on_error { args.push("--exit-on-error".into()); } if let Some(name) = &self.index { args.push("--index".into()); args.push(name.into()); } if let Some(num) = &self.jobs { args.push("--jobs".into()); args.push(num.into()); } if let Some(filename) = &self.use_list { args.push("--use-list".into()); args.push(filename.into()); } if let Some(name) = &self.schema { args.push("--schema".into()); args.push(name.into()); } if let Some(name) = &self.exclude_schema { args.push("--exclude-schema".into()); args.push(name.into()); } if self.no_owner { args.push("--no-owner".into()); } if let Some(name) = &self.function { args.push("--function".into()); args.push(name.into()); } if self.schema_only { args.push("--schema-only".into()); } if let Some(name) = &self.superuser { args.push("--superuser".into()); args.push(name.into()); } if let Some(name) = &self.table { args.push("--table".into()); args.push(name.into()); } if let Some(name) = &self.trigger { args.push("--trigger".into()); args.push(name.into()); } if self.no_privileges { args.push("--no-privileges".into()); } if self.single_transaction { args.push("--single-transaction".into()); } if self.disable_triggers { 
args.push("--disable-triggers".into()); } if self.enable_row_security { args.push("--enable-row-security".into()); } if self.if_exists { args.push("--if-exists".into()); } if self.no_comments { args.push("--no-comments".into()); } if self.no_data_for_failed_tables { args.push("--no-data-for-failed-tables".into()); } if self.no_publications { args.push("--no-publications".into()); } if self.no_security_labels { args.push("--no-security-labels".into()); } if self.no_subscriptions { args.push("--no-subscriptions".into()); } if self.no_table_access_method { args.push("--no-table-access-method".into()); } if self.no_tablespaces { args.push("--no-tablespaces".into()); } if let Some(section) = &self.section { args.push("--section".into()); args.push(section.into()); } if self.strict_names { args.push("--strict-names".into()); } if self.use_set_session_authorization { args.push("--use-set-session-authorization".into()); } if let Some(hostname) = &self.host { args.push("--host".into()); args.push(hostname.into()); } if let Some(port) = &self.port { args.push("--port".into()); args.push(port.to_string().into()); } if let Some(name) = &self.username { args.push("--username".into()); args.push(name.into()); } if self.no_password { args.push("--no-password".into()); } if self.password { args.push("--password".into()); } if let Some(role) = &self.role { args.push("--role".into()); args.push(role.into()); } args } /// Get the environment variables for the command fn get_envs(&self) -> Vec<(OsString, OsString)> { let mut envs: Vec<(OsString, OsString)> = self.envs.clone(); if let Some(password) = &self.pg_password { envs.push(("PGPASSWORD".into(), password.into())); } envs } /// Set an environment variable for the command fn env>(mut self, key: S, value: S) -> Self { self.envs .push((key.as_ref().to_os_string(), value.as_ref().to_os_string())); self } } #[cfg(test)] mod tests { use super::*; use crate::TestSettings; use crate::TestSocketSettings; use 
crate::traits::CommandToString; use test_log::test; #[test] fn test_builder_new() { let command = PgRestoreBuilder::new().program_dir(".").build(); assert_eq!( PathBuf::from(".").join("pg_restore"), PathBuf::from(command.to_command_string().replace('"', "")) ); } #[test] fn test_builder_from() { let command = PgRestoreBuilder::from(&TestSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGPASSWORD="password" "./pg_restore" "#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\pg_restore" "#; assert_eq!( format!( r#"{command_prefix}"--host" "localhost" "--port" "5432" "--username" "postgres""# ), command.to_command_string() ); } #[test] fn test_builder_from_socket() { let command = PgRestoreBuilder::from(&TestSocketSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGPASSWORD="password" "./pg_restore" "#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\pg_restore" "#; assert_eq!( format!( r#"{command_prefix}"--host" "/tmp/pg_socket" "--port" "5432" "--username" "postgres""# ), command.to_command_string() ); } #[test] fn test_builder() { let command = PgRestoreBuilder::new() .env("PGDATABASE", "database") .dbname("dbname") .file("file") .format("format") .list() .verbose() .version() .help() .data_only() .clean() .create() .exit_on_error() .index("index") .jobs("jobs") .use_list("use_list") .schema("schema") .exclude_schema("exclude_schema") .no_owner() .function("function") .schema_only() .superuser("superuser") .table("table") .trigger("trigger") .no_privileges() .single_transaction() .disable_triggers() .enable_row_security() .if_exists() .no_comments() .no_data_for_failed_tables() .no_publications() .no_security_labels() .no_subscriptions() .no_table_access_method() .no_tablespaces() .section("section") .strict_names() .use_set_session_authorization() .host("localhost") .port(5432) .username("username") .no_password() .password() .pg_password("password") .role("role") .build(); 
#[cfg(not(target_os = "windows"))] let command_prefix = r#"PGDATABASE="database" PGPASSWORD="password" "#; #[cfg(target_os = "windows")] let command_prefix = String::new(); assert_eq!( format!( r#"{command_prefix}"pg_restore" "--dbname" "dbname" "--file" "file" "--format" "format" "--list" "--verbose" "--version" "--help" "--data-only" "--clean" "--create" "--exit-on-error" "--index" "index" "--jobs" "jobs" "--use-list" "use_list" "--schema" "schema" "--exclude-schema" "exclude_schema" "--no-owner" "--function" "function" "--schema-only" "--superuser" "superuser" "--table" "table" "--trigger" "trigger" "--no-privileges" "--single-transaction" "--disable-triggers" "--enable-row-security" "--if-exists" "--no-comments" "--no-data-for-failed-tables" "--no-publications" "--no-security-labels" "--no-subscriptions" "--no-table-access-method" "--no-tablespaces" "--section" "section" "--strict-names" "--use-set-session-authorization" "--host" "localhost" "--port" "5432" "--username" "username" "--no-password" "--password" "--role" "role""# ), command.to_command_string() ); } } ================================================ FILE: postgresql_commands/src/pg_rewind.rs ================================================ use crate::Settings; use crate::traits::CommandBuilder; use std::convert::AsRef; use std::ffi::{OsStr, OsString}; use std::path::PathBuf; /// `pg_rewind` synchronizes a `PostgreSQL` data directory with another data directory. 
#[derive(Clone, Debug, Default)] pub struct PgRewindBuilder { program_dir: Option, envs: Vec<(OsString, OsString)>, restore_target_wal: bool, target_pgdata: Option, source_pgdata: Option, source_server: Option, dry_run: bool, no_sync: bool, progress: bool, write_recovery_conf: bool, config_file: Option, debug: bool, no_ensure_shutdown: bool, version: bool, help: bool, } impl PgRewindBuilder { /// Create a new [`PgRewindBuilder`] #[must_use] pub fn new() -> Self { Self::default() } /// Create a new [`PgRewindBuilder`] from [Settings] pub fn from(settings: &dyn Settings) -> Self { Self::new().program_dir(settings.get_binary_dir()) } /// Location of the program binary #[must_use] pub fn program_dir>(mut self, path: P) -> Self { self.program_dir = Some(path.into()); self } /// use `restore_command` in target configuration to retrieve WAL files from archives #[must_use] pub fn restore_target_wal(mut self) -> Self { self.restore_target_wal = true; self } /// existing data directory to modify #[must_use] pub fn target_pgdata>(mut self, directory: P) -> Self { self.target_pgdata = Some(directory.into()); self } /// source data directory to synchronize with #[must_use] pub fn source_pgdata>(mut self, directory: P) -> Self { self.source_pgdata = Some(directory.into()); self } /// source server to synchronize with #[must_use] pub fn source_server>(mut self, connstr: S) -> Self { self.source_server = Some(connstr.as_ref().to_os_string()); self } /// stop before modifying anything #[must_use] pub fn dry_run(mut self) -> Self { self.dry_run = true; self } /// do not wait for changes to be written safely to disk #[must_use] pub fn no_sync(mut self) -> Self { self.no_sync = true; self } /// write progress messages #[must_use] pub fn progress(mut self) -> Self { self.progress = true; self } /// write configuration for replication (requires --source-server) #[must_use] pub fn write_recovery_conf(mut self) -> Self { self.write_recovery_conf = true; self } /// use specified main 
server configuration file when running target cluster #[must_use] pub fn config_file>(mut self, filename: S) -> Self { self.config_file = Some(filename.as_ref().to_os_string()); self } /// write a lot of debug messages #[must_use] pub fn debug(mut self) -> Self { self.debug = true; self } /// do not automatically fix unclean shutdown #[must_use] pub fn no_ensure_shutdown(mut self) -> Self { self.no_ensure_shutdown = true; self } /// output version information, then exit #[must_use] pub fn version(mut self) -> Self { self.version = true; self } /// show help, then exit #[must_use] pub fn help(mut self) -> Self { self.help = true; self } } impl CommandBuilder for PgRewindBuilder { /// Get the program name fn get_program(&self) -> &'static OsStr { "pg_rewind".as_ref() } /// Location of the program binary fn get_program_dir(&self) -> &Option { &self.program_dir } /// Get the arguments for the command fn get_args(&self) -> Vec { let mut args: Vec = Vec::new(); if self.restore_target_wal { args.push("--restore-target-wal".into()); } if let Some(directory) = &self.target_pgdata { args.push("--target-pgdata".into()); args.push(directory.into()); } if let Some(directory) = &self.source_pgdata { args.push("--source-pgdata".into()); args.push(directory.into()); } if let Some(connstr) = &self.source_server { args.push("--source-server".into()); args.push(connstr.into()); } if self.dry_run { args.push("--dry-run".into()); } if self.no_sync { args.push("--no-sync".into()); } if self.progress { args.push("--progress".into()); } if self.write_recovery_conf { args.push("--write-recovery-conf".into()); } if let Some(filename) = &self.config_file { args.push("--config-file".into()); args.push(filename.into()); } if self.debug { args.push("--debug".into()); } if self.no_ensure_shutdown { args.push("--no-ensure-shutdown".into()); } if self.version { args.push("--version".into()); } if self.help { args.push("--help".into()); } args } /// Get the environment variables for the command fn 
get_envs(&self) -> Vec<(OsString, OsString)> { self.envs.clone() } /// Set an environment variable for the command fn env>(mut self, key: S, value: S) -> Self { self.envs .push((key.as_ref().to_os_string(), value.as_ref().to_os_string())); self } } #[cfg(test)] mod tests { use super::*; use crate::TestSettings; use crate::traits::CommandToString; use test_log::test; #[test] fn test_builder_new() { let command = PgRewindBuilder::new().program_dir(".").build(); assert_eq!( PathBuf::from(".").join("pg_rewind"), PathBuf::from(command.to_command_string().replace('"', "")) ); } #[test] fn test_builder_from() { let command = PgRewindBuilder::from(&TestSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#""./pg_rewind""#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\pg_rewind""#; assert_eq!(format!("{command_prefix}"), command.to_command_string()); } #[test] fn test_builder() { let command = PgRewindBuilder::new() .env("PGDATABASE", "database") .restore_target_wal() .target_pgdata("target_pgdata") .source_pgdata("source_pgdata") .source_server("source_server") .dry_run() .no_sync() .progress() .write_recovery_conf() .config_file("config_file") .debug() .no_ensure_shutdown() .version() .help() .build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGDATABASE="database" "#; #[cfg(target_os = "windows")] let command_prefix = String::new(); assert_eq!( format!( r#"{command_prefix}"pg_rewind" "--restore-target-wal" "--target-pgdata" "target_pgdata" "--source-pgdata" "source_pgdata" "--source-server" "source_server" "--dry-run" "--no-sync" "--progress" "--write-recovery-conf" "--config-file" "config_file" "--debug" "--no-ensure-shutdown" "--version" "--help""# ), command.to_command_string() ); } } ================================================ FILE: postgresql_commands/src/pg_test_fsync.rs ================================================ use crate::Settings; use crate::traits::CommandBuilder; use std::convert::AsRef; use 
std::ffi::{OsStr, OsString}; use std::path::PathBuf; /// `pg_test_fsync` command to determine fastest `wal_sync_method` for `PostgreSQL` #[derive(Clone, Debug, Default)] pub struct PgTestFsyncBuilder { program_dir: Option, envs: Vec<(OsString, OsString)>, filename: Option, secs_per_test: Option, } impl PgTestFsyncBuilder { /// Create a new [`PgTestFsyncBuilder`] #[must_use] pub fn new() -> Self { Self::default() } /// Create a new [`PgTestFsyncBuilder`] from [Settings] pub fn from(settings: &dyn Settings) -> Self { Self::new().program_dir(settings.get_binary_dir()) } /// Location of the program binary #[must_use] pub fn program_dir>(mut self, path: P) -> Self { self.program_dir = Some(path.into()); self } /// Set the filename #[must_use] pub fn filename>(mut self, filename: S) -> Self { self.filename = Some(filename.as_ref().to_os_string()); self } /// Set the seconds per test #[must_use] pub fn secs_per_test(mut self, secs: usize) -> Self { self.secs_per_test = Some(secs); self } } impl CommandBuilder for PgTestFsyncBuilder { /// Get the program name fn get_program(&self) -> &'static OsStr { "pg_test_fsync".as_ref() } /// Location of the program binary fn get_program_dir(&self) -> &Option { &self.program_dir } /// Get the arguments for the command fn get_args(&self) -> Vec { let mut args: Vec = Vec::new(); if let Some(filename) = &self.filename { args.push("-f".into()); args.push(filename.into()); } if let Some(secs) = &self.secs_per_test { args.push("-s".into()); args.push(secs.to_string().into()); } args } /// Get the environment variables for the command fn get_envs(&self) -> Vec<(OsString, OsString)> { self.envs.clone() } /// Set an environment variable for the command fn env>(mut self, key: S, value: S) -> Self { self.envs .push((key.as_ref().to_os_string(), value.as_ref().to_os_string())); self } } #[cfg(test)] mod tests { use super::*; use crate::TestSettings; use crate::traits::CommandToString; use test_log::test; #[test] fn test_builder_new() { let 
command = PgTestFsyncBuilder::new().program_dir(".").build(); assert_eq!( PathBuf::from(".").join("pg_test_fsync"), PathBuf::from(command.to_command_string().replace('"', "")) ); } #[test] fn test_builder_from() { let command = PgTestFsyncBuilder::from(&TestSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#""./pg_test_fsync""#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\pg_test_fsync""#; assert_eq!(format!("{command_prefix}"), command.to_command_string()); } #[test] fn test_builder() { let command = PgTestFsyncBuilder::new() .env("PGDATABASE", "database") .filename("filename") .secs_per_test(10) .build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGDATABASE="database" "#; #[cfg(target_os = "windows")] let command_prefix = String::new(); assert_eq!( format!(r#"{command_prefix}"pg_test_fsync" "-f" "filename" "-s" "10""#), command.to_command_string() ); } } ================================================ FILE: postgresql_commands/src/pg_test_timing.rs ================================================ use crate::Settings; use crate::traits::CommandBuilder; use std::convert::AsRef; use std::ffi::{OsStr, OsString}; use std::path::PathBuf; /// `pg_test_timing` tests the timing of a `PostgreSQL` instance. 
#[derive(Clone, Debug, Default)] pub struct PgTestTimingBuilder { program_dir: Option, envs: Vec<(OsString, OsString)>, duration: Option, } impl PgTestTimingBuilder { /// Create a new [`PgTestTimingBuilder`] #[must_use] pub fn new() -> Self { Self::default() } /// Create a new [`PgTestTimingBuilder`] from [Settings] pub fn from(settings: &dyn Settings) -> Self { Self::new().program_dir(settings.get_binary_dir()) } /// Location of the program binary #[must_use] pub fn program_dir>(mut self, path: P) -> Self { self.program_dir = Some(path.into()); self } /// set the duration for the test #[must_use] pub fn duration>(mut self, duration: S) -> Self { self.duration = Some(duration.as_ref().to_os_string()); self } } impl CommandBuilder for PgTestTimingBuilder { /// Get the program name fn get_program(&self) -> &'static OsStr { "pg_test_timing".as_ref() } /// Location of the program binary fn get_program_dir(&self) -> &Option { &self.program_dir } /// Get the arguments for the command fn get_args(&self) -> Vec { let mut args: Vec = Vec::new(); if let Some(duration) = &self.duration { args.push("-d".into()); args.push(duration.into()); } args } /// Get the environment variables for the command fn get_envs(&self) -> Vec<(OsString, OsString)> { self.envs.clone() } /// Set an environment variable for the command fn env>(mut self, key: S, value: S) -> Self { self.envs .push((key.as_ref().to_os_string(), value.as_ref().to_os_string())); self } } #[cfg(test)] mod tests { use super::*; use crate::TestSettings; use crate::traits::CommandToString; use test_log::test; #[test] fn test_builder_new() { let command = PgTestTimingBuilder::new().program_dir(".").build(); assert_eq!( PathBuf::from(".").join("pg_test_timing"), PathBuf::from(command.to_command_string().replace('"', "")) ); } #[test] fn test_builder_from() { let command = PgTestTimingBuilder::from(&TestSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#""./pg_test_timing""#; #[cfg(target_os = 
"windows")] let command_prefix = r#"".\\pg_test_timing""#; assert_eq!(format!("{command_prefix}"), command.to_command_string()); } #[test] fn test_builder() { let command = PgTestTimingBuilder::new() .env("PGDATABASE", "database") .duration("10") .build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGDATABASE="database" "#; #[cfg(target_os = "windows")] let command_prefix = String::new(); assert_eq!( format!(r#"{command_prefix}"pg_test_timing" "-d" "10""#), command.to_command_string() ); } } ================================================ FILE: postgresql_commands/src/pg_upgrade.rs ================================================ use crate::Settings; use crate::traits::CommandBuilder; use std::convert::AsRef; use std::ffi::{OsStr, OsString}; use std::path::PathBuf; /// `pg_upgrade` upgrades a `PostgreSQL` cluster to a different major version. #[derive(Clone, Debug, Default)] pub struct PgUpgradeBuilder { program_dir: Option, envs: Vec<(OsString, OsString)>, old_bindir: Option, new_bindir: Option, check: bool, old_datadir: Option, new_datadir: Option, jobs: Option, link: bool, no_sync: bool, old_options: Option, new_options: Option, old_port: Option, new_port: Option, retain: bool, socketdir: Option, username: Option, verbose: bool, version: bool, clone: bool, copy: bool, help: bool, } impl PgUpgradeBuilder { /// Create a new [`PgUpgradeBuilder`] #[must_use] pub fn new() -> Self { Self::default() } /// Create a new [`PgUpgradeBuilder`] from [Settings] pub fn from(settings: &dyn Settings) -> Self { Self::new().program_dir(settings.get_binary_dir()) } /// Location of the program binary #[must_use] pub fn program_dir>(mut self, path: P) -> Self { self.program_dir = Some(path.into()); self } /// old cluster executable directory #[must_use] pub fn old_bindir>(mut self, old_bindir: S) -> Self { self.old_bindir = Some(old_bindir.as_ref().to_os_string()); self } /// new cluster executable directory #[must_use] pub fn new_bindir>(mut self, new_bindir: S) -> 
Self { self.new_bindir = Some(new_bindir.as_ref().to_os_string()); self } /// check clusters only, don't change any data #[must_use] pub fn check(mut self) -> Self { self.check = true; self } /// old cluster data directory #[must_use] pub fn old_datadir>(mut self, old_datadir: S) -> Self { self.old_datadir = Some(old_datadir.as_ref().to_os_string()); self } /// new cluster data directory #[must_use] pub fn new_datadir>(mut self, new_datadir: S) -> Self { self.new_datadir = Some(new_datadir.as_ref().to_os_string()); self } /// number of simultaneous processes or threads to use #[must_use] pub fn jobs>(mut self, jobs: S) -> Self { self.jobs = Some(jobs.as_ref().to_os_string()); self } /// link instead of copying files to new cluster #[must_use] pub fn link(mut self) -> Self { self.link = true; self } /// do not wait for changes to be written safely to disk #[must_use] pub fn no_sync(mut self) -> Self { self.no_sync = true; self } /// old cluster options to pass to the server #[must_use] pub fn old_options>(mut self, old_options: S) -> Self { self.old_options = Some(old_options.as_ref().to_os_string()); self } /// new cluster options to pass to the server #[must_use] pub fn new_options>(mut self, new_options: S) -> Self { self.new_options = Some(new_options.as_ref().to_os_string()); self } /// old cluster port number #[must_use] pub fn old_port(mut self, old_port: u16) -> Self { self.old_port = Some(old_port); self } /// new cluster port number #[must_use] pub fn new_port(mut self, new_port: u16) -> Self { self.new_port = Some(new_port); self } /// retain SQL and log files after success #[must_use] pub fn retain(mut self) -> Self { self.retain = true; self } /// socket directory to use #[must_use] pub fn socketdir>(mut self, socketdir: S) -> Self { self.socketdir = Some(socketdir.as_ref().to_os_string()); self } /// cluster superuser #[must_use] pub fn username>(mut self, username: S) -> Self { self.username = Some(username.as_ref().to_os_string()); self } /// enable 
verbose internal logging #[must_use] pub fn verbose(mut self) -> Self { self.verbose = true; self } /// display version information, then exit #[must_use] pub fn version(mut self) -> Self { self.version = true; self } /// clone instead of copying files to new cluster #[must_use] pub fn clone(mut self) -> Self { self.clone = true; self } /// copy files to new cluster #[must_use] pub fn copy(mut self) -> Self { self.copy = true; self } /// show help, then exit #[must_use] pub fn help(mut self) -> Self { self.help = true; self } } impl CommandBuilder for PgUpgradeBuilder { /// Get the program name fn get_program(&self) -> &'static OsStr { "pg_upgrade".as_ref() } /// Location of the program binary fn get_program_dir(&self) -> &Option { &self.program_dir } /// Get the arguments for the command fn get_args(&self) -> Vec { let mut args: Vec = Vec::new(); if let Some(old_bindir) = &self.old_bindir { args.push("--old-bindir".into()); args.push(old_bindir.into()); } if let Some(new_bindir) = &self.new_bindir { args.push("--new-bindir".into()); args.push(new_bindir.into()); } if self.check { args.push("--check".into()); } if let Some(old_datadir) = &self.old_datadir { args.push("--old-datadir".into()); args.push(old_datadir.into()); } if let Some(new_datadir) = &self.new_datadir { args.push("--new-datadir".into()); args.push(new_datadir.into()); } if let Some(jobs) = &self.jobs { args.push("--jobs".into()); args.push(jobs.into()); } if self.link { args.push("--link".into()); } if self.no_sync { args.push("--no-sync".into()); } if let Some(old_options) = &self.old_options { args.push("--old-options".into()); args.push(old_options.into()); } if let Some(new_options) = &self.new_options { args.push("--new-options".into()); args.push(new_options.into()); } if let Some(old_port) = &self.old_port { args.push("--old-port".into()); args.push(old_port.to_string().into()); } if let Some(new_port) = &self.new_port { args.push("--new-port".into()); args.push(new_port.to_string().into()); 
} if self.retain { args.push("--retain".into()); } if let Some(socketdir) = &self.socketdir { args.push("--socketdir".into()); args.push(socketdir.into()); } if let Some(username) = &self.username { args.push("--username".into()); args.push(username.into()); } if self.verbose { args.push("--verbose".into()); } if self.version { args.push("--version".into()); } if self.clone { args.push("--clone".into()); } if self.copy { args.push("--copy".into()); } if self.help { args.push("--help".into()); } args } /// Get the environment variables for the command fn get_envs(&self) -> Vec<(OsString, OsString)> { self.envs.clone() } /// Set an environment variable for the command fn env>(mut self, key: S, value: S) -> Self { self.envs .push((key.as_ref().to_os_string(), value.as_ref().to_os_string())); self } } #[cfg(test)] mod tests { use super::*; use crate::TestSettings; use crate::traits::CommandToString; use test_log::test; #[test] fn test_builder_new() { let command = PgUpgradeBuilder::new().program_dir(".").build(); assert_eq!( PathBuf::from(".").join("pg_upgrade"), PathBuf::from(command.to_command_string().replace('"', "")) ); } #[test] fn test_builder_from() { let command = PgUpgradeBuilder::from(&TestSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#""./pg_upgrade""#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\pg_upgrade""#; assert_eq!(format!("{command_prefix}"), command.to_command_string()); } #[test] fn test_builder() { let command = PgUpgradeBuilder::new() .env("PGDATABASE", "database") .old_bindir("old") .new_bindir("new") .check() .old_datadir("old_data") .new_datadir("new_data") .jobs("10") .link() .no_sync() .old_options("old") .new_options("new") .old_port(5432) .new_port(5433) .retain() .socketdir("socket") .username("user") .verbose() .version() .clone() .copy() .help() .build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGDATABASE="database" "#; #[cfg(target_os = "windows")] let command_prefix = 
String::new();
        assert_eq!(
            format!(
                r#"{command_prefix}"pg_upgrade" "--old-bindir" "old" "--new-bindir" "new" "--check" "--old-datadir" "old_data" "--new-datadir" "new_data" "--jobs" "10" "--link" "--no-sync" "--old-options" "old" "--new-options" "new" "--old-port" "5432" "--new-port" "5433" "--retain" "--socketdir" "socket" "--username" "user" "--verbose" "--version" "--clone" "--copy" "--help""#
            ),
            command.to_command_string()
        );
    }
}


================================================
FILE: postgresql_commands/src/pg_verifybackup.rs
================================================
use crate::Settings;
use crate::traits::CommandBuilder;
use std::convert::AsRef;
use std::ffi::{OsStr, OsString};
use std::path::PathBuf;

/// `pg_verifybackup` verifies a backup against the backup manifest.
#[derive(Clone, Debug, Default)]
pub struct PgVerifyBackupBuilder {
    program_dir: Option<PathBuf>,
    envs: Vec<(OsString, OsString)>,
    exit_on_error: bool,
    ignore: Option<OsString>,
    manifest_path: Option<OsString>,
    no_parse_wal: bool,
    progress: bool,
    quiet: bool,
    skip_checksums: bool,
    wal_directory: Option<OsString>,
    version: bool,
    help: bool,
}

impl PgVerifyBackupBuilder {
    /// Create a new [`PgVerifyBackupBuilder`]
    #[must_use]
    pub fn new() -> Self {
        Self::default()
    }

    /// Create a new [`PgVerifyBackupBuilder`] from [Settings]
    pub fn from(settings: &dyn Settings) -> Self {
        Self::new().program_dir(settings.get_binary_dir())
    }

    /// Location of the program binary
    #[must_use]
    pub fn program_dir<P: Into<PathBuf>>(mut self, path: P) -> Self {
        self.program_dir = Some(path.into());
        self
    }

    /// exit immediately on error
    #[must_use]
    pub fn exit_on_error(mut self) -> Self {
        self.exit_on_error = true;
        self
    }

    /// ignore indicated path
    #[must_use]
    pub fn ignore<S: AsRef<OsStr>>(mut self, ignore: S) -> Self {
        self.ignore = Some(ignore.as_ref().to_os_string());
        self
    }

    /// use specified path for manifest
    #[must_use]
    pub fn manifest_path<S: AsRef<OsStr>>(mut self, manifest_path: S) -> Self {
        self.manifest_path = Some(manifest_path.as_ref().to_os_string());
        self
    }

    /// do not try to parse WAL files
    #[must_use]
    pub fn no_parse_wal(mut self) -> Self {
        self.no_parse_wal = true;
        self
    }

    /// show progress information
    #[must_use]
    pub fn progress(mut self) -> Self {
        self.progress = true;
        self
    }

    /// do not print any output, except for errors
    #[must_use]
    pub fn quiet(mut self) -> Self {
        self.quiet = true;
        self
    }

    /// skip checksum verification
    #[must_use]
    pub fn skip_checksums(mut self) -> Self {
        self.skip_checksums = true;
        self
    }

    /// use specified path for WAL files
    #[must_use]
    pub fn wal_directory<S: AsRef<OsStr>>(mut self, wal_directory: S) -> Self {
        self.wal_directory = Some(wal_directory.as_ref().to_os_string());
        self
    }

    /// output version information, then exit
    #[must_use]
    pub fn version(mut self) -> Self {
        self.version = true;
        self
    }

    /// show help, then exit
    #[must_use]
    pub fn help(mut self) -> Self {
        self.help = true;
        self
    }
}

impl CommandBuilder for PgVerifyBackupBuilder {
    /// Get the program name
    fn get_program(&self) -> &'static OsStr {
        "pg_verifybackup".as_ref()
    }

    /// Location of the program binary
    fn get_program_dir(&self) -> &Option<PathBuf> {
        &self.program_dir
    }

    /// Get the arguments for the command
    fn get_args(&self) -> Vec<OsString> {
        let mut args: Vec<OsString> = Vec::new();

        if self.exit_on_error {
            args.push("--exit-on-error".into());
        }

        if let Some(ignore) = &self.ignore {
            args.push("--ignore".into());
            args.push(ignore.into());
        }

        if let Some(manifest_path) = &self.manifest_path {
            args.push("--manifest-path".into());
            args.push(manifest_path.into());
        }

        if self.no_parse_wal {
            args.push("--no-parse-wal".into());
        }

        if self.progress {
            args.push("--progress".into());
        }

        if self.quiet {
            args.push("--quiet".into());
        }

        if self.skip_checksums {
            args.push("--skip-checksums".into());
        }

        if let Some(wal_directory) = &self.wal_directory {
            args.push("--wal-directory".into());
            args.push(wal_directory.into());
        }

        if self.version {
            args.push("--version".into());
        }

        if self.help {
            args.push("--help".into());
        }

        args
    }

    /// Get the environment variables for the command
    fn get_envs(&self) -> Vec<(OsString, OsString)> {
        self.envs.clone()
    }

    /// Set an environment variable for the command
    fn env<S: AsRef<OsStr>>(mut self, key: S, value: S) -> Self {
        self.envs
            .push((key.as_ref().to_os_string(), value.as_ref().to_os_string()));
        self
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::TestSettings;
    use crate::traits::CommandToString;
    use test_log::test;

    #[test]
    fn test_builder_new() {
        let command = PgVerifyBackupBuilder::new().program_dir(".").build();
        assert_eq!(
            PathBuf::from(".").join("pg_verifybackup"),
            PathBuf::from(command.to_command_string().replace('"', ""))
        );
    }

    #[test]
    fn test_builder_from() {
        let command = PgVerifyBackupBuilder::from(&TestSettings).build();
        #[cfg(not(target_os = "windows"))]
        let command_prefix = r#""./pg_verifybackup""#;
        #[cfg(target_os = "windows")]
        let command_prefix = r#"".\\pg_verifybackup""#;

        assert_eq!(format!("{command_prefix}"), command.to_command_string());
    }

    #[test]
    fn test_builder() {
        let command = PgVerifyBackupBuilder::new()
            .env("PGDATABASE", "database")
            .exit_on_error()
            .ignore("ignore")
            .manifest_path("manifest-path")
            .no_parse_wal()
            .progress()
            .quiet()
            .skip_checksums()
            .wal_directory("wal_directory")
            .version()
            .help()
            .build();
        #[cfg(not(target_os = "windows"))]
        let command_prefix = r#"PGDATABASE="database" "#;
        #[cfg(target_os = "windows")]
        let command_prefix = String::new();

        assert_eq!(
            format!(
                r#"{command_prefix}"pg_verifybackup" "--exit-on-error" "--ignore" "ignore" "--manifest-path" "manifest-path" "--no-parse-wal" "--progress" "--quiet" "--skip-checksums" "--wal-directory" "wal_directory" "--version" "--help""#
            ),
            command.to_command_string()
        );
    }
}


================================================
FILE: postgresql_commands/src/pg_waldump.rs
================================================
use crate::Settings;
use crate::traits::CommandBuilder;
use std::convert::AsRef;
use std::ffi::{OsStr, OsString};
use std::path::PathBuf;

/// `pg_waldump` decodes and displays `PostgreSQL` write-ahead logs for debugging.
#[derive(Clone, Debug, Default)] pub struct PgWalDumpBuilder { program_dir: Option, envs: Vec<(OsString, OsString)>, backkup_details: bool, block: Option, end: Option, follow: bool, fork: Option, limit: Option, path: Option, quiet: bool, rmgr: Option, relation: Option, start: Option, timeline: Option, version: bool, fullpage: bool, xid: Option, stats: Option, save_fullpage: Option, help: bool, } impl PgWalDumpBuilder { /// Create a new [`PgWalDumpBuilder`] #[must_use] pub fn new() -> Self { Self::default() } /// Create a new [`PgWalDumpBuilder`] from [Settings] pub fn from(settings: &dyn Settings) -> Self { Self::new().program_dir(settings.get_binary_dir()) } /// Location of the program binary #[must_use] pub fn program_dir>(mut self, path: P) -> Self { self.program_dir = Some(path.into()); self } /// output detailed information about backup blocks #[must_use] pub fn backup_details(mut self) -> Self { self.backkup_details = true; self } /// with --relation, only show records that modify block N #[must_use] pub fn block>(mut self, block: S) -> Self { self.block = Some(block.as_ref().to_os_string()); self } /// stop reading at WAL location RECPTR #[must_use] pub fn end>(mut self, end: S) -> Self { self.end = Some(end.as_ref().to_os_string()); self } /// keep retrying after reaching end of WAL #[must_use] pub fn follow(mut self) -> Self { self.follow = true; self } /// only show records that modify blocks in fork FORK #[must_use] pub fn fork>(mut self, fork: S) -> Self { self.fork = Some(fork.as_ref().to_os_string()); self } /// number of records to display #[must_use] pub fn limit>(mut self, limit: S) -> Self { self.limit = Some(limit.as_ref().to_os_string()); self } /// directory in which to find WAL segment files #[must_use] pub fn path>(mut self, path: S) -> Self { self.path = Some(path.as_ref().to_os_string()); self } /// do not print any output, except for errors #[must_use] pub fn quiet(mut self) -> Self { self.quiet = true; self } /// only show records 
generated by resource manager RMGR #[must_use] pub fn rmgr>(mut self, rmgr: S) -> Self { self.rmgr = Some(rmgr.as_ref().to_os_string()); self } /// only show records that modify blocks in relation T/D/R #[must_use] pub fn relation>(mut self, relation: S) -> Self { self.relation = Some(relation.as_ref().to_os_string()); self } /// start reading at WAL location RECPTR #[must_use] pub fn start>(mut self, start: S) -> Self { self.start = Some(start.as_ref().to_os_string()); self } /// timeline from which to read WAL records #[must_use] pub fn timeline>(mut self, timeline: S) -> Self { self.timeline = Some(timeline.as_ref().to_os_string()); self } /// output version information, then exit #[must_use] pub fn version(mut self) -> Self { self.version = true; self } /// only show records with a full page write #[must_use] pub fn fullpage(mut self) -> Self { self.fullpage = true; self } /// only show records with transaction ID XID #[must_use] pub fn xid>(mut self, xid: S) -> Self { self.xid = Some(xid.as_ref().to_os_string()); self } /// show statistics instead of records #[must_use] pub fn stats>(mut self, stats: S) -> Self { self.stats = Some(stats.as_ref().to_os_string()); self } /// save full page images to DIR #[must_use] pub fn save_fullpage>(mut self, save_fullpage: S) -> Self { self.save_fullpage = Some(save_fullpage.as_ref().to_os_string()); self } /// show help, then exit #[must_use] pub fn help(mut self) -> Self { self.help = true; self } } impl CommandBuilder for PgWalDumpBuilder { /// Get the program name fn get_program(&self) -> &'static OsStr { "pg_waldump".as_ref() } /// Location of the program binary fn get_program_dir(&self) -> &Option { &self.program_dir } /// Get the arguments for the command fn get_args(&self) -> Vec { let mut args: Vec = Vec::new(); if self.backkup_details { args.push("--bkp-details".into()); } if let Some(block) = &self.block { args.push("--block".into()); args.push(block.into()); } if let Some(end) = &self.end { 
args.push("--end".into()); args.push(end.into()); } if self.follow { args.push("--follow".into()); } if let Some(fork) = &self.fork { args.push("--fork".into()); args.push(fork.into()); } if let Some(limit) = &self.limit { args.push("--limit".into()); args.push(limit.into()); } if let Some(path) = &self.path { args.push("--path".into()); args.push(path.into()); } if self.quiet { args.push("--quiet".into()); } if let Some(rmgr) = &self.rmgr { args.push("--rmgr".into()); args.push(rmgr.into()); } if let Some(relation) = &self.relation { args.push("--relation".into()); args.push(relation.into()); } if let Some(start) = &self.start { args.push("--start".into()); args.push(start.into()); } if let Some(timeline) = &self.timeline { args.push("--timeline".into()); args.push(timeline.into()); } if self.version { args.push("--version".into()); } if self.fullpage { args.push("--fullpage".into()); } if let Some(xid) = &self.xid { args.push("--xid".into()); args.push(xid.into()); } if let Some(stats) = &self.stats { args.push("--stats".into()); args.push(stats.into()); } if let Some(save_fullpage) = &self.save_fullpage { args.push("--save-fullpage".into()); args.push(save_fullpage.into()); } if self.help { args.push("--help".into()); } args } /// Get the environment variables for the command fn get_envs(&self) -> Vec<(OsString, OsString)> { self.envs.clone() } /// Set an environment variable for the command fn env>(mut self, key: S, value: S) -> Self { self.envs .push((key.as_ref().to_os_string(), value.as_ref().to_os_string())); self } } #[cfg(test)] mod tests { use super::*; use crate::TestSettings; use crate::traits::CommandToString; use test_log::test; #[test] fn test_builder_new() { let command = PgWalDumpBuilder::new().program_dir(".").build(); assert_eq!( PathBuf::from(".").join("pg_waldump"), PathBuf::from(command.to_command_string().replace('"', "")) ); } #[test] fn test_builder_from() { let command = PgWalDumpBuilder::from(&TestSettings).build(); #[cfg(not(target_os = 
"windows"))] let command_prefix = r#""./pg_waldump""#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\pg_waldump""#; assert_eq!(format!("{command_prefix}"), command.to_command_string()); } #[test] fn test_builder() { let command = PgWalDumpBuilder::new() .env("PGDATABASE", "database") .backup_details() .block("block") .end("end") .follow() .fork("fork") .limit("limit") .path("path") .quiet() .rmgr("rmgr") .relation("relation") .start("start") .timeline("timeline") .version() .fullpage() .xid("xid") .stats("stats") .save_fullpage("save_fullpage") .help() .build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGDATABASE="database" "#; #[cfg(target_os = "windows")] let command_prefix = String::new(); assert_eq!( format!( r#"{command_prefix}"pg_waldump" "--bkp-details" "--block" "block" "--end" "end" "--follow" "--fork" "fork" "--limit" "limit" "--path" "path" "--quiet" "--rmgr" "rmgr" "--relation" "relation" "--start" "start" "--timeline" "timeline" "--version" "--fullpage" "--xid" "xid" "--stats" "stats" "--save-fullpage" "save_fullpage" "--help""# ), command.to_command_string() ); } } ================================================ FILE: postgresql_commands/src/pgbench.rs ================================================ use crate::Settings; use crate::traits::CommandBuilder; use std::convert::AsRef; use std::ffi::{OsStr, OsString}; use std::path::PathBuf; /// `pgbench` is a benchmarking tool for `PostgreSQL`. 
#[derive(Clone, Debug, Default)] pub struct PgBenchBuilder { program_dir: Option, envs: Vec<(OsString, OsString)>, initialize: bool, init_steps: Option, fill_factor: Option, no_vacuum: bool, quiet: bool, scale: Option, foreign_keys: bool, index_tablespace: Option, partition_method: Option, partitions: Option, tablespace: Option, unlogged_tables: bool, builtin: Option, file: Option, skip_some_updates: bool, select_only: bool, client: Option, connect: bool, define: Option, jobs: Option, log: bool, latency_limit: Option, protocol: Option, no_vacuum_bench: bool, progress: Option, report_per_command: bool, rate: Option, scale_bench: Option, transactions: Option, time: Option, vacuum_all: bool, aggregate_interval: Option, failures_detailed: bool, log_prefix: Option, max_tries: Option, progress_timestamp: bool, random_seed: Option, sampling_rate: Option, show_script: Option, verbose_errors: bool, debug: bool, host: Option, port: Option, username: Option, version: bool, help: bool, } impl PgBenchBuilder { /// Create a new [`PgBenchBuilder`] #[must_use] pub fn new() -> Self { Self::default() } /// Create a new [`PgBenchBuilder`] from [Settings] pub fn from(settings: &dyn Settings) -> Self { let mut builder = Self::new() .program_dir(settings.get_binary_dir()) .host(settings.get_host()) .port(settings.get_port()) .username(settings.get_username()); if let Some(socket_dir) = settings.get_socket_dir() { builder = builder.host(socket_dir.to_string_lossy().to_string()); } builder } /// Location of the program binary #[must_use] pub fn program_dir>(mut self, path: P) -> Self { self.program_dir = Some(path.into()); self } /// invokes initialization mode #[must_use] pub fn initialize(mut self) -> Self { self.initialize = true; self } /// run selected initialization steps #[must_use] pub fn init_steps>(mut self, steps: S) -> Self { self.init_steps = Some(steps.as_ref().to_os_string()); self } /// set fill factor #[must_use] pub fn fill_factor(mut self, factor: usize) -> Self { 
self.fill_factor = Some(factor); self } /// do not run VACUUM during initialization #[must_use] pub fn no_vacuum(mut self) -> Self { self.no_vacuum = true; self } /// quiet logging (one message each 5 seconds) #[must_use] pub fn quiet(mut self) -> Self { self.quiet = true; self } /// scaling factor #[must_use] pub fn scale(mut self, scale: usize) -> Self { self.scale = Some(scale); self } /// create foreign key constraints between tables #[must_use] pub fn foreign_keys(mut self) -> Self { self.foreign_keys = true; self } /// create indexes in the specified tablespace #[must_use] pub fn index_tablespace>(mut self, tablespace: S) -> Self { self.index_tablespace = Some(tablespace.as_ref().to_os_string()); self } /// partition `pgbench_accounts` with this method (default: range) #[must_use] pub fn partition_method>(mut self, method: S) -> Self { self.partition_method = Some(method.as_ref().to_os_string()); self } /// partition `pgbench_accounts` into NUM parts (default: 0) #[must_use] pub fn partitions(mut self, num: usize) -> Self { self.partitions = Some(num); self } /// create tables in the specified tablespace #[must_use] pub fn tablespace>(mut self, tablespace: S) -> Self { self.tablespace = Some(tablespace.as_ref().to_os_string()); self } /// create tables as unlogged tables #[must_use] pub fn unlogged_tables(mut self) -> Self { self.unlogged_tables = true; self } /// add builtin script NAME weighted at W (default: 1) #[must_use] pub fn builtin>(mut self, name: S) -> Self { self.builtin = Some(name.as_ref().to_os_string()); self } /// add script FILENAME weighted at W (default: 1) #[must_use] pub fn file>(mut self, filename: S) -> Self { self.file = Some(filename.as_ref().to_os_string()); self } /// skip some updates #[must_use] pub fn skip_some_updates(mut self) -> Self { self.skip_some_updates = true; self } /// perform SELECT-only transactions #[must_use] pub fn select_only(mut self) -> Self { self.select_only = true; self } /// number of concurrent database 
clients (default: 1) #[must_use] pub fn client(mut self, num: usize) -> Self { self.client = Some(num); self } /// establish new connection for each transaction #[must_use] pub fn connect(mut self) -> Self { self.connect = true; self } /// define variable for use by custom script #[must_use] pub fn define>(mut self, var: S) -> Self { self.define = Some(var.as_ref().to_os_string()); self } /// number of threads (default: 1) #[must_use] pub fn jobs(mut self, num: usize) -> Self { self.jobs = Some(num); self } /// write transaction times to log file #[must_use] pub fn log(mut self) -> Self { self.log = true; self } /// count transactions lasting more than NUM ms as late #[must_use] pub fn latency_limit(mut self, num: usize) -> Self { self.latency_limit = Some(num); self } /// protocol for submitting queries (default: simple) #[must_use] pub fn protocol>(mut self, protocol: S) -> Self { self.protocol = Some(protocol.as_ref().to_os_string()); self } /// do not run VACUUM before tests #[must_use] pub fn no_vacuum_bench(mut self) -> Self { self.no_vacuum_bench = true; self } /// show thread progress report every NUM seconds #[must_use] pub fn progress(mut self, num: usize) -> Self { self.progress = Some(num); self } /// report latencies, failures, and retries per command #[must_use] pub fn report_per_command(mut self) -> Self { self.report_per_command = true; self } /// target rate in transactions per second #[must_use] pub fn rate(mut self, num: usize) -> Self { self.rate = Some(num); self } /// report this scale factor in output #[must_use] pub fn scale_bench(mut self, scale: usize) -> Self { self.scale_bench = Some(scale); self } /// number of transactions each client runs (default: 10) #[must_use] pub fn transactions(mut self, num: usize) -> Self { self.transactions = Some(num); self } /// duration of benchmark test in seconds #[must_use] pub fn time(mut self, num: usize) -> Self { self.time = Some(num); self } /// vacuum all four standard tables before tests 
#[must_use] pub fn vacuum_all(mut self) -> Self { self.vacuum_all = true; self } /// aggregate data over NUM seconds #[must_use] pub fn aggregate_interval(mut self, num: usize) -> Self { self.aggregate_interval = Some(num); self } /// report the failures grouped by basic types #[must_use] pub fn failures_detailed(mut self) -> Self { self.failures_detailed = true; self } /// prefix for transaction time log file #[must_use] pub fn log_prefix>(mut self, prefix: S) -> Self { self.log_prefix = Some(prefix.as_ref().to_os_string()); self } /// max number of tries to run transaction (default: 1) #[must_use] pub fn max_tries(mut self, num: usize) -> Self { self.max_tries = Some(num); self } /// use Unix epoch timestamps for progress #[must_use] pub fn progress_timestamp(mut self) -> Self { self.progress_timestamp = true; self } /// set random seed ("time", "rand", integer) #[must_use] pub fn random_seed>(mut self, seed: S) -> Self { self.random_seed = Some(seed.as_ref().to_os_string()); self } /// fraction of transactions to log (e.g., 0.01 for 1%) #[must_use] pub fn sampling_rate(mut self, rate: f64) -> Self { self.sampling_rate = Some(rate); self } /// show builtin script code, then exit #[must_use] pub fn show_script>(mut self, name: S) -> Self { self.show_script = Some(name.as_ref().to_os_string()); self } /// print messages of all errors #[must_use] pub fn verbose_errors(mut self) -> Self { self.verbose_errors = true; self } /// print debugging output #[must_use] pub fn debug(mut self) -> Self { self.debug = true; self } /// database server host or socket directory #[must_use] pub fn host>(mut self, hostname: S) -> Self { self.host = Some(hostname.as_ref().to_os_string()); self } /// database server port number #[must_use] pub fn port(mut self, port: u16) -> Self { self.port = Some(port); self } /// connect as specified database user #[must_use] pub fn username>(mut self, username: S) -> Self { self.username = Some(username.as_ref().to_os_string()); self } /// output 
version information, then exit #[must_use] pub fn version(mut self) -> Self { self.version = true; self } /// show help, then exit #[must_use] pub fn help(mut self) -> Self { self.help = true; self } } impl CommandBuilder for PgBenchBuilder { /// Get the program name fn get_program(&self) -> &'static OsStr { "pgbench".as_ref() } /// Location of the program binary fn get_program_dir(&self) -> &Option { &self.program_dir } /// Get the arguments for the command #[expect(clippy::too_many_lines)] fn get_args(&self) -> Vec { let mut args: Vec = Vec::new(); if self.initialize { args.push("--initialize".into()); } if let Some(steps) = &self.init_steps { args.push("--init-steps".into()); args.push(steps.into()); } if let Some(factor) = &self.fill_factor { args.push("--fillfactor".into()); args.push(factor.to_string().into()); } if self.no_vacuum { args.push("--no-vacuum".into()); } if self.quiet { args.push("--quiet".into()); } if let Some(scale) = &self.scale { args.push("--scale".into()); args.push(scale.to_string().into()); } if self.foreign_keys { args.push("--foreign-keys".into()); } if let Some(tablespace) = &self.index_tablespace { args.push("--index-tablespace".into()); args.push(tablespace.into()); } if let Some(method) = &self.partition_method { args.push("--partition-method".into()); args.push(method.into()); } if let Some(num) = &self.partitions { args.push("--partitions".into()); args.push(num.to_string().into()); } if let Some(tablespace) = &self.tablespace { args.push("--tablespace".into()); args.push(tablespace.into()); } if self.unlogged_tables { args.push("--unlogged-tables".into()); } if let Some(name) = &self.builtin { args.push("--builtin".into()); args.push(name.into()); } if let Some(filename) = &self.file { args.push("--file".into()); args.push(filename.into()); } if self.skip_some_updates { args.push("--skip-some-updates".into()); } if self.select_only { args.push("--select-only".into()); } if let Some(num) = &self.client { 
args.push("--client".into()); args.push(num.to_string().into()); } if self.connect { args.push("--connect".into()); } if let Some(var) = &self.define { args.push("--define".into()); args.push(var.into()); } if let Some(num) = &self.jobs { args.push("--jobs".into()); args.push(num.to_string().into()); } if self.log { args.push("--log".into()); } if let Some(num) = &self.latency_limit { args.push("--latency-limit".into()); args.push(num.to_string().into()); } if let Some(protocol) = &self.protocol { args.push("--protocol".into()); args.push(protocol.into()); } if self.no_vacuum_bench { args.push("--no-vacuum".into()); } if let Some(num) = &self.progress { args.push("--progress".into()); args.push(num.to_string().into()); } if self.report_per_command { args.push("--report-per-command".into()); } if let Some(num) = &self.rate { args.push("--rate".into()); args.push(num.to_string().into()); } if let Some(scale) = &self.scale_bench { args.push("--scale".into()); args.push(scale.to_string().into()); } if let Some(num) = &self.transactions { args.push("--transactions".into()); args.push(num.to_string().into()); } if let Some(num) = &self.time { args.push("--time".into()); args.push(num.to_string().into()); } if self.vacuum_all { args.push("--vacuum-all".into()); } if let Some(num) = &self.aggregate_interval { args.push("--aggregate-interval".into()); args.push(num.to_string().into()); } if self.failures_detailed { args.push("--failures-detailed".into()); } if let Some(prefix) = &self.log_prefix { args.push("--log-prefix".into()); args.push(prefix.into()); } if let Some(num) = &self.max_tries { args.push("--max-tries".into()); args.push(num.to_string().into()); } if self.progress_timestamp { args.push("--progress-timestamp".into()); } if let Some(seed) = &self.random_seed { args.push("--random-seed".into()); args.push(seed.into()); } if let Some(rate) = &self.sampling_rate { args.push("--sampling-rate".into()); args.push(rate.to_string().into()); } if let Some(name) = 
&self.show_script { args.push("--show-script".into()); args.push(name.into()); } if self.verbose_errors { args.push("--verbose-errors".into()); } if self.debug { args.push("--debug".into()); } if let Some(hostname) = &self.host { args.push("--host".into()); args.push(hostname.into()); } if let Some(port) = &self.port { args.push("--port".into()); args.push(port.to_string().into()); } if let Some(username) = &self.username { args.push("--username".into()); args.push(username.into()); } if self.version { args.push("--version".into()); } if self.help { args.push("--help".into()); } args } /// Get the environment variables for the command fn get_envs(&self) -> Vec<(OsString, OsString)> { self.envs.clone() } /// Set an environment variable for the command fn env>(mut self, key: S, value: S) -> Self { self.envs .push((key.as_ref().to_os_string(), value.as_ref().to_os_string())); self } } #[cfg(test)] mod tests { use super::*; use crate::TestSettings; use crate::TestSocketSettings; use crate::traits::CommandToString; use test_log::test; #[test] fn test_builder_new() { let command = PgBenchBuilder::new().program_dir(".").build(); assert_eq!( PathBuf::from(".").join("pgbench"), PathBuf::from(command.to_command_string().replace('"', "")) ); } #[test] fn test_builder_from() { let command = PgBenchBuilder::from(&TestSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#""./pgbench" "#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\pgbench" "#; assert_eq!( format!( r#"{command_prefix}"--host" "localhost" "--port" "5432" "--username" "postgres""# ), command.to_command_string() ); } #[test] fn test_builder_from_socket() { let command = PgBenchBuilder::from(&TestSocketSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#""./pgbench" "#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\pgbench" "#; assert_eq!( format!( r#"{command_prefix}"--host" "/tmp/pg_socket" "--port" "5432" "--username" "postgres""# ), 
command.to_command_string() ); } #[test] fn test_builder() { let command = PgBenchBuilder::new() .env("PGDATABASE", "database") .initialize() .init_steps("steps") .fill_factor(10) .no_vacuum() .quiet() .scale(10) .foreign_keys() .index_tablespace("tablespace") .partition_method("method") .partitions(10) .tablespace("tablespace") .unlogged_tables() .builtin("name") .file("filename") .skip_some_updates() .select_only() .client(10) .connect() .define("var") .jobs(10) .log() .latency_limit(10) .protocol("protocol") .no_vacuum_bench() .progress(10) .report_per_command() .rate(10) .scale_bench(10) .transactions(10) .time(10) .vacuum_all() .aggregate_interval(10) .failures_detailed() .log_prefix("prefix") .max_tries(10) .progress_timestamp() .random_seed("seed") .sampling_rate(10.0) .show_script("name") .verbose_errors() .debug() .host("localhost") .port(5432) .username("username") .version() .help() .build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGDATABASE="database" "#; #[cfg(target_os = "windows")] let command_prefix = String::new(); assert_eq!( format!( r#"{command_prefix}"pgbench" "--initialize" "--init-steps" "steps" "--fillfactor" "10" "--no-vacuum" "--quiet" "--scale" "10" "--foreign-keys" "--index-tablespace" "tablespace" "--partition-method" "method" "--partitions" "10" "--tablespace" "tablespace" "--unlogged-tables" "--builtin" "name" "--file" "filename" "--skip-some-updates" "--select-only" "--client" "10" "--connect" "--define" "var" "--jobs" "10" "--log" "--latency-limit" "10" "--protocol" "protocol" "--no-vacuum" "--progress" "10" "--report-per-command" "--rate" "10" "--scale" "10" "--transactions" "10" "--time" "10" "--vacuum-all" "--aggregate-interval" "10" "--failures-detailed" "--log-prefix" "prefix" "--max-tries" "10" "--progress-timestamp" "--random-seed" "seed" "--sampling-rate" "10" "--show-script" "name" "--verbose-errors" "--debug" "--host" "localhost" "--port" "5432" "--username" "username" "--version" "--help""# ), 
command.to_command_string() ); } } ================================================ FILE: postgresql_commands/src/postgres.rs ================================================ use crate::Settings; use crate::traits::CommandBuilder; use std::ffi::{OsStr, OsString}; use std::path::PathBuf; /// `postgres` is the `PostgreSQL` server. #[derive(Clone, Debug, Default)] pub struct PostgresBuilder { program_dir: Option, envs: Vec<(OsString, OsString)>, n_buffers: Option, runtime_param: Option<(OsString, OsString)>, print_runtime_param: Option, debugging_level: Option, data_dir: Option, european_date_format: bool, fsync_off: bool, host: Option, tcp_ip_connections: bool, socket_location: Option, max_connections: Option, port: Option, show_stats: bool, work_mem: Option, version: bool, describe_config: bool, help: bool, forbidden_plan_types: Option, allow_system_table_changes: bool, disable_system_indexes: bool, show_timings: Option, send_sigabrt: bool, wait_seconds: Option, single_user_mode: bool, dbname: Option, override_debugging_level: Option, echo_statement: bool, no_newline_delimiter: bool, output_file: Option, bootstrapping_mode: bool, check_mode: bool, } impl PostgresBuilder { /// Create a new [`PostgresBuilder`] #[must_use] pub fn new() -> Self { Self::default() } /// Create a new [`PostgresBuilder`] from [Settings] pub fn from(settings: &dyn Settings) -> Self { let mut builder = Self::new() .program_dir(settings.get_binary_dir()) .host(settings.get_host()) .port(settings.get_port()); if let Some(socket_dir) = settings.get_socket_dir() { builder = builder.socket_location(socket_dir); } builder } /// Location of the program binary #[must_use] pub fn program_dir>(mut self, path: P) -> Self { self.program_dir = Some(path.into()); self } /// number of shared buffers #[must_use] pub fn n_buffers(mut self, n_buffers: u32) -> Self { self.n_buffers = Some(n_buffers); self } /// set run-time parameter #[must_use] pub fn runtime_param>(mut self, name: S, value: S) -> Self { 
self.runtime_param = Some((name.as_ref().into(), value.as_ref().into()));
        self
    }

    /// print value of run-time parameter, then exit
    #[must_use]
    pub fn print_runtime_param<S: AsRef<OsStr>>(mut self, name: S) -> Self {
        self.print_runtime_param = Some(name.as_ref().to_os_string());
        self
    }

    /// debugging level
    #[must_use]
    pub fn debugging_level(mut self, level: u8) -> Self {
        self.debugging_level = Some(level);
        self
    }

    /// database directory
    #[must_use]
    pub fn data_dir<P: Into<PathBuf>>(mut self, dir: P) -> Self {
        self.data_dir = Some(dir.into());
        self
    }

    /// use European date input format (DMY)
    #[must_use]
    pub fn european_date_format(mut self) -> Self {
        self.european_date_format = true;
        self
    }

    /// turn fsync off
    #[must_use]
    pub fn fsync_off(mut self) -> Self {
        self.fsync_off = true;
        self
    }

    /// host name or IP address to listen on
    #[must_use]
    pub fn host<S: AsRef<OsStr>>(mut self, host: S) -> Self {
        self.host = Some(host.as_ref().to_os_string());
        self
    }

    /// enable TCP/IP connections (deprecated)
    #[must_use]
    pub fn tcp_ip_connections(mut self) -> Self {
        self.tcp_ip_connections = true;
        self
    }

    /// Unix socket location
    #[must_use]
    pub fn socket_location<P: Into<PathBuf>>(mut self, dir: P) -> Self {
        self.socket_location = Some(dir.into());
        self
    }

    /// maximum number of allowed connections
    #[must_use]
    pub fn max_connections(mut self, max: u32) -> Self {
        self.max_connections = Some(max);
        self
    }

    /// port number to listen on
    #[must_use]
    pub fn port(mut self, port: u16) -> Self {
        self.port = Some(port);
        self
    }

    /// show statistics after each query
    #[must_use]
    pub fn show_stats(mut self) -> Self {
        self.show_stats = true;
        self
    }

    /// set amount of memory for sorts (in kB)
    #[must_use]
    pub fn work_mem(mut self, mem: u32) -> Self {
        self.work_mem = Some(mem);
        self
    }

    /// output version information, then exit
    #[must_use]
    pub fn version(mut self) -> Self {
        self.version = true;
        self
    }

    /// describe configuration parameters, then exit
    #[must_use]
    pub fn describe_config(mut self) -> Self {
        self.describe_config = true;
        self
    }

    /// show help, then exit
    #[must_use]
    pub fn help(mut self) -> Self {
        self.help = true;
        self
    }

    /// forbid use of some plan types
    #[must_use]
    pub fn forbidden_plan_types<S: AsRef<OsStr>>(mut self, types: S) -> Self {
        self.forbidden_plan_types = Some(types.as_ref().to_os_string());
        self
    }

    /// allow system table structure changes
    #[must_use]
    pub fn allow_system_table_changes(mut self) -> Self {
        self.allow_system_table_changes = true;
        self
    }

    /// disable system indexes
    #[must_use]
    pub fn disable_system_indexes(mut self) -> Self {
        self.disable_system_indexes = true;
        self
    }

    /// show timings after each query
    #[must_use]
    pub fn show_timings<S: AsRef<OsStr>>(mut self, timings: S) -> Self {
        self.show_timings = Some(timings.as_ref().to_os_string());
        self
    }

    /// send SIGABRT to all backend processes if one dies
    #[must_use]
    pub fn send_sigabrt(mut self) -> Self {
        self.send_sigabrt = true;
        self
    }

    /// wait NUM seconds to allow attach from a debugger
    #[must_use]
    pub fn wait_seconds(mut self, seconds: u32) -> Self {
        self.wait_seconds = Some(seconds);
        self
    }

    /// selects single-user mode (must be first argument)
    #[must_use]
    pub fn single_user_mode(mut self) -> Self {
        self.single_user_mode = true;
        self
    }

    /// database name (defaults to user name)
    #[must_use]
    pub fn dbname<S: AsRef<OsStr>>(mut self, dbname: S) -> Self {
        self.dbname = Some(dbname.as_ref().to_os_string());
        self
    }

    /// override debugging level
    #[must_use]
    pub fn override_debugging_level(mut self, level: u8) -> Self {
        self.override_debugging_level = Some(level);
        self
    }

    /// echo statement before execution
    #[must_use]
    pub fn echo_statement(mut self) -> Self {
        self.echo_statement = true;
        self
    }

    /// do not use newline as interactive query delimiter
    #[must_use]
    pub fn no_newline_delimiter(mut self) -> Self {
        self.no_newline_delimiter = true;
        self
    }

    /// send stdout and stderr to given file
    #[must_use]
    pub fn output_file<P: Into<PathBuf>>(mut self, file: P) -> Self {
        self.output_file = Some(file.into());
        self
    }

    /// selects bootstrapping mode (must be first argument)
    #[must_use]
    pub fn bootstrapping_mode(mut self) -> Self {
        self.bootstrapping_mode = true;
        self
    }

    /// selects check mode (must be first argument)
    #[must_use]
    pub fn check_mode(mut self) -> Self {
        self.check_mode = true;
        self
    }
}

impl CommandBuilder for PostgresBuilder {
    /// Get the program name
    fn get_program(&self) -> &'static OsStr {
        "postgres".as_ref()
    }

    /// Location of the program binary
    fn get_program_dir(&self) -> &Option<PathBuf> {
        &self.program_dir
    }

    /// Get the arguments for the command
    #[expect(clippy::too_many_lines)]
    fn get_args(&self) -> Vec<OsString> {
        let mut args: Vec<OsString> = Vec::new();

        if let Some(n_buffers) = &self.n_buffers {
            args.push("-B".into());
            args.push(n_buffers.to_string().into());
        }

        if let Some((name, value)) = &self.runtime_param {
            args.push("-c".into());
            args.push(format!("{}={}", name.to_string_lossy(), value.to_string_lossy()).into());
        }

        if let Some(name) = &self.print_runtime_param {
            args.push("-C".into());
            args.push(name.into());
        }

        if let Some(level) = &self.debugging_level {
            args.push("-d".into());
            args.push(level.to_string().into());
        }

        if let Some(data_dir) = &self.data_dir {
            args.push("-D".into());
            args.push(data_dir.into());
        }

        if self.european_date_format {
            args.push("-e".into());
        }

        if self.fsync_off {
            args.push("-F".into());
        }

        if let Some(host) = &self.host {
            args.push("-h".into());
            args.push(host.into());
        }

        if self.tcp_ip_connections {
            args.push("-i".into());
        }

        if let Some(socket_location) = &self.socket_location {
            args.push("-k".into());
            args.push(socket_location.into());
        }

        if let Some(max) = &self.max_connections {
            args.push("-N".into());
            args.push(max.to_string().into());
        }

        if let Some(port) = &self.port {
            args.push("-p".into());
            args.push(port.to_string().into());
        }

        if self.show_stats {
            args.push("-s".into());
        }

        if let Some(work_mem) = &self.work_mem {
            args.push("-S".into());
            args.push(work_mem.to_string().into());
        }

        if self.version {
            args.push("--version".into());
        }

        if self.describe_config {
            args.push("--describe-config".into());
        }

        if self.help {
            args.push("--help".into());
        }

        if let Some(forbidden_plan_types) = &self.forbidden_plan_types {
            args.push("-f".into());
            args.push(forbidden_plan_types.into());
        }

        if self.allow_system_table_changes {
            args.push("-O".into());
        }

        if self.disable_system_indexes {
            args.push("-P".into());
        }

        if let Some(show_timings) = &self.show_timings {
            args.push("-t".into());
            args.push(show_timings.into());
        }

        if self.send_sigabrt {
            args.push("-T".into());
        }

        if let Some(seconds) = &self.wait_seconds {
            args.push("-W".into());
            args.push(seconds.to_string().into());
        }

        if self.single_user_mode {
            args.push("--single".into());
        }

        // dbname is a positional argument (no flag)
        if let Some(dbname) = &self.dbname {
            args.push(dbname.into());
        }

        if let Some(level) = &self.override_debugging_level {
            args.push("-d".into());
            args.push(level.to_string().into());
        }

        if self.echo_statement {
            args.push("-E".into());
        }

        if self.no_newline_delimiter {
            args.push("-j".into());
        }

        if let Some(file) = &self.output_file {
            args.push("-r".into());
            args.push(file.into());
        }

        if self.bootstrapping_mode {
            args.push("--boot".into());
        }

        if self.check_mode {
            args.push("--check".into());
        }

        args
    }

    /// Get the environment variables for the command
    fn get_envs(&self) -> Vec<(OsString, OsString)> {
        self.envs.clone()
    }

    /// Set an environment variable for the command
    fn env<S: AsRef<OsStr>>(mut self, key: S, value: S) -> Self {
        self.envs
            .push((key.as_ref().to_os_string(), value.as_ref().to_os_string()));
        self
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::TestSettings;
    use crate::TestSocketSettings;
    use crate::traits::CommandToString;
    use test_log::test;

    #[test]
    fn test_builder_new() {
        let command = PostgresBuilder::new().program_dir(".").build();
        assert_eq!(
            PathBuf::from(".").join("postgres"),
            PathBuf::from(command.to_command_string().replace('"', ""))
        );
    }

    #[test]
    fn test_builder_from() {
        let command = PostgresBuilder::from(&TestSettings).build();
        #[cfg(not(target_os = "windows"))]
        let command_prefix = r#""./postgres" "#;
        #[cfg(target_os = "windows")]
        let command_prefix = r#"".\\postgres" "#;

        assert_eq!(
format!(r#"{command_prefix}"-h" "localhost" "-p" "5432""#), command.to_command_string() ); } #[test] fn test_builder_from_socket() { let command = PostgresBuilder::from(&TestSocketSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#""./postgres" "#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\postgres" "#; assert_eq!( format!(r#"{command_prefix}"-h" "localhost" "-k" "/tmp/pg_socket" "-p" "5432""#), command.to_command_string() ); } #[test] fn test_builder() { let command = PostgresBuilder::new() .env("PGDATABASE", "database") .n_buffers(100) .runtime_param("name", "value") .print_runtime_param("name") .debugging_level(3) .data_dir("data_dir") .european_date_format() .fsync_off() .host("localhost") .tcp_ip_connections() .socket_location("socket_location") .max_connections(100) .port(5432) .show_stats() .work_mem(100) .version() .describe_config() .help() .forbidden_plan_types("type") .allow_system_table_changes() .disable_system_indexes() .show_timings("timings") .send_sigabrt() .wait_seconds(10) .single_user_mode() .dbname("dbname") .override_debugging_level(3) .echo_statement() .no_newline_delimiter() .output_file("output_file") .bootstrapping_mode() .check_mode() .build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGDATABASE="database" "#; #[cfg(target_os = "windows")] let command_prefix = String::new(); assert_eq!( format!( r#"{command_prefix}"postgres" "-B" "100" "-c" "name=value" "-C" "name" "-d" "3" "-D" "data_dir" "-e" "-F" "-h" "localhost" "-i" "-k" "socket_location" "-N" "100" "-p" "5432" "-s" "-S" "100" "--version" "--describe-config" "--help" "-f" "type" "-O" "-P" "-t" "timings" "-T" "-W" "10" "--single" "dbname" "-d" "3" "-E" "-j" "-r" "output_file" "--boot" "--check""# ), command.to_command_string() ); } } ================================================ FILE: postgresql_commands/src/psql.rs ================================================ use crate::Settings; use crate::traits::CommandBuilder; use 
std::ffi::{OsStr, OsString}; use std::path::PathBuf; /// `psql` is the `PostgreSQL` interactive terminal. #[derive(Clone, Debug, Default)] pub struct PsqlBuilder { program_dir: Option, envs: Vec<(OsString, OsString)>, command: Option, dbname: Option, file: Option, list: bool, variable: Option<(OsString, OsString)>, version: bool, no_psqlrc: bool, single_transaction: bool, help: Option, echo_all: bool, echo_errors: bool, echo_queries: bool, echo_hidden: bool, log_file: Option, no_readline: bool, output: Option, quiet: bool, single_step: bool, single_line: bool, no_align: bool, csv: bool, field_separator: Option, html: bool, pset: Option<(OsString, OsString)>, record_separator: Option, tuples_only: bool, table_attr: Option, expanded: bool, field_separator_zero: bool, record_separator_zero: bool, host: Option, port: Option, username: Option, no_password: bool, password: bool, pg_password: Option, } impl PsqlBuilder { /// Create a new [`PsqlBuilder`] #[must_use] pub fn new() -> Self { Self::default() } /// Create a new [`PsqlBuilder`] from [Settings] pub fn from(settings: &dyn Settings) -> Self { let mut builder = Self::new() .program_dir(settings.get_binary_dir()) .host(settings.get_host()) .port(settings.get_port()) .username(settings.get_username()) .pg_password(settings.get_password()); if let Some(socket_dir) = settings.get_socket_dir() { builder = builder.host(socket_dir.to_string_lossy().to_string()); } builder } /// Location of the program binary #[must_use] pub fn program_dir>(mut self, path: P) -> Self { self.program_dir = Some(path.into()); self } /// run only single command (SQL or internal) and exit #[must_use] pub fn command>(mut self, command: S) -> Self { self.command = Some(command.as_ref().to_os_string()); self } /// database name to connect to #[must_use] pub fn dbname>(mut self, dbname: S) -> Self { self.dbname = Some(dbname.as_ref().to_os_string()); self } /// execute commands from file, then exit #[must_use] pub fn file>(mut self, file: P) -> Self 
{ self.file = Some(file.into()); self } /// list available databases, then exit #[must_use] pub fn list(mut self) -> Self { self.list = true; self } /// set psql variable NAME to VALUE (e.g., `-v ON_ERROR_STOP=1`) #[must_use] pub fn variable>(mut self, variable: (S, S)) -> Self { let (name, value) = variable; self.variable = Some((name.as_ref().into(), value.as_ref().into())); self } /// output version information, then exit #[must_use] pub fn version(mut self) -> Self { self.version = true; self } /// do not read startup file (~/.psqlrc) #[must_use] pub fn no_psqlrc(mut self) -> Self { self.no_psqlrc = true; self } /// execute as a single transaction (if non-interactive) #[must_use] pub fn single_transaction(mut self) -> Self { self.single_transaction = true; self } /// show help, then exit /// Possible values: [options, commands, variables] #[must_use] pub fn help>(mut self, help: S) -> Self { self.help = Some(help.as_ref().to_os_string()); self } /// echo all input from script #[must_use] pub fn echo_all(mut self) -> Self { self.echo_all = true; self } /// echo failed commands #[must_use] pub fn echo_errors(mut self) -> Self { self.echo_errors = true; self } /// echo commands sent to server #[must_use] pub fn echo_queries(mut self) -> Self { self.echo_queries = true; self } /// display queries that internal commands generate #[must_use] pub fn echo_hidden(mut self) -> Self { self.echo_hidden = true; self } /// send session log to file #[must_use] pub fn log_file>(mut self, log_file: P) -> Self { self.log_file = Some(log_file.into()); self } /// disable enhanced command line editing (readline) #[must_use] pub fn no_readline(mut self) -> Self { self.no_readline = true; self } /// send query results to file (or |pipe) #[must_use] pub fn output>(mut self, output: P) -> Self { self.output = Some(output.into()); self } /// run quietly (no messages, only query output) #[must_use] pub fn quiet(mut self) -> Self { self.quiet = true; self } /// single-step mode (confirm 
each query) #[must_use] pub fn single_step(mut self) -> Self { self.single_step = true; self } /// single-line mode (end of line terminates SQL command) #[must_use] pub fn single_line(mut self) -> Self { self.single_line = true; self } /// unaligned table output mode #[must_use] pub fn no_align(mut self) -> Self { self.no_align = true; self } /// CSV (Comma-Separated Values) table output mode #[must_use] pub fn csv(mut self) -> Self { self.csv = true; self } /// field separator for unaligned output (default: "|") #[must_use] pub fn field_separator>(mut self, field_separator: S) -> Self { self.field_separator = Some(field_separator.as_ref().to_os_string()); self } /// HTML table output mode #[must_use] pub fn html(mut self) -> Self { self.html = true; self } /// set printing option VAR to ARG (see \pset command) #[must_use] pub fn pset>(mut self, pset: (S, S)) -> Self { let (var, arg) = pset; self.pset = Some((var.as_ref().into(), arg.as_ref().into())); self } /// record separator for unaligned output (default: newline) #[must_use] pub fn record_separator>(mut self, record_separator: S) -> Self { self.record_separator = Some(record_separator.as_ref().to_os_string()); self } /// print rows only #[must_use] pub fn tuples_only(mut self) -> Self { self.tuples_only = true; self } /// set HTML table tag attributes (e.g., width, border) #[must_use] pub fn table_attr>(mut self, table_attr: S) -> Self { self.table_attr = Some(table_attr.as_ref().to_os_string()); self } /// turn on expanded table output #[must_use] pub fn expanded(mut self) -> Self { self.expanded = true; self } /// set field separator for unaligned output to zero byte #[must_use] pub fn field_separator_zero(mut self) -> Self { self.field_separator_zero = true; self } /// set record separator for unaligned output to zero byte #[must_use] pub fn record_separator_zero(mut self) -> Self { self.record_separator_zero = true; self } /// database server host or socket directory #[must_use] pub fn host>(mut self, 
host: S) -> Self { self.host = Some(host.as_ref().to_os_string()); self } /// database server port #[must_use] pub fn port(mut self, port: u16) -> Self { self.port = Some(port); self } /// database user name #[must_use] pub fn username>(mut self, username: S) -> Self { self.username = Some(username.as_ref().to_os_string()); self } /// never prompt for password #[must_use] pub fn no_password(mut self) -> Self { self.no_password = true; self } /// force password prompt (should happen automatically) #[must_use] pub fn password(mut self) -> Self { self.password = true; self } /// user password #[must_use] pub fn pg_password>(mut self, pg_password: S) -> Self { self.pg_password = Some(pg_password.as_ref().to_os_string()); self } } impl CommandBuilder for PsqlBuilder { /// Get the program name fn get_program(&self) -> &'static OsStr { "psql".as_ref() } /// Location of the program binary fn get_program_dir(&self) -> &Option { &self.program_dir } /// Get the arguments for the command #[expect(clippy::too_many_lines)] fn get_args(&self) -> Vec { let mut args: Vec = Vec::new(); if let Some(psql_command) = &self.command { args.push("--command".into()); args.push(psql_command.into()); } if let Some(dbname) = &self.dbname { args.push("--dbname".into()); args.push(dbname.into()); } if let Some(file) = &self.file { args.push("--file".into()); args.push(file.into()); } if self.list { args.push("--list".into()); } if let Some((name, value)) = &self.variable { args.push("--variable".into()); args.push(format!("{}={}", name.to_string_lossy(), value.to_string_lossy()).into()); } if self.version { args.push("--version".into()); } if self.no_psqlrc { args.push("--no-psqlrc".into()); } if self.single_transaction { args.push("--single-transaction".into()); } if let Some(help) = &self.help { args.push("--help".into()); args.push(help.into()); } if self.echo_all { args.push("--echo-all".into()); } if self.echo_errors { args.push("--echo-errors".into()); } if self.echo_queries { 
args.push("--echo-queries".into()); } if self.echo_hidden { args.push("--echo-hidden".into()); } if let Some(log_file) = &self.log_file { args.push("--log-file".into()); args.push(log_file.into()); } if self.no_readline { args.push("--no-readline".into()); } if let Some(output) = &self.output { args.push("--output".into()); args.push(output.into()); } if self.quiet { args.push("--quiet".into()); } if self.single_step { args.push("--single-step".into()); } if self.single_line { args.push("--single-line".into()); } if self.no_align { args.push("--no-align".into()); } if self.csv { args.push("--csv".into()); } if let Some(field_separator) = &self.field_separator { args.push("--field-separator".into()); args.push(field_separator.into()); } if self.html { args.push("--html".into()); } if let Some((var, arg)) = &self.pset { args.push("--pset".into()); args.push(format!("{}={}", var.to_string_lossy(), arg.to_string_lossy()).into()); } if let Some(record_separator) = &self.record_separator { args.push("--record-separator".into()); args.push(record_separator.into()); } if self.tuples_only { args.push("--tuples-only".into()); } if let Some(table_attr) = &self.table_attr { args.push("--table-attr".into()); args.push(table_attr.into()); } if self.expanded { args.push("--expanded".into()); } if self.field_separator_zero { args.push("--field-separator-zero".into()); } if self.record_separator_zero { args.push("--record-separator-zero".into()); } if let Some(host) = &self.host { args.push("--host".into()); args.push(host.into()); } if let Some(port) = &self.port { args.push("--port".into()); args.push(port.to_string().into()); } if let Some(username) = &self.username { args.push("--username".into()); args.push(username.into()); } if self.no_password { args.push("--no-password".into()); } if self.password { args.push("--password".into()); } args } /// Get the environment variables for the command fn get_envs(&self) -> Vec<(OsString, OsString)> { let mut envs: Vec<(OsString, 
OsString)> = self.envs.clone();
        // The password is supplied via the PGPASSWORD environment variable,
        // never on the command line.
        if let Some(password) = &self.pg_password {
            envs.push(("PGPASSWORD".into(), password.into()));
        }
        envs
    }

    /// Set an environment variable for the command
    fn env<S: AsRef<OsStr>>(mut self, key: S, value: S) -> Self {
        self.envs
            .push((key.as_ref().to_os_string(), value.as_ref().to_os_string()));
        self
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::TestSettings;
    use crate::TestSocketSettings;
    use crate::traits::CommandToString;
    use test_log::test;

    #[test]
    fn test_builder_new() {
        let command = PsqlBuilder::new().program_dir(".").build();
        assert_eq!(
            PathBuf::from(".").join("psql"),
            PathBuf::from(command.to_command_string().replace('"', ""))
        );
    }

    #[test]
    fn test_builder_from() {
        let command = PsqlBuilder::from(&TestSettings).build();
        #[cfg(not(target_os = "windows"))]
        let command_prefix = r#"PGPASSWORD="password" "./psql" "#;
        #[cfg(target_os = "windows")]
        let command_prefix = r#"".\\psql" "#;

        assert_eq!(
            format!(
                r#"{command_prefix}"--host" "localhost" "--port" "5432" "--username" "postgres""#
            ),
            command.to_command_string()
        );
    }

    #[test]
    fn test_builder_from_socket() {
        let command = PsqlBuilder::from(&TestSocketSettings).build();
        #[cfg(not(target_os = "windows"))]
        let command_prefix = r#"PGPASSWORD="password" "./psql" "#;
        #[cfg(target_os = "windows")]
        let command_prefix = r#"".\\psql" "#;

        assert_eq!(
            format!(
                r#"{command_prefix}"--host" "/tmp/pg_socket" "--port" "5432" "--username" "postgres""#
            ),
            command.to_command_string()
        );
    }

    #[test]
    fn test_builder() {
        let command = PsqlBuilder::new()
            .env("PGDATABASE", "database")
            .command("SELECT * FROM test")
            .dbname("dbname")
            .file("test.sql")
            .list()
            .variable(("ON_ERROR_STOP", "1"))
            .version()
            .no_psqlrc()
            .single_transaction()
            .help("options")
            .echo_all()
            .echo_errors()
            .echo_queries()
            .echo_hidden()
            .log_file("psql.log")
            .no_readline()
            .output("output.txt")
            .quiet()
            .single_step()
            .single_line()
            .no_align()
            .csv()
            .field_separator("|")
            .html()
            .pset(("border", "1"))
            .record_separator("\n")
            .tuples_only()
            .table_attr("width=100")
            .expanded()
            .field_separator_zero()
            .record_separator_zero()
            .host("localhost")
            .port(5432)
            .username("postgres")
            .no_password()
            .password()
            .pg_password("password")
            .build();
        #[cfg(not(target_os = "windows"))]
        let command_prefix = r#"PGDATABASE="database" PGPASSWORD="password" "#;
        #[cfg(target_os = "windows")]
        let command_prefix = String::new();

        assert_eq!(
            format!(
                r#"{command_prefix}"psql" "--command" "SELECT * FROM test" "--dbname" "dbname" "--file" "test.sql" "--list" "--variable" "ON_ERROR_STOP=1" "--version" "--no-psqlrc" "--single-transaction" "--help" "options" "--echo-all" "--echo-errors" "--echo-queries" "--echo-hidden" "--log-file" "psql.log" "--no-readline" "--output" "output.txt" "--quiet" "--single-step" "--single-line" "--no-align" "--csv" "--field-separator" "|" "--html" "--pset" "border=1" "--record-separator" "\n" "--tuples-only" "--table-attr" "width=100" "--expanded" "--field-separator-zero" "--record-separator-zero" "--host" "localhost" "--port" "5432" "--username" "postgres" "--no-password" "--password""#
            ),
            command.to_command_string()
        );
    }
}

================================================
FILE: postgresql_commands/src/reindexdb.rs
================================================
use crate::Settings;
use crate::traits::CommandBuilder;
use std::ffi::{OsStr, OsString};
use std::path::PathBuf;

/// `reindexdb` reindexes a `PostgreSQL` database.
#[derive(Clone, Debug, Default)] pub struct ReindexDbBuilder { program_dir: Option, envs: Vec<(OsString, OsString)>, all: bool, concurrently: bool, dbname: Option, echo: bool, index: Option, jobs: Option, quiet: bool, system: bool, schema: Option, table: Option, tablespace: Option, verbose: bool, version: bool, help: bool, host: Option, port: Option, username: Option, no_password: bool, password: bool, pg_password: Option, maintenance_db: Option, } impl ReindexDbBuilder { /// Create a new [`ReindexDbBuilder`] #[must_use] pub fn new() -> Self { Self::default() } /// Create a new [`ReindexDbBuilder`] from [Settings] pub fn from(settings: &dyn Settings) -> Self { let mut builder = Self::new() .program_dir(settings.get_binary_dir()) .host(settings.get_host()) .port(settings.get_port()) .username(settings.get_username()) .pg_password(settings.get_password()); if let Some(socket_dir) = settings.get_socket_dir() { builder = builder.host(socket_dir.to_string_lossy().to_string()); } builder } /// Location of the program binary #[must_use] pub fn program_dir>(mut self, path: P) -> Self { self.program_dir = Some(path.into()); self } /// reindex all databases #[must_use] pub fn all(mut self) -> Self { self.all = true; self } /// reindex concurrently #[must_use] pub fn concurrently(mut self) -> Self { self.concurrently = true; self } /// database to reindex #[must_use] pub fn dbname>(mut self, dbname: S) -> Self { self.dbname = Some(dbname.as_ref().to_os_string()); self } /// show the commands being sent to the server #[must_use] pub fn echo(mut self) -> Self { self.echo = true; self } /// recreate specific index(es) only #[must_use] pub fn index>(mut self, index: S) -> Self { self.index = Some(index.as_ref().to_os_string()); self } /// use this many concurrent connections to reindex #[must_use] pub fn jobs(mut self, jobs: u32) -> Self { self.jobs = Some(jobs); self } /// don't write any messages #[must_use] pub fn quiet(mut self) -> Self { self.quiet = true; self } /// reindex 
system catalogs only #[must_use] pub fn system(mut self) -> Self { self.system = true; self } /// reindex specific schema(s) only #[must_use] pub fn schema>(mut self, schema: S) -> Self { self.schema = Some(schema.as_ref().to_os_string()); self } /// reindex specific table(s) only #[must_use] pub fn table>(mut self, table: S) -> Self { self.table = Some(table.as_ref().to_os_string()); self } /// tablespace where indexes are rebuilt #[must_use] pub fn tablespace>(mut self, tablespace: S) -> Self { self.tablespace = Some(tablespace.as_ref().to_os_string()); self } /// write a lot of output #[must_use] pub fn verbose(mut self) -> Self { self.verbose = true; self } /// output version information, then exit #[must_use] pub fn version(mut self) -> Self { self.version = true; self } /// show help, then exit #[must_use] pub fn help(mut self) -> Self { self.help = true; self } /// database server host or socket directory #[must_use] pub fn host>(mut self, host: S) -> Self { self.host = Some(host.as_ref().to_os_string()); self } /// database server port #[must_use] pub fn port(mut self, port: u16) -> Self { self.port = Some(port); self } /// user name to connect as #[must_use] pub fn username>(mut self, username: S) -> Self { self.username = Some(username.as_ref().to_os_string()); self } /// never prompt for password #[must_use] pub fn no_password(mut self) -> Self { self.no_password = true; self } /// force password prompt #[must_use] pub fn password(mut self) -> Self { self.password = true; self } /// user password #[must_use] pub fn pg_password>(mut self, pg_password: S) -> Self { self.pg_password = Some(pg_password.as_ref().to_os_string()); self } /// alternate maintenance database #[must_use] pub fn maintenance_db>(mut self, maintenance_db: S) -> Self { self.maintenance_db = Some(maintenance_db.as_ref().to_os_string()); self } } impl CommandBuilder for ReindexDbBuilder { /// Get the program name fn get_program(&self) -> &'static OsStr { "reindexdb".as_ref() } /// 
Location of the program binary fn get_program_dir(&self) -> &Option { &self.program_dir } /// Get the arguments for the command fn get_args(&self) -> Vec { let mut args: Vec = Vec::new(); if self.all { args.push("--all".into()); } if self.concurrently { args.push("--concurrently".into()); } if let Some(dbname) = &self.dbname { args.push("--dbname".into()); args.push(dbname.into()); } if self.echo { args.push("--echo".into()); } if let Some(index) = &self.index { args.push("--index".into()); args.push(index.into()); } if let Some(jobs) = &self.jobs { args.push("--jobs".into()); args.push(jobs.to_string().into()); } if self.quiet { args.push("--quiet".into()); } if self.system { args.push("--system".into()); } if let Some(schema) = &self.schema { args.push("--schema".into()); args.push(schema.into()); } if let Some(table) = &self.table { args.push("--table".into()); args.push(table.into()); } if let Some(tablespace) = &self.tablespace { args.push("--tablespace".into()); args.push(tablespace.into()); } if self.verbose { args.push("--verbose".into()); } if self.version { args.push("--version".into()); } if self.help { args.push("--help".into()); } if let Some(host) = &self.host { args.push("--host".into()); args.push(host.into()); } if let Some(port) = &self.port { args.push("--port".into()); args.push(port.to_string().into()); } if let Some(username) = &self.username { args.push("--username".into()); args.push(username.into()); } if self.no_password { args.push("--no-password".into()); } if self.password { args.push("--password".into()); } if let Some(maintenance_db) = &self.maintenance_db { args.push("--maintenance-db".into()); args.push(maintenance_db.into()); } args } /// Get the environment variables for the command fn get_envs(&self) -> Vec<(OsString, OsString)> { let mut envs: Vec<(OsString, OsString)> = self.envs.clone(); if let Some(password) = &self.pg_password { envs.push(("PGPASSWORD".into(), password.into())); } envs } /// Set an environment variable for 
the command fn env>(mut self, key: S, value: S) -> Self { self.envs .push((key.as_ref().to_os_string(), value.as_ref().to_os_string())); self } } #[cfg(test)] mod tests { use super::*; use crate::TestSettings; use crate::TestSocketSettings; use crate::traits::CommandToString; use test_log::test; #[test] fn test_builder_new() { let command = ReindexDbBuilder::new().program_dir(".").build(); assert_eq!( PathBuf::from(".").join("reindexdb"), PathBuf::from(command.to_command_string().replace('"', "")) ); } #[test] fn test_builder_from() { let command = ReindexDbBuilder::from(&TestSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGPASSWORD="password" "./reindexdb" "#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\reindexdb" "#; assert_eq!( format!( r#"{command_prefix}"--host" "localhost" "--port" "5432" "--username" "postgres""# ), command.to_command_string() ); } #[test] fn test_builder_from_socket() { let command = ReindexDbBuilder::from(&TestSocketSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGPASSWORD="password" "./reindexdb" "#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\reindexdb" "#; assert_eq!( format!( r#"{command_prefix}"--host" "/tmp/pg_socket" "--port" "5432" "--username" "postgres""# ), command.to_command_string() ); } #[test] fn test_builder() { let command = ReindexDbBuilder::new() .env("PGDATABASE", "database") .all() .concurrently() .dbname("dbname") .echo() .index("index") .jobs(1) .quiet() .system() .schema("schema") .table("table") .tablespace("tablespace") .verbose() .version() .help() .host("localhost") .port(5432) .username("username") .no_password() .password() .pg_password("password") .maintenance_db("maintenance-db") .build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGDATABASE="database" PGPASSWORD="password" "#; #[cfg(target_os = "windows")] let command_prefix = String::new(); assert_eq!( format!( r#"{command_prefix}"reindexdb" "--all" 
"--concurrently" "--dbname" "dbname" "--echo" "--index" "index" "--jobs" "1" "--quiet" "--system" "--schema" "schema" "--table" "table" "--tablespace" "tablespace" "--verbose" "--version" "--help" "--host" "localhost" "--port" "5432" "--username" "username" "--no-password" "--password" "--maintenance-db" "maintenance-db""# ), command.to_command_string() ); } } ================================================ FILE: postgresql_commands/src/traits.rs ================================================ use crate::error::{Error, Result}; use std::env::consts::OS; use std::ffi::{OsStr, OsString}; use std::fmt::Debug; #[cfg(target_os = "windows")] use std::os::windows::process::CommandExt; use std::path::PathBuf; use std::process::ExitStatus; use std::time::Duration; use tracing::debug; /// Constant for the `CREATE_NO_WINDOW` flag on Windows to prevent the creation of a console window /// when executing commands. This is useful for background processes or services that do not require /// user interaction. /// /// # References /// /// - [Windows API: Process Creation Flags](https://learn.microsoft.com/en-us/windows/win32/procthread/process-creation-flags#flags) #[cfg(target_os = "windows")] const CREATE_NO_WINDOW: u32 = 0x0800_0000; /// Interface for `PostgreSQL` settings pub trait Settings { /// Get the directory where the PostgreSQL binaries are located. fn get_binary_dir(&self) -> PathBuf; /// Get the host for the PostgreSQL connection. fn get_host(&self) -> OsString; /// Get the port for the PostgreSQL connection. fn get_port(&self) -> u16; /// Get the username for the PostgreSQL connection. fn get_username(&self) -> OsString; /// Get the password for the PostgreSQL connection. fn get_password(&self) -> OsString; /// Get the Unix socket directory, if configured. /// Returns `None` when using TCP/IP connections (the default). 
fn get_socket_dir(&self) -> Option<PathBuf> {
        None
    }
}

#[cfg(test)]
pub struct TestSettings;

#[cfg(test)]
impl Settings for TestSettings {
    fn get_binary_dir(&self) -> PathBuf {
        PathBuf::from(".")
    }

    fn get_host(&self) -> OsString {
        "localhost".into()
    }

    fn get_port(&self) -> u16 {
        5432
    }

    fn get_username(&self) -> OsString {
        "postgres".into()
    }

    fn get_password(&self) -> OsString {
        "password".into()
    }
}

/// Test settings that include a Unix socket directory
#[cfg(test)]
pub struct TestSocketSettings;

#[cfg(test)]
impl Settings for TestSocketSettings {
    fn get_binary_dir(&self) -> PathBuf {
        PathBuf::from(".")
    }

    fn get_host(&self) -> OsString {
        "localhost".into()
    }

    fn get_port(&self) -> u16 {
        5432
    }

    fn get_username(&self) -> OsString {
        "postgres".into()
    }

    fn get_password(&self) -> OsString {
        "password".into()
    }

    fn get_socket_dir(&self) -> Option<PathBuf> {
        Some(PathBuf::from("/tmp/pg_socket"))
    }
}

/// Trait to build a command
pub trait CommandBuilder: Debug {
    /// Get the program name
    fn get_program(&self) -> &'static OsStr;

    /// Location of the program binary
    fn get_program_dir(&self) -> &Option<PathBuf>;

    /// Fully qualified path to the program binary
    fn get_program_file(&self) -> PathBuf {
        let program_name = &self.get_program();
        match self.get_program_dir() {
            Some(program_dir) => program_dir.join(program_name),
            None => PathBuf::from(program_name),
        }
    }

    /// Get the arguments for the command
    fn get_args(&self) -> Vec<OsString> {
        vec![]
    }

    /// Get the environment variables for the command
    fn get_envs(&self) -> Vec<(OsString, OsString)>;

    /// Set an environment variable for the command
    #[must_use]
    fn env<S: AsRef<OsStr>>(self, key: S, value: S) -> Self;

    /// Build a standard Command
    fn build(self) -> std::process::Command
    where
        Self: Sized,
    {
        let program_file = self.get_program_file();
        let mut command = std::process::Command::new(program_file);
        #[cfg(target_os = "windows")]
        {
            // Prevent a console window from appearing for the child process.
            command.creation_flags(CREATE_NO_WINDOW);
        }
        command.args(self.get_args());
        command.envs(self.get_envs());
        command
    }

    #[cfg(feature = "tokio")]
    /// Build a tokio Command
    fn build_tokio(self) -> tokio::process::Command
    where
        Self: Sized,
    {
        let program_file = self.get_program_file();
        let mut command = tokio::process::Command::new(program_file);
        #[cfg(target_os = "windows")]
        {
            // Prevent a console window from appearing for the child process.
            command.creation_flags(CREATE_NO_WINDOW);
        }
        command.args(self.get_args());
        command.envs(self.get_envs());
        command
    }
}

/// Trait to convert a command to a string representation
pub trait CommandToString {
    fn to_command_string(&self) -> String;
}

/// Implement the [`CommandToString`] trait for [`Command`](std::process::Command)
impl CommandToString for std::process::Command {
    fn to_command_string(&self) -> String {
        format!("{self:?}")
    }
}

#[cfg(feature = "tokio")]
/// Implement the [`CommandToString`] trait for [`Command`](tokio::process::Command)
impl CommandToString for tokio::process::Command {
    fn to_command_string(&self) -> String {
        // The tokio Debug output wraps the inner std command; strip the wrapper
        // so the string matches the std::process::Command representation.
        format!("{self:?}")
            .replace("Command { std: ", "")
            .replace(", kill_on_drop: false }", "")
    }
}

/// Interface for executing a command
pub trait CommandExecutor {
    /// Execute the command and return the stdout and stderr
    ///
    /// # Errors
    ///
    /// Returns an error if the command fails
    fn execute(&mut self) -> Result<(String, String)>;
}

/// Interface for executing a command
pub trait AsyncCommandExecutor {
    /// Execute the command and return the stdout and stderr
    #[expect(async_fn_in_trait)]
    async fn execute(&mut self, timeout: Option<Duration>) -> Result<(String, String)>;
}

/// Implement the [`CommandExecutor`] trait for [`Command`](std::process::Command)
impl CommandExecutor for std::process::Command {
    /// Execute the command and return the stdout and stderr
    fn execute(&mut self) -> Result<(String, String)> {
        debug!("Executing command: {}", self.to_command_string());
        let program = self.get_program().to_string_lossy().to_string();
        let stdout: String;
        let stderr: String;
        let status: ExitStatus;
        if OS == "windows" && program.as_str().ends_with("pg_ctl") {
            // The pg_ctl process can hang on Windows when attempting to get stdout/stderr.
            let mut process = self
                .stdout(std::process::Stdio::piped())
                .stderr(std::process::Stdio::piped())
                .spawn()?;
            stdout = String::new();
            stderr = String::new();
            status = process.wait()?;
        } else {
            let output = self.output()?;
            stdout = String::from_utf8_lossy(&output.stdout).into_owned();
            stderr = String::from_utf8_lossy(&output.stderr).into_owned();
            status = output.status;
        }
        debug!(
            "Result: {}\nstdout: {}\nstderr: {}",
            status.code().map_or("None".to_string(), |c| c.to_string()),
            stdout,
            stderr
        );

        if status.success() {
            Ok((stdout, stderr))
        } else {
            Err(Error::CommandError { stdout, stderr })
        }
    }
}

#[cfg(feature = "tokio")]
/// Implement the [`AsyncCommandExecutor`] trait for [`Command`](tokio::process::Command)
impl AsyncCommandExecutor for tokio::process::Command {
    /// Execute the command and return the stdout and stderr
    async fn execute(&mut self, timeout: Option<Duration>) -> Result<(String, String)> {
        debug!("Executing command: {}", self.to_command_string());
        let program = self.as_std().get_program().to_string_lossy().to_string();
        let stdout: String;
        let stderr: String;
        let status: ExitStatus;
        if OS == "windows" && program.as_str().ends_with("pg_ctl") {
            // The pg_ctl process can hang on Windows when attempting to get stdout/stderr.
            let mut process = self
                .stdout(std::process::Stdio::piped())
                .stderr(std::process::Stdio::piped())
                .spawn()?;
            stdout = String::new();
            stderr = String::new();
            status = process.wait().await?;
        } else {
            let output = match timeout {
                Some(duration) => tokio::time::timeout(duration, self.output()).await?,
                None => self.output().await,
            }?;
            stdout = String::from_utf8_lossy(&output.stdout).into_owned();
            stderr = String::from_utf8_lossy(&output.stderr).into_owned();
            status = output.status;
        }
        debug!(
            "Result: {}\nstdout: {}\nstderr: {}",
            status.code().map_or("None".to_string(), |c| c.to_string()),
            stdout,
            stderr
        );

        if status.success() {
            Ok((stdout, stderr))
        } else {
            Err(Error::CommandError { stdout, stderr })
        }
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use test_log::test;

    #[test]
    fn test_command_builder_defaults() {
        #[derive(Debug, Default)]
        struct DefaultCommandBuilder {
            program_dir: Option<PathBuf>,
            envs: Vec<(OsString, OsString)>,
        }

        impl CommandBuilder for DefaultCommandBuilder {
            fn get_program(&self) -> &'static OsStr {
                "test".as_ref()
            }

            fn get_program_dir(&self) -> &Option<PathBuf> {
                &self.program_dir
            }

            fn get_envs(&self) -> Vec<(OsString, OsString)> {
                self.envs.clone()
            }

            fn env<S: AsRef<OsStr>>(mut self, key: S, value: S) -> Self {
                self.envs
                    .push((key.as_ref().to_os_string(), value.as_ref().to_os_string()));
                self
            }
        }

        let builder = DefaultCommandBuilder::default();
        let command = builder.env("ENV", "foo").build();

        #[cfg(not(target_os = "windows"))]
        let command_prefix = r#"ENV="foo" "#;
        #[cfg(target_os = "windows")]
        let command_prefix = String::new();
        assert_eq!(
            format!(r#"{command_prefix}"test""#),
            command.to_command_string()
        );
    }

    #[derive(Debug)]
    struct TestCommandBuilder {
        program_dir: Option<PathBuf>,
        args: Vec<OsString>,
        envs: Vec<(OsString, OsString)>,
    }

    impl CommandBuilder for TestCommandBuilder {
        fn get_program(&self) -> &'static OsStr {
            "test".as_ref()
        }

        fn get_program_dir(&self) -> &Option<PathBuf> {
            &self.program_dir
        }

        fn get_args(&self) -> Vec<OsString> {
            self.args.clone()
        }

        fn get_envs(&self) -> Vec<(OsString, OsString)> {
            self.envs.clone()
        }

        fn env<S: AsRef<OsStr>>(mut self, key: S, value: S) -> Self {
            self.envs
                .push((key.as_ref().to_os_string(), value.as_ref().to_os_string()));
            self
        }
    }

    #[test]
    fn test_standard_command_builder() {
        let builder = TestCommandBuilder {
            program_dir: None,
            args: vec!["--help".to_string().into()],
            envs: vec![],
        };
        let command = builder.env("PASSWORD", "foo").build();

        #[cfg(not(target_os = "windows"))]
        let command_prefix = r#"PASSWORD="foo" "#;
        #[cfg(target_os = "windows")]
        let command_prefix = String::new();
        assert_eq!(
            format!(
                r#"{command_prefix}"{}" "--help""#,
                PathBuf::from("test").to_string_lossy()
            ),
            command.to_command_string()
        );
    }

    #[cfg(feature = "tokio")]
    #[test]
    fn test_tokio_command_builder() {
        let builder = TestCommandBuilder {
            program_dir: None,
            args: vec!["--help".to_string().into()],
            envs: vec![],
        };
        let command = builder.env("PASSWORD", "foo").build_tokio();

        assert_eq!(
            format!(
                r#"PASSWORD="foo" "{}" "--help""#,
                PathBuf::from("test").to_string_lossy()
            ),
            command.to_command_string()
        );
    }

    #[test]
    fn test_standard_to_command_string() {
        let mut command = std::process::Command::new("test");
        command.arg("-l");
        assert_eq!(r#""test" "-l""#, command.to_command_string(),);
    }

    #[cfg(feature = "tokio")]
    #[test]
    fn test_tokio_to_command_string() {
        let mut command = tokio::process::Command::new("test");
        command.arg("-l");
        assert_eq!(r#""test" "-l""#, command.to_command_string(),);
    }

    #[test(tokio::test)]
    async fn test_standard_command_execute() -> Result<()> {
        #[cfg(not(target_os = "windows"))]
        let mut command = std::process::Command::new("sh");
        #[cfg(not(target_os = "windows"))]
        command.args(["-c", "echo foo"]);

        #[cfg(target_os = "windows")]
        let mut command = std::process::Command::new("cmd");
        #[cfg(target_os = "windows")]
        command.args(["/C", "echo foo"]);

        let (stdout, stderr) = command.execute()?;
        assert!(stdout.starts_with("foo"));
        assert!(stderr.is_empty());
        Ok(())
    }

    #[test(tokio::test)]
    async fn test_standard_command_execute_error() {
        let mut command =
std::process::Command::new("bogus_command"); assert!(command.execute().is_err()); } #[cfg(feature = "tokio")] #[test(tokio::test)] async fn test_tokio_command_execute() -> Result<()> { #[cfg(not(target_os = "windows"))] let mut command = tokio::process::Command::new("sh"); #[cfg(not(target_os = "windows"))] command.args(["-c", "echo foo"]); #[cfg(target_os = "windows")] let mut command = tokio::process::Command::new("cmd"); #[cfg(target_os = "windows")] command.args(["/C", "echo foo"]); let (stdout, stderr) = command.execute(None).await?; assert!(stdout.starts_with("foo")); assert!(stderr.is_empty()); Ok(()) } #[cfg(feature = "tokio")] #[test(tokio::test)] async fn test_tokio_command_execute_error() -> Result<()> { let mut command = tokio::process::Command::new("bogus_command"); assert!(command.execute(None).await.is_err()); Ok(()) } } ================================================ FILE: postgresql_commands/src/vacuumdb.rs ================================================ use crate::Settings; use crate::traits::CommandBuilder; use std::convert::AsRef; use std::ffi::{OsStr, OsString}; use std::path::PathBuf; /// `vacuumdb` cleans and analyzes a `PostgreSQL` database. #[derive(Clone, Debug, Default)] pub struct VacuumDbBuilder { program_dir: Option, envs: Vec<(OsString, OsString)>, all: bool, buffer_usage_limit: Option, dbname: Option, disable_page_skipping: bool, echo: bool, full: bool, freeze: bool, force_index_cleanup: bool, jobs: Option, min_mxid_age: Option, min_xid_age: Option, no_index_cleanup: bool, no_process_main: bool, no_process_toast: bool, no_truncate: bool, schema: Option, exclude_schema: Option, parallel: Option, quiet: bool, skip_locked: bool, table: Option, verbose: bool, version: bool, analyze: bool, analyze_only: bool, analyze_in_stages: bool, help: bool, host: Option, port: Option, username: Option, no_password: bool, password: bool, pg_password: Option, maintenance_db: Option, } /// vacuumdb cleans and analyzes a `PostgreSQL` database. 
impl VacuumDbBuilder { /// Create a new [`VacuumDbBuilder`] #[must_use] pub fn new() -> Self { Self::default() } /// Create a new [`VacuumDbBuilder`] from [Settings] pub fn from(settings: &dyn Settings) -> Self { let mut builder = Self::new() .program_dir(settings.get_binary_dir()) .host(settings.get_host()) .port(settings.get_port()) .username(settings.get_username()) .pg_password(settings.get_password()); if let Some(socket_dir) = settings.get_socket_dir() { builder = builder.host(socket_dir.to_string_lossy().to_string()); } builder } /// Location of the program binary #[must_use] pub fn program_dir>(mut self, path: P) -> Self { self.program_dir = Some(path.into()); self } /// vacuum all databases #[must_use] pub fn all(mut self) -> Self { self.all = true; self } /// size of ring buffer used for vacuum #[must_use] pub fn buffer_usage_limit>(mut self, buffer_usage_limit: S) -> Self { self.buffer_usage_limit = Some(buffer_usage_limit.as_ref().to_os_string()); self } /// database to vacuum #[must_use] pub fn dbname>(mut self, dbname: S) -> Self { self.dbname = Some(dbname.as_ref().to_os_string()); self } /// disable all page-skipping behavior #[must_use] pub fn disable_page_skipping(mut self) -> Self { self.disable_page_skipping = true; self } /// show the commands being sent to the server #[must_use] pub fn echo(mut self) -> Self { self.echo = true; self } /// do full vacuuming #[must_use] pub fn full(mut self) -> Self { self.full = true; self } /// freeze row transaction information #[must_use] pub fn freeze(mut self) -> Self { self.freeze = true; self } /// always remove index entries that point to dead tuples #[must_use] pub fn force_index_cleanup(mut self) -> Self { self.force_index_cleanup = true; self } /// use this many concurrent connections to vacuum #[must_use] pub fn jobs(mut self, jobs: u32) -> Self { self.jobs = Some(jobs); self } /// minimum multixact ID age of tables to vacuum #[must_use] pub fn min_mxid_age>(mut self, min_mxid_age: S) -> Self { 
self.min_mxid_age = Some(min_mxid_age.as_ref().to_os_string()); self } /// minimum transaction ID age of tables to vacuum #[must_use] pub fn min_xid_age>(mut self, min_xid_age: S) -> Self { self.min_xid_age = Some(min_xid_age.as_ref().to_os_string()); self } /// don't remove index entries that point to dead tuples #[must_use] pub fn no_index_cleanup(mut self) -> Self { self.no_index_cleanup = true; self } /// skip the main relation #[must_use] pub fn no_process_main(mut self) -> Self { self.no_process_main = true; self } /// skip the TOAST table associated with the table to vacuum #[must_use] pub fn no_process_toast(mut self) -> Self { self.no_process_toast = true; self } /// don't truncate empty pages at the end of the table #[must_use] pub fn no_truncate(mut self) -> Self { self.no_truncate = true; self } /// vacuum tables in the specified schema(s) only #[must_use] pub fn schema>(mut self, schema: S) -> Self { self.schema = Some(schema.as_ref().to_os_string()); self } /// do not vacuum tables in the specified schema(s) #[must_use] pub fn exclude_schema>(mut self, exclude_schema: S) -> Self { self.exclude_schema = Some(exclude_schema.as_ref().to_os_string()); self } /// use this many background workers for vacuum, if available #[must_use] pub fn parallel(mut self, parallel: u32) -> Self { self.parallel = Some(parallel); self } /// don't write any messages #[must_use] pub fn quiet(mut self) -> Self { self.quiet = true; self } /// skip relations that cannot be immediately locked #[must_use] pub fn skip_locked(mut self) -> Self { self.skip_locked = true; self } /// vacuum specific table(s) only #[must_use] pub fn table>(mut self, table: S) -> Self { self.table = Some(table.as_ref().to_os_string()); self } /// write a lot of output #[must_use] pub fn verbose(mut self) -> Self { self.verbose = true; self } /// output version information, then exit #[must_use] pub fn version(mut self) -> Self { self.version = true; self } /// update optimizer statistics #[must_use] pub 
fn analyze(mut self) -> Self { self.analyze = true; self } /// only update optimizer statistics; no vacuum #[must_use] pub fn analyze_only(mut self) -> Self { self.analyze_only = true; self } /// only update optimizer statistics, in multiple stages for faster results; no vacuum #[must_use] pub fn analyze_in_stages(mut self) -> Self { self.analyze_in_stages = true; self } /// show help, then exit #[must_use] pub fn help(mut self) -> Self { self.help = true; self } /// database server host or socket directory #[must_use] pub fn host>(mut self, host: S) -> Self { self.host = Some(host.as_ref().to_os_string()); self } /// database server port #[must_use] pub fn port(mut self, port: u16) -> Self { self.port = Some(port); self } /// user name to connect as #[must_use] pub fn username>(mut self, username: S) -> Self { self.username = Some(username.as_ref().to_os_string()); self } /// never prompt for password #[must_use] pub fn no_password(mut self) -> Self { self.no_password = true; self } /// force password prompt #[must_use] pub fn password(mut self) -> Self { self.password = true; self } /// user password #[must_use] pub fn pg_password>(mut self, pg_password: S) -> Self { self.pg_password = Some(pg_password.as_ref().to_os_string()); self } /// alternate maintenance database #[must_use] pub fn maintenance_db>(mut self, maintenance_db: S) -> Self { self.maintenance_db = Some(maintenance_db.as_ref().to_os_string()); self } } impl CommandBuilder for VacuumDbBuilder { /// Get the program name fn get_program(&self) -> &'static OsStr { "vacuumdb".as_ref() } /// Location of the program binary fn get_program_dir(&self) -> &Option { &self.program_dir } /// Get the arguments for the command #[expect(clippy::too_many_lines)] fn get_args(&self) -> Vec { let mut args: Vec = Vec::new(); if self.all { args.push("--all".into()); } if let Some(buffer_usage_limit) = &self.buffer_usage_limit { args.push("--buffer-usage-limit".into()); args.push(buffer_usage_limit.into()); } if let 
Some(dbname) = &self.dbname { args.push("--dbname".into()); args.push(dbname.into()); } if self.disable_page_skipping { args.push("--disable-page-skipping".into()); } if self.echo { args.push("--echo".into()); } if self.full { args.push("--full".into()); } if self.freeze { args.push("--freeze".into()); } if self.force_index_cleanup { args.push("--force-index-cleanup".into()); } if let Some(jobs) = &self.jobs { args.push("--jobs".into()); args.push(jobs.to_string().into()); } if let Some(min_mxid_age) = &self.min_mxid_age { args.push("--min-mxid-age".into()); args.push(min_mxid_age.into()); } if let Some(min_xid_age) = &self.min_xid_age { args.push("--min-xid-age".into()); args.push(min_xid_age.into()); } if self.no_index_cleanup { args.push("--no-index-cleanup".into()); } if self.no_process_main { args.push("--no-process-main".into()); } if self.no_process_toast { args.push("--no-process-toast".into()); } if self.no_truncate { args.push("--no-truncate".into()); } if let Some(schema) = &self.schema { args.push("--schema".into()); args.push(schema.into()); } if let Some(exclude_schema) = &self.exclude_schema { args.push("--exclude-schema".into()); args.push(exclude_schema.into()); } if let Some(parallel) = &self.parallel { args.push("--parallel".into()); args.push(parallel.to_string().into()); } if self.quiet { args.push("--quiet".into()); } if self.skip_locked { args.push("--skip-locked".into()); } if let Some(table) = &self.table { args.push("--table".into()); args.push(table.into()); } if self.verbose { args.push("--verbose".into()); } if self.version { args.push("--version".into()); } if self.analyze { args.push("--analyze".into()); } if self.analyze_only { args.push("--analyze-only".into()); } if self.analyze_in_stages { args.push("--analyze-in-stages".into()); } if self.help { args.push("--help".into()); } if let Some(host) = &self.host { args.push("--host".into()); args.push(host.into()); } if let Some(port) = &self.port { args.push("--port".into()); 
args.push(port.to_string().into()); } if let Some(username) = &self.username { args.push("--username".into()); args.push(username.into()); } if self.no_password { args.push("--no-password".into()); } if self.password { args.push("--password".into()); } if let Some(maintenance_db) = &self.maintenance_db { args.push("--maintenance-db".into()); args.push(maintenance_db.into()); } args } /// Get the environment variables for the command fn get_envs(&self) -> Vec<(OsString, OsString)> { let mut envs: Vec<(OsString, OsString)> = self.envs.clone(); if let Some(password) = &self.pg_password { envs.push(("PGPASSWORD".into(), password.into())); } envs } /// Set an environment variable for the command fn env>(mut self, key: S, value: S) -> Self { self.envs .push((key.as_ref().to_os_string(), value.as_ref().to_os_string())); self } } #[cfg(test)] mod tests { use super::*; use crate::TestSettings; use crate::TestSocketSettings; use crate::traits::CommandToString; use test_log::test; #[test] fn test_builder_new() { let command = VacuumDbBuilder::new().program_dir(".").build(); assert_eq!( PathBuf::from(".").join("vacuumdb"), PathBuf::from(command.to_command_string().replace('"', "")) ); } #[test] fn test_builder_from() { let command = VacuumDbBuilder::from(&TestSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGPASSWORD="password" "./vacuumdb" "#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\vacuumdb" "#; assert_eq!( format!( r#"{command_prefix}"--host" "localhost" "--port" "5432" "--username" "postgres""# ), command.to_command_string() ); } #[test] fn test_builder_from_socket() { let command = VacuumDbBuilder::from(&TestSocketSettings).build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGPASSWORD="password" "./vacuumdb" "#; #[cfg(target_os = "windows")] let command_prefix = r#"".\\vacuumdb" "#; assert_eq!( format!( r#"{command_prefix}"--host" "/tmp/pg_socket" "--port" "5432" "--username" "postgres""# ), 
command.to_command_string() ); } #[test] fn test_builder() { let command = VacuumDbBuilder::new() .env("PGDATABASE", "database") .all() .buffer_usage_limit("buffer_usage_limit") .dbname("dbname") .disable_page_skipping() .echo() .full() .freeze() .force_index_cleanup() .jobs(1) .min_mxid_age("min_mxid_age") .min_xid_age("min_xid_age") .no_index_cleanup() .no_process_main() .no_process_toast() .no_truncate() .schema("schema") .exclude_schema("exclude_schema") .parallel(1) .quiet() .skip_locked() .table("table") .verbose() .version() .analyze() .analyze_only() .analyze_in_stages() .help() .host("localhost") .port(5432) .username("username") .no_password() .password() .pg_password("password") .maintenance_db("maintenance_db") .build(); #[cfg(not(target_os = "windows"))] let command_prefix = r#"PGDATABASE="database" PGPASSWORD="password" "#; #[cfg(target_os = "windows")] let command_prefix = String::new(); assert_eq!( format!( r#"{command_prefix}"vacuumdb" "--all" "--buffer-usage-limit" "buffer_usage_limit" "--dbname" "dbname" "--disable-page-skipping" "--echo" "--full" "--freeze" "--force-index-cleanup" "--jobs" "1" "--min-mxid-age" "min_mxid_age" "--min-xid-age" "min_xid_age" "--no-index-cleanup" "--no-process-main" "--no-process-toast" "--no-truncate" "--schema" "schema" "--exclude-schema" "exclude_schema" "--parallel" "1" "--quiet" "--skip-locked" "--table" "table" "--verbose" "--version" "--analyze" "--analyze-only" "--analyze-in-stages" "--help" "--host" "localhost" "--port" "5432" "--username" "username" "--no-password" "--password" "--maintenance-db" "maintenance_db""# ), command.to_command_string() ); } } ================================================ FILE: postgresql_commands/src/vacuumlo.rs ================================================ use crate::Settings; use crate::traits::CommandBuilder; use std::convert::AsRef; use std::ffi::{OsStr, OsString}; use std::path::PathBuf; /// `vacuumlo` removes unreferenced large objects from databases. 
#[derive(Clone, Debug, Default)]
pub struct VacuumLoBuilder {
    program_dir: Option<PathBuf>,
    envs: Vec<(OsString, OsString)>,
    limit: Option<usize>,
    dry_run: bool,
    verbose: bool,
    version: bool,
    help: bool,
    host: Option<OsString>,
    port: Option<u16>,
    username: Option<OsString>,
    no_password: bool,
    password: bool,
    pg_password: Option<OsString>,
}

impl VacuumLoBuilder {
    /// Create a new [`VacuumLoBuilder`]
    #[must_use]
    pub fn new() -> Self {
        Self::default()
    }

    /// Create a new [`VacuumLoBuilder`] from [Settings]
    pub fn from(settings: &dyn Settings) -> Self {
        let mut builder = Self::new()
            .program_dir(settings.get_binary_dir())
            .host(settings.get_host())
            .port(settings.get_port())
            .username(settings.get_username())
            .pg_password(settings.get_password());

        // A Unix socket directory takes precedence over the TCP/IP host.
        if let Some(socket_dir) = settings.get_socket_dir() {
            builder = builder.host(socket_dir.to_string_lossy().to_string());
        }

        builder
    }

    /// Location of the program binary
    #[must_use]
    pub fn program_dir<P: Into<PathBuf>>(mut self, path: P) -> Self {
        self.program_dir = Some(path.into());
        self
    }

    /// commit after removing each LIMIT large objects
    #[must_use]
    pub fn limit(mut self, limit: usize) -> Self {
        self.limit = Some(limit);
        self
    }

    /// don't remove large objects, just show what would be done
    #[must_use]
    pub fn dry_run(mut self) -> Self {
        self.dry_run = true;
        self
    }

    /// write a lot of progress messages
    #[must_use]
    pub fn verbose(mut self) -> Self {
        self.verbose = true;
        self
    }

    /// output version information, then exit
    #[must_use]
    pub fn version(mut self) -> Self {
        self.version = true;
        self
    }

    /// show help, then exit
    #[must_use]
    pub fn help(mut self) -> Self {
        self.help = true;
        self
    }

    /// database server host or socket directory
    #[must_use]
    pub fn host<S: AsRef<OsStr>>(mut self, host: S) -> Self {
        self.host = Some(host.as_ref().to_os_string());
        self
    }

    /// database server port
    #[must_use]
    pub fn port(mut self, port: u16) -> Self {
        self.port = Some(port);
        self
    }

    /// user name to connect as
    #[must_use]
    pub fn username<S: AsRef<OsStr>>(mut self, username: S) -> Self {
        self.username = Some(username.as_ref().to_os_string());
        self
    }

    /// never prompt for password
    #[must_use]
    pub fn no_password(mut self) -> Self {
        self.no_password = true;
        self
    }

    /// force password prompt
    #[must_use]
    pub fn password(mut self) -> Self {
        self.password = true;
        self
    }

    /// user password
    #[must_use]
    pub fn pg_password<S: AsRef<OsStr>>(mut self, pg_password: S) -> Self {
        self.pg_password = Some(pg_password.as_ref().to_os_string());
        self
    }
}

impl CommandBuilder for VacuumLoBuilder {
    /// Get the program name
    fn get_program(&self) -> &'static OsStr {
        "vacuumlo".as_ref()
    }

    /// Location of the program binary
    fn get_program_dir(&self) -> &Option<PathBuf> {
        &self.program_dir
    }

    /// Get the arguments for the command
    fn get_args(&self) -> Vec<OsString> {
        let mut args: Vec<OsString> = Vec::new();

        if let Some(limit) = &self.limit {
            args.push("--limit".into());
            args.push(limit.to_string().into());
        }

        if self.dry_run {
            args.push("--dry-run".into());
        }

        if self.verbose {
            args.push("--verbose".into());
        }

        if self.version {
            args.push("--version".into());
        }

        if self.help {
            args.push("--help".into());
        }

        if let Some(host) = &self.host {
            args.push("--host".into());
            args.push(host.into());
        }

        if let Some(port) = &self.port {
            args.push("--port".into());
            args.push(port.to_string().into());
        }

        if let Some(username) = &self.username {
            args.push("--username".into());
            args.push(username.into());
        }

        if self.no_password {
            args.push("--no-password".into());
        }

        if self.password {
            args.push("--password".into());
        }

        args
    }

    /// Get the environment variables for the command
    fn get_envs(&self) -> Vec<(OsString, OsString)> {
        let mut envs: Vec<(OsString, OsString)> = self.envs.clone();

        if let Some(password) = &self.pg_password {
            envs.push(("PGPASSWORD".into(), password.into()));
        }

        envs
    }

    /// Set an environment variable for the command
    fn env<S: AsRef<OsStr>>(mut self, key: S, value: S) -> Self {
        self.envs
            .push((key.as_ref().to_os_string(), value.as_ref().to_os_string()));
        self
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::TestSettings;
    use crate::TestSocketSettings;
    use crate::traits::CommandToString;
    use test_log::test;

    #[test]
    fn test_builder_new() {
        let command = VacuumLoBuilder::new().program_dir(".").build();
        assert_eq!(
            PathBuf::from(".").join("vacuumlo"),
            PathBuf::from(command.to_command_string().replace('"', ""))
        );
    }

    #[test]
    fn test_builder_from() {
        let command = VacuumLoBuilder::from(&TestSettings).build();
        #[cfg(not(target_os = "windows"))]
        let command_prefix = r#"PGPASSWORD="password" "./vacuumlo" "#;
        #[cfg(target_os = "windows")]
        let command_prefix = r#"".\\vacuumlo" "#;

        assert_eq!(
            format!(
                r#"{command_prefix}"--host" "localhost" "--port" "5432" "--username" "postgres""#
            ),
            command.to_command_string()
        );
    }

    #[test]
    fn test_builder_from_socket() {
        let command = VacuumLoBuilder::from(&TestSocketSettings).build();
        #[cfg(not(target_os = "windows"))]
        let command_prefix = r#"PGPASSWORD="password" "./vacuumlo" "#;
        #[cfg(target_os = "windows")]
        let command_prefix = r#"".\\vacuumlo" "#;

        assert_eq!(
            format!(
                r#"{command_prefix}"--host" "/tmp/pg_socket" "--port" "5432" "--username" "postgres""#
            ),
            command.to_command_string()
        );
    }

    #[test]
    fn test_builder() {
        let command = VacuumLoBuilder::new()
            .env("PGDATABASE", "database")
            .limit(100)
            .dry_run()
            .verbose()
            .version()
            .help()
            .host("localhost")
            .port(5432)
            .username("postgres")
            .no_password()
            .password()
            .pg_password("password")
            .build();
        #[cfg(not(target_os = "windows"))]
        let command_prefix = r#"PGDATABASE="database" PGPASSWORD="password" "#;
        #[cfg(target_os = "windows")]
        let command_prefix = String::new();

        assert_eq!(
            format!(
                r#"{command_prefix}"vacuumlo" "--limit" "100" "--dry-run" "--verbose" "--version" "--help" "--host" "localhost" "--port" "5432" "--username" "postgres" "--no-password" "--password""#
            ),
            command.to_command_string()
        );
    }
}

================================================
FILE: postgresql_embedded/Cargo.toml
================================================
[package]
authors.workspace = true
build = "build/build.rs"
categories.workspace = true
description = "Install and run a PostgreSQL database locally on
Linux, MacOS or Windows. PostgreSQL can be bundled with your application, or downloaded on demand." edition.workspace = true keywords.workspace = true license.workspace = true name = "postgresql_embedded" repository = "https://github.com/theseus-rs/postgresql-embedded" rust-version.workspace = true version.workspace = true [build-dependencies] anyhow = { workspace = true } postgresql_archive = { path = "../postgresql_archive", version = "0.20.2", default-features = false } target-triple = { workspace = true } tokio = { workspace = true, features = ["full"] } url = { workspace = true } [dependencies] postgresql_archive = { path = "../postgresql_archive", version = "0.20.2", default-features = false } postgresql_commands = { path = "../postgresql_commands", version = "0.20.2" } rand = { workspace = true } semver = { workspace = true } sqlx = { workspace = true, features = ["runtime-tokio"] } tempfile = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = ["full"], optional = true } tracing = { workspace = true, features = ["log"] } url = { workspace = true } [dev-dependencies] anyhow = { workspace = true } criterion = { workspace = true } test-log = { workspace = true } tokio = { workspace = true, features = ["full"] } [features] default = [ "native-tls", "theseus", ] blocking = ["tokio"] bundled = ["postgresql_archive/github"] indicatif = [ "postgresql_archive/indicatif", ] native-tls = [ "postgresql_archive/native-tls", "sqlx/tls-native-tls", ] rustls = [ "postgresql_archive/rustls", "sqlx/tls-rustls", ] theseus = [ "postgresql_archive/theseus", ] tokio = [ "dep:tokio", "postgresql_commands/tokio", "sqlx/runtime-tokio", ] zonky = [ "postgresql_archive/zonky", ] [package.metadata.release] dependent-version = "upgrade" [package.metadata.docs.rs] no-default-features = true features = ["blocking", "theseus", "tokio"] targets = ["x86_64-unknown-linux-gnu"] [[bench]] harness = false name = "embedded" 
================================================ FILE: postgresql_embedded/README.md ================================================ # PostgreSQL Embedded [![ci](https://github.com/theseus-rs/postgresql-embedded/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/theseus-rs/postgresql-embedded/actions/workflows/ci.yml) [![Documentation](https://docs.rs/postgresql_embedded/badge.svg)](https://docs.rs/postgresql_embedded) [![Code Coverage](https://codecov.io/gh/theseus-rs/postgresql-embedded/branch/main/graph/badge.svg)](https://codecov.io/gh/theseus-rs/postgresql-embedded) [![Benchmarks](https://img.shields.io/badge/%F0%9F%90%B0_bencher-enabled-6ec241)](https://bencher.dev/perf/theseus-rs-postgresql-embedded) [![Latest version](https://img.shields.io/crates/v/postgresql_embedded.svg)](https://crates.io/crates/postgresql_embedded) [![License](https://img.shields.io/crates/l/postgresql_embedded)](https://github.com/theseus-rs/postgresql-embedded/tree/main/postgresql_embedded#license) [![Semantic Versioning](https://img.shields.io/badge/%E2%9A%99%EF%B8%8F_SemVer-2.0.0-blue)](https://semver.org/spec/v2.0.0.html) Install and run a PostgreSQL database locally on Linux, MacOS or Windows. PostgreSQL can be bundled with your application, or downloaded on demand. This library provides an embedded-like experience for PostgreSQL similar to what you would have with SQLite. This is accomplished by downloading and installing PostgreSQL during runtime. There is also a "bundled" feature that when enabled, will download the PostgreSQL installation archive at compile time, include it in your binary and install from the binary version at runtime. In either case, PostgreSQL will run in a separate process space. 
## Features - installing and running PostgreSQL - running PostgreSQL on ephemeral ports - Unix socket support - async and blocking API - bundling the PostgreSQL archive in an executable - semantic version resolution - ability to configure PostgreSQL startup options - settings builder for fluent configuration - URL based configuration - choice of native-tls or rustls ## Examples ### Asynchronous API ```rust use postgresql_embedded::{PostgreSQL, Result}; #[tokio::main] async fn main() -> Result<()> { let mut postgresql = PostgreSQL::default(); postgresql.setup().await?; postgresql.start().await?; let database_name = "test"; postgresql.create_database(database_name).await?; postgresql.database_exists(database_name).await?; postgresql.drop_database(database_name).await?; postgresql.stop().await } ``` ### Synchronous API ```rust use postgresql_embedded::Result; use postgresql_embedded::blocking::PostgreSQL; fn main() -> Result<()> { let mut postgresql = PostgreSQL::default(); postgresql.setup()?; postgresql.start()?; let database_name = "test"; postgresql.create_database(database_name)?; postgresql.database_exists(database_name)?; postgresql.drop_database(database_name)?; postgresql.stop() } ``` ### Settings Builder ```rust use postgresql_embedded::{PostgreSQL, Result, SettingsBuilder}; #[tokio::main] async fn main() -> Result<()> { let settings = SettingsBuilder::new() .host("127.0.0.1") .port(5433) .username("admin") .password("secret") .temporary(false) .config("max_connections", "100") .build(); let mut postgresql = PostgreSQL::new(settings); postgresql.setup().await?; postgresql.start().await?; postgresql.stop().await } ``` ### Unix Socket ```rust use postgresql_embedded::{PostgreSQL, Result, SettingsBuilder}; use std::path::PathBuf; #[tokio::main] async fn main() -> Result<()> { let settings = SettingsBuilder::new() .socket_dir(PathBuf::from("/tmp/pg_socket")) .build(); let mut postgresql = PostgreSQL::new(settings); postgresql.setup().await?; 
postgresql.start().await?; let database_name = "test"; postgresql.create_database(database_name).await?; postgresql.database_exists(database_name).await?; postgresql.drop_database(database_name).await?; postgresql.stop().await } ``` ## Information During the build process, when the `bundled` feature is enabled, the PostgreSQL binaries are downloaded and included in the resulting binary. The version of the PostgreSQL binaries is determined by the `POSTGRESQL_VERSION` environment variable. If the `POSTGRESQL_VERSION` environment variable is not set, then `postgresql_archive::LATEST` will be used to determine the version of the PostgreSQL binaries to download. When downloading the theseus PostgreSQL binaries, either during build, or at runtime, the `GITHUB_TOKEN` environment variable can be set to a GitHub personal access token to increase the rate limit for downloading the PostgreSQL binaries. The `GITHUB_TOKEN` environment variable is not required. At runtime, the PostgreSQL binaries are cached by default in the following directories: - Unix: `$HOME/.theseus/postgresql` - Windows: `%USERPROFILE%\.theseus\postgresql` Performance can be improved by using a specific version of the PostgreSQL binaries (e.g. `=16.4.0`). After the first download, the PostgreSQL binaries will be cached and reused for subsequent runs. Further, the repository will no longer be queried to calculate the version match. ## Feature flags postgresql_embedded uses feature flags to address compile time and binary size uses. The following features are available: | Name | Description | Default? 
|
|--------------|----------------------------------------------------------|----------|
| `bundled`    | Bundles the PostgreSQL archive into the resulting binary | No       |
| `blocking`   | Enables the blocking API; requires `tokio`               | No       |
| `indicatif`  | Enables tracing-indicatif support                        | No       |
| `native-tls` | Enables native-tls support                               | Yes      |
| `rustls`     | Enables rustls support                                   | No       |
| `theseus`    | Enables theseus PostgreSQL binaries                      | Yes      |
| `tokio`      | Enables using tokio for async                            | No       |
| `zonky`      | Enables zonky PostgreSQL binaries                        | No       |

## Bundling PostgreSQL

To bundle PostgreSQL with your application, you can enable the `bundled` feature. This will
download the PostgreSQL archive at compile time and include it in your binary. You should specify
the version of PostgreSQL to bundle by setting the environment variable `POSTGRESQL_VERSION` to a
specific version, e.g. `=17.2.0`.

In order to use the bundled PostgreSQL, you will also need to set an explicit matching version at
runtime in `Settings`:

```rust
use postgresql_embedded::{Result, Settings, VersionReq};
use std::str::FromStr;

#[tokio::main]
async fn main() -> Result<()> {
    let settings = Settings {
        version: VersionReq::from_str("=17.2.0")?,
        ..Default::default()
    };
    Ok(())
}
```

The PostgreSQL binaries can also be obtained from a different GitHub source by setting the
`POSTGRESQL_RELEASES_URL` environment variable. The repository must contain the releases with
archives in the same structure as
[theseus-rs/postgresql_binaries](https://github.com/theseus-rs/postgresql-binaries).
## Notes

Supports using PostgreSQL binaries from:

* [theseus-rs/postgresql-binaries](https://github.com/theseus-rs/postgresql-binaries) (default)
* [zonkyio/embedded-postgres-binaries](https://github.com/zonkyio/embedded-postgres-binaries)

## License

Licensed under either of

* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT)

at your option.

## Contribution

Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions.

================================================
FILE: postgresql_embedded/benches/embedded.rs
================================================
use criterion::{Criterion, criterion_group, criterion_main};
use postgresql_embedded::Result;
use postgresql_embedded::blocking::PostgreSQL;
use std::time::Duration;

/// Entry point for the benchmark group; runs every benchmark in this file.
fn benchmarks(criterion: &mut Criterion) {
    // Errors are deliberately discarded so one failing benchmark does not
    // abort the whole criterion run.
    bench_lifecycle(criterion).ok();
}

/// Benchmark the full server lifecycle (setup, start, stop).
fn bench_lifecycle(criterion: &mut Criterion) -> Result<()> {
    criterion.bench_function("lifecycle", |bencher| {
        bencher.iter(|| {
            lifecycle().ok();
        });
    });
    Ok(())
}

/// One complete lifecycle: install/initialize, start, then stop the server.
fn lifecycle() -> Result<()> {
    let mut postgresql = PostgreSQL::default();
    postgresql.setup()?;
    postgresql.start()?;
    postgresql.stop()
}

criterion_group!(
    name = benches;
    config = Criterion::default()
        .measurement_time(Duration::from_secs(30))
        .sample_size(10);
    targets = benchmarks
);
criterion_main!(benches);

================================================
FILE: postgresql_embedded/build/build.rs
================================================
// Build script: when the `bundled` feature is enabled, download and stage the
// PostgreSQL archive so it can be embedded into the final binary.
#[cfg(feature = "bundled")]
mod bundle;

use anyhow::Result;

#[tokio::main]
async fn main() -> Result<()> {
    #[cfg(feature = "bundled")]
    bundle::stage_postgresql_archive().await?;
    Ok(())
}

================================================
FILE:
postgresql_embedded/build/bundle.rs
================================================
#![allow(dead_code)]

use anyhow::Result;
use postgresql_archive::configuration::{custom, theseus};
use postgresql_archive::repository::github::repository::GitHub;
use postgresql_archive::{ExactVersion, Version, VersionReq, matcher};
use postgresql_archive::{get_archive, repository};
use std::fs::File;
use std::io::Write;
use std::path::PathBuf;
use std::str::FromStr;
use std::{env, fs};
use url::Url;

/// Stage the PostgreSQL archive when the `bundled` feature is enabled so that
/// it can be included in the final binary. This is useful for creating a
/// self-contained binary that does not require the PostgreSQL archive to be
/// downloaded at runtime.
pub(crate) async fn stage_postgresql_archive() -> Result<()> {
    // Re-run the build script whenever either override variable changes.
    println!("cargo:rerun-if-env-changed=POSTGRESQL_VERSION");
    println!("cargo:rerun-if-env-changed=POSTGRESQL_RELEASES_URL");
    #[cfg(feature = "theseus")]
    let default_releases_url = postgresql_archive::configuration::theseus::URL.to_string();
    #[cfg(not(feature = "theseus"))]
    let default_releases_url = String::new();
    // NOTE(review): a custom POSTGRESQL_RELEASES_URL is only honored when a
    // default (theseus) URL also exists; with the `theseus` feature disabled
    // the custom URL is ignored — confirm this guard is intentional.
    let releases_url = match env::var("POSTGRESQL_RELEASES_URL") {
        Ok(custom_url) if !default_releases_url.is_empty() => {
            register_custom_repository()?;
            custom_url
        }
        _ => {
            register_theseus_repository()?;
            default_releases_url
        }
    };
    println!("PostgreSQL releases URL: {releases_url}");
    // Default to "*" (latest) when no explicit version is requested.
    let postgres_version_req = env::var("POSTGRESQL_VERSION").unwrap_or("*".to_string());
    let version_req = VersionReq::from_str(postgres_version_req.as_str())?;
    println!("PostgreSQL version: {postgres_version_req}");
    println!("Target: {}", target_triple::TARGET);
    let out_dir = PathBuf::from(env::var("OUT_DIR")?);
    println!("OUT_DIR: {out_dir:?}");
    let mut archive_version_file = out_dir.clone();
    archive_version_file.push("postgresql.version");
    let mut archive_file = out_dir.clone();
    archive_file.push("postgresql.tar.gz");
    // Nothing to do if a previous build already staged the archive.
    if archive_version_file.exists() && archive_file.exists() {
        println!("PostgreSQL archive exists: {archive_file:?}");
        return Ok(());
    }
    // For an exact version request, consult (and populate) the user-level
    // cache so repeated builds avoid re-downloading the same archive.
    let (asset_version, archive) = if let Some(exact_version) = version_req.exact_version() {
        let cached_file = cached_archive_path(&exact_version);
        println!(
            "Cached file: {cached_file:?}; exists: {}",
            cached_file.exists()
        );
        if cached_file.is_file() {
            println!("Using cached PostgreSQL archive: {cached_file:?}");
            (exact_version, fs::read(&cached_file)?)
        } else {
            let (asset_version, archive) = get_archive(&releases_url, &version_req).await?;
            if let Some(parent) = cached_file.parent() {
                fs::create_dir_all(parent)?;
            }
            fs::write(&cached_file, &archive)?;
            println!("Cached PostgreSQL archive to: {cached_file:?}");
            (asset_version, archive)
        }
    } else {
        // Non-exact requirement: always resolve and download.
        get_archive(&releases_url, &version_req).await?
    };
    fs::write(archive_version_file.clone(), asset_version.to_string())?;
    let mut file = File::create(archive_file.clone())?;
    file.write_all(&archive)?;
    // Ensure the bytes hit disk before the build proceeds to embed them.
    file.sync_data()?;
    println!("PostgreSQL archive written to: {archive_file:?}");
    Ok(())
}

/// Returns the path for a cached archive.
fn cached_archive_path(version: &Version) -> PathBuf { let home = std::env::home_dir().unwrap_or_else(|| env::current_dir().unwrap_or_default()); let target = target_triple::TARGET; home.join(".theseus") .join("postgresql") .join(format!("postgresql-{version}-{target}.tar.gz")) } fn supports_github_url(url: &str) -> postgresql_archive::Result { let parsed_url = Url::parse(url)?; let host = parsed_url.host_str().unwrap_or_default(); Ok(host.ends_with("github.com")) } fn register_custom_repository() -> Result<()> { repository::registry::register(supports_github_url, Box::new(GitHub::new))?; matcher::registry::register(supports_github_url, custom::matcher)?; Ok(()) } fn register_theseus_repository() -> Result<()> { repository::registry::register(supports_github_url, Box::new(GitHub::new))?; matcher::registry::register(supports_github_url, theseus::matcher)?; Ok(()) } ================================================ FILE: postgresql_embedded/src/blocking/mod.rs ================================================ mod postgresql; pub use postgresql::PostgreSQL; ================================================ FILE: postgresql_embedded/src/blocking/postgresql.rs ================================================ use crate::{Result, Settings, Status}; use std::sync::LazyLock; use tokio::runtime::Runtime; static RUNTIME: LazyLock = LazyLock::new(|| Runtime::new().unwrap()); /// `PostgreSQL` server #[derive(Clone, Debug, Default)] pub struct PostgreSQL { inner: crate::postgresql::PostgreSQL, } /// `PostgreSQL` server methods impl PostgreSQL { /// Create a new [`crate::postgresql::PostgreSQL`] instance #[must_use] pub fn new(settings: Settings) -> Self { Self { inner: crate::postgresql::PostgreSQL::new(settings), } } /// Get the [status](Status) of the `PostgreSQL` server #[must_use] pub fn status(&self) -> Status { self.inner.status() } /// Get the [settings](Settings) of the `PostgreSQL` server #[must_use] pub fn settings(&self) -> &Settings { self.inner.settings() } /// Set up 
the database by extracting the archive and initializing the database.
    /// If the installation directory already exists, the archive will not be extracted.
    /// If the data directory already exists, the database will not be initialized.
    ///
    /// # Errors
    ///
    /// Returns an error if the setup fails.
    pub fn setup(&mut self) -> Result<()> {
        RUNTIME
            .handle()
            .block_on(async move { self.inner.setup().await })
    }

    /// Start the database and wait for the startup to complete.
    /// If the port is set to `0`, the database will be started on a random port.
    ///
    /// # Errors
    ///
    /// Returns an error if the startup fails.
    pub fn start(&mut self) -> Result<()> {
        RUNTIME
            .handle()
            .block_on(async move { self.inner.start().await })
    }

    /// Stop the database gracefully (smart mode) and wait for the shutdown to complete.
    ///
    /// # Errors
    ///
    /// Returns an error if the shutdown fails.
    pub fn stop(&self) -> Result<()> {
        RUNTIME
            .handle()
            .block_on(async move { self.inner.stop().await })
    }

    /// Create a new database with the given name.
    ///
    /// # Errors
    ///
    /// Returns an error if the database creation fails.
    pub fn create_database<S>(&self, database_name: S) -> Result<()>
    where
        S: AsRef<str> + std::fmt::Debug,
    {
        RUNTIME
            .handle()
            .block_on(async move { self.inner.create_database(database_name).await })
    }

    /// Check if a database with the given name exists.
    ///
    /// # Errors
    ///
    /// Returns an error if the database existence check fails.
    pub fn database_exists<S>(&self, database_name: S) -> Result<bool>
    where
        S: AsRef<str> + std::fmt::Debug,
    {
        RUNTIME
            .handle()
            .block_on(async move { self.inner.database_exists(database_name).await })
    }

    /// Drop a database with the given name.
    ///
    /// # Errors
    ///
    /// Returns an error if the database drop fails.
pub fn drop_database(&self, database_name: S) -> Result<()> where S: AsRef + std::fmt::Debug, { RUNTIME .handle() .block_on(async move { self.inner.drop_database(database_name).await }) } } #[cfg(test)] mod test { use super::*; use crate::VersionReq; #[test] fn test_postgresql() -> Result<()> { let version = VersionReq::parse("=16.4.0")?; let settings = Settings { version, ..Settings::default() }; let postgresql = PostgreSQL::new(settings); let initial_statuses = [Status::NotInstalled, Status::Installed, Status::Stopped]; assert!(initial_statuses.contains(&postgresql.status())); Ok(()) } } ================================================ FILE: postgresql_embedded/src/error.rs ================================================ use std::string::FromUtf8Error; /// `PostgreSQL` embedded result type pub type Result = core::result::Result; /// Errors that can occur when using `PostgreSQL` embedded #[derive(Debug, thiserror::Error)] pub enum Error { /// Error when `PostgreSQL` archive operations fail #[error(transparent)] ArchiveError(postgresql_archive::Error), /// Error when a command fails #[error("Command error: stdout={stdout}; stderr={stderr}")] CommandError { stdout: String, stderr: String }, /// Error when the database could not be created #[error("{0}")] CreateDatabaseError(String), /// Error when accessing the database #[error(transparent)] DatabaseError(#[from] sqlx::Error), /// Error when determining if the database exists #[error("{0}")] DatabaseExistsError(String), /// Error when the database could not be initialized #[error("{0}")] DatabaseInitializationError(String), /// Error when the database could not be started #[error("{0}")] DatabaseStartError(String), /// Error when the database could not be stopped #[error("{0}")] DatabaseStopError(String), /// Error when the database could not be dropped #[error("{0}")] DropDatabaseError(String), /// Error when an invalid URL is provided #[error("Invalid URL: {url}; {message}")] InvalidUrl { url: String, message: 
String }, /// Error when IO operations fail #[error("{0}")] IoError(String), /// Parse error #[error(transparent)] ParseError(#[from] semver::Error), } /// Convert `PostgreSQL` [archive errors](postgresql_archive::Error) to an [embedded errors](Error::ArchiveError) impl From for Error { fn from(error: postgresql_archive::Error) -> Self { Error::ArchiveError(error) } } /// Convert [standard IO errors](std::io::Error) to a [embedded errors](Error::IoError) impl From for Error { fn from(error: std::io::Error) -> Self { Error::IoError(error.to_string()) } } /// Convert [utf8 errors](FromUtf8Error) to [embedded errors](Error::IoError) impl From for Error { fn from(error: FromUtf8Error) -> Self { Error::IoError(error.to_string()) } } /// These are relatively low value tests; they are here to reduce the coverage gap and /// ensure that the error conversions are working as expected. #[cfg(test)] mod test { use super::*; #[test] fn test_from_archive_error() { let archive_error = postgresql_archive::Error::VersionNotFound("test".to_string()); let error = Error::from(archive_error); assert_eq!(error.to_string(), "version not found for 'test'"); } #[test] fn test_from_io_error() { let io_error = std::io::Error::other("test"); let error = Error::from(io_error); assert_eq!(error.to_string(), "test"); } #[test] fn test_from_utf8_error() { let invalid_utf8: Vec = vec![0, 159, 146, 150]; let from_utf8_error = String::from_utf8(invalid_utf8).expect_err("from utf8 error"); let error = Error::from(from_utf8_error); assert_eq!( error.to_string(), "invalid utf-8 sequence of 1 bytes from index 1" ); } } ================================================ FILE: postgresql_embedded/src/lib.rs ================================================ //! # postgresql_embedded //! //! [![Code Coverage](https://codecov.io/gh/theseus-rs/postgresql-embedded/branch/main/graph/badge.svg)](https://codecov.io/gh/theseus-rs/postgresql-embedded) //! 
[![Benchmarks](https://img.shields.io/badge/%F0%9F%90%B0_bencher-enabled-6ec241)](https://bencher.dev/perf/theseus-rs-postgresql-embedded) //! [![License](https://img.shields.io/crates/l/postgresql_embedded)](https://github.com/theseus-rs/postgresql-embedded/tree/main/postgresql_embedded#license) //! [![Semantic Versioning](https://img.shields.io/badge/%E2%9A%99%EF%B8%8F_SemVer-2.0.0-blue)](https://semver.org/spec/v2.0.0.html) //! //! Install and run a PostgreSQL database locally on Linux, MacOS or Windows. PostgreSQL can be //! bundled with your application, or downloaded on demand. //! //! ## Table of contents //! //! - [Examples](#examples) //! - [Information](#information) //! - [Feature flags](#feature-flags) //! - [Safety](#safety) //! - [License](#license) //! - [Notes](#notes) //! //! ## Examples //! //! ### Asynchronous API //! //! ```no_run //! use postgresql_embedded::{PostgreSQL, Result}; //! //! #[tokio::main] //! async fn main() -> Result<()> { //! let mut postgresql = PostgreSQL::default(); //! postgresql.setup().await?; //! postgresql.start().await?; //! //! let database_name = "test"; //! postgresql.create_database(database_name).await?; //! postgresql.database_exists(database_name).await?; //! postgresql.drop_database(database_name).await?; //! //! postgresql.stop().await //! } //! ``` //! //! ### Synchronous API //! ```no_run //! #[cfg(feature = "blocking")] { //! use postgresql_embedded::blocking::PostgreSQL; //! //! let mut postgresql = PostgreSQL::default(); //! postgresql.setup().unwrap(); //! postgresql.start().unwrap(); //! //! let database_name = "test"; //! postgresql.create_database(database_name).unwrap(); //! postgresql.database_exists(database_name).unwrap(); //! postgresql.drop_database(database_name).unwrap(); //! //! postgresql.stop().unwrap(); //! } //! ``` //! //! ### Settings Builder //! //! ```no_run //! use postgresql_embedded::{PostgreSQL, Result, SettingsBuilder}; //! //! #[tokio::main] //! 
async fn main() -> Result<()> { //! let settings = SettingsBuilder::new() //! .host("127.0.0.1") //! .port(5433) //! .username("admin") //! .password("secret") //! .temporary(false) //! .config("max_connections", "100") //! .build(); //! //! let mut postgresql = PostgreSQL::new(settings); //! postgresql.setup().await?; //! postgresql.start().await?; //! //! postgresql.stop().await //! } //! ``` //! //! ### Unix Socket //! //! ```no_run //! use postgresql_embedded::{PostgreSQL, Result, SettingsBuilder}; //! use std::path::PathBuf; //! //! #[tokio::main] //! async fn main() -> Result<()> { //! let settings = SettingsBuilder::new() //! .socket_dir(PathBuf::from("/tmp/pg_socket")) //! .build(); //! //! let mut postgresql = PostgreSQL::new(settings); //! postgresql.setup().await?; //! postgresql.start().await?; //! //! let database_name = "test"; //! postgresql.create_database(database_name).await?; //! postgresql.database_exists(database_name).await?; //! postgresql.drop_database(database_name).await?; //! //! postgresql.stop().await //! } //! ``` //! //! ## Information //! //! During the build process, when the `bundled` feature is enabled, the PostgreSQL binaries are //! downloaded and included in the resulting binary. The version of the PostgreSQL binaries is //! determined by the `POSTGRESQL_VERSION` environment variable. If the `POSTGRESQL_VERSION` //! environment variable is not set, then `postgresql_archive::LATEST` will be used to determine the //! version of the PostgreSQL binaries to download. //! //! When downloading the theseus PostgreSQL binaries, either during build, or at runtime, the //! `GITHUB_TOKEN` environment variable can be set to a GitHub personal access token to increase //! the rate limit for downloading the PostgreSQL binaries. The `GITHUB_TOKEN` environment //! variable is not required. //! //! At runtime, the PostgreSQL binaries are cached by default in the following directories: //! //! - Unix: `$HOME/.theseus/postgresql` //! 
- Windows: `%USERPROFILE%\.theseus\postgresql` //! //! Performance can be improved by using a specific version of the PostgreSQL binaries (e.g. `=16.10.0`). //! After the first download, the PostgreSQL binaries will be cached and reused for subsequent runs. //! Further, the repository will no longer be queried to calculate the version match. //! //! ## Feature flags //! //! postgresql_embedded uses feature flags to address compile time and binary size //! uses. //! //! The following features are available: //! //! //! | Name | Description | Default? | //! |--------------|----------------------------------------------------------|----------| //! | `bundled` | Bundles the PostgreSQL archive into the resulting binary | No | //! | `blocking` | Enables the blocking API; requires `tokio` | No | //! | `native-tls` | Enables native-tls support | Yes | //! | `rustls` | Enables rustls support | No | //! | `theseus` | Enables theseus PostgreSQL binaries | Yes | //! | `tokio` | Enables using tokio for async | No | //! | `zonky` | Enables zonky PostgreSQL binaries | No | //! //! ## Safety //! //! These crates use `#![forbid(unsafe_code)]` to ensure everything is implemented in 100% safe Rust. //! //! ## License //! //! Licensed under either of //! //! * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or ) //! * MIT license ([LICENSE-MIT](LICENSE-MIT) or ) //! //! at your option. //! //! PostgreSQL is covered under [The PostgreSQL License](https://opensource.org/licenses/postgresql). 
#[cfg(feature = "blocking")] pub mod blocking; mod error; mod postgresql; mod settings; pub use error::{Error, Result}; pub use postgresql::{PostgreSQL, Status}; pub use postgresql_archive::{Version, VersionReq}; pub use settings::{Settings, SettingsBuilder}; use std::sync::LazyLock; /// The latest PostgreSQL version requirement pub static LATEST: VersionReq = VersionReq::STAR; /// The latest PostgreSQL version 18 pub static V18: LazyLock = LazyLock::new(|| VersionReq::parse("=18").unwrap()); /// The latest PostgreSQL version 17 pub static V17: LazyLock = LazyLock::new(|| VersionReq::parse("=17").unwrap()); /// The latest PostgreSQL version 16 pub static V16: LazyLock = LazyLock::new(|| VersionReq::parse("=16").unwrap()); /// The latest PostgreSQL version 15 pub static V15: LazyLock = LazyLock::new(|| VersionReq::parse("=15").unwrap()); /// The latest PostgreSQL version 14 #[deprecated( since = "0.18.0", note = "See https://www.postgresql.org/developer/roadmap/" )] pub static V14: LazyLock = LazyLock::new(|| VersionReq::parse("=14").unwrap()); pub use settings::BOOTSTRAP_DATABASE; pub use settings::BOOTSTRAP_SUPERUSER; #[cfg(test)] mod tests { use super::*; #[test] fn test_version() -> Result<()> { let version = VersionReq::parse("=18.2.0")?; assert_eq!(version.to_string(), "=18.2.0"); Ok(()) } #[test] fn test_version_latest() { assert_eq!(LATEST.to_string(), "*"); } #[test] fn test_version_18() { assert_eq!(V18.to_string(), "=18"); } #[test] fn test_version_17() { assert_eq!(V17.to_string(), "=17"); } #[test] fn test_version_16() { assert_eq!(V16.to_string(), "=16"); } #[test] fn test_version_15() { assert_eq!(V15.to_string(), "=15"); } #[test] #[allow(deprecated)] fn test_version_14() { assert_eq!(V14.to_string(), "=14"); } } ================================================ FILE: postgresql_embedded/src/postgresql.rs ================================================ use crate::error::Error::{DatabaseInitializationError, DatabaseStartError, DatabaseStopError}; use 
crate::error::Result; use crate::settings::{BOOTSTRAP_DATABASE, BOOTSTRAP_SUPERUSER, Settings}; use postgresql_archive::extract; #[cfg(not(feature = "bundled"))] use postgresql_archive::get_archive; use postgresql_archive::get_version; use postgresql_archive::{ExactVersion, ExactVersionReq}; #[cfg(feature = "tokio")] use postgresql_commands::AsyncCommandExecutor; use postgresql_commands::CommandBuilder; #[cfg(not(feature = "tokio"))] use postgresql_commands::CommandExecutor; use postgresql_commands::initdb::InitDbBuilder; use postgresql_commands::pg_ctl::Mode::{Start, Stop}; use postgresql_commands::pg_ctl::PgCtlBuilder; use postgresql_commands::pg_ctl::ShutdownMode::Fast; use semver::Version; use sqlx::{PgPool, Row}; use std::fs::{read_dir, remove_dir_all, remove_file}; use std::io::prelude::*; use std::net::TcpListener; use std::path::PathBuf; use tracing::{debug, instrument}; use crate::Error::{CreateDatabaseError, DatabaseExistsError, DropDatabaseError}; const PGDATABASE: &str = "PGDATABASE"; /// `PostgreSQL` status #[derive(Debug, Clone, Copy, PartialEq)] pub enum Status { /// Archive not installed NotInstalled, /// Installation complete; not initialized Installed, /// Server started Started, /// Server initialized and stopped Stopped, } /// `PostgreSQL` server #[derive(Clone, Debug)] pub struct PostgreSQL { settings: Settings, } /// `PostgreSQL` server methods impl PostgreSQL { /// Create a new [`PostgreSQL`] instance #[must_use] pub fn new(settings: Settings) -> Self { let mut postgresql = PostgreSQL { settings }; // If an exact version is set, append the version to the installation directory to avoid // conflicts with other versions. This will also facilitate setting the status of the // server to the correct initial value. If the minor and release version are not set, the // installation directory will be determined dynamically during the installation process. 
if !postgresql.settings.trust_installation_dir && let Some(version) = postgresql.settings.version.exact_version() { let path = &postgresql.settings.installation_dir; let version_string = version.to_string(); if !path.ends_with(&version_string) { postgresql.settings.installation_dir = postgresql.settings.installation_dir.join(version_string); } } postgresql } /// Get the [status](Status) of the PostgreSQL server #[instrument(level = "debug", skip(self))] pub fn status(&self) -> Status { if self.is_running() { Status::Started } else if self.is_initialized() { Status::Stopped } else if self.installed_dir().is_some() { Status::Installed } else { Status::NotInstalled } } /// Get the [settings](Settings) of the `PostgreSQL` server #[must_use] pub fn settings(&self) -> &Settings { &self.settings } /// Find a directory where `PostgreSQL` server is installed. /// This first checks if the installation directory exists and matches the version requirement. /// If it doesn't, it will search all the child directories for the latest version that matches the requirement. /// If it returns None, we couldn't find a matching installation. fn installed_dir(&self) -> Option { if self.settings.trust_installation_dir { return Some(self.settings.installation_dir.clone()); } let path = &self.settings.installation_dir; let maybe_path_version = path .file_name() .and_then(|file_name| Version::parse(&file_name.to_string_lossy()).ok()); // If this directory matches the version requirement, we're done. if let Some(path_version) = maybe_path_version && self.settings.version.matches(&path_version) && path.exists() { return Some(path.clone()); } // Get all directories in the path as versions. let mut versions = read_dir(path) .ok()? .filter_map(|entry| { let Some(entry) = entry.ok() else { // We ignore filesystem errors. 
return None; }; // Skip non-directories if !entry.file_type().ok()?.is_dir() { return None; } let file_name = entry.file_name(); let version = Version::parse(&file_name.to_string_lossy()).ok()?; if self.settings.version.matches(&version) { Some((version, entry.path())) } else { None } }) .collect::>(); // Sort the versions in descending order i.e. latest version first versions.sort_by(|(a, _), (b, _)| b.cmp(a)); // Get the first matching version as the best match versions.first().map(|(_, path)| path.clone()) } /// Check if the `PostgreSQL` server is initialized fn is_initialized(&self) -> bool { self.settings.data_dir.join("postgresql.conf").exists() } /// Check if the `PostgreSQL` server is running fn is_running(&self) -> bool { let pid_file = self.settings.data_dir.join("postmaster.pid"); pid_file.exists() } /// Set up the database by extracting the archive and initializing the database. /// If the installation directory already exists, the archive will not be extracted. /// If the data directory already exists, the database will not be initialized. /// /// # Errors /// /// If the installation fails, an error will be returned. #[instrument(skip(self))] pub async fn setup(&mut self) -> Result<()> { match self.installed_dir() { Some(installed_dir) => { self.settings.installation_dir = installed_dir; } None => { self.install().await?; } } if !self.is_initialized() { self.initialize().await?; } Ok(()) } /// Install the PostgreSQL server from the archive. If the version minor and/or release are not set, /// the latest version will be determined dynamically during the installation process. If the archive /// hash does not match the expected hash, an error will be returned. If the installation directory /// already exists, the archive will not be extracted. If the archive is not found, an error will be /// returned. 
    #[instrument(skip(self))]
    async fn install(&mut self) -> Result<()> {
        // The bundled archive pins the version; override whatever was requested.
        #[cfg(feature = "bundled")]
        {
            self.settings.version = crate::settings::ARCHIVE_VERSION.clone();
        }

        debug!(
            "Starting installation process for version {}",
            self.settings.version
        );

        // If the exact version is not set, determine the latest version and update the version and
        // installation directory accordingly. This is an optimization to avoid downloading the
        // archive if the latest version is already installed.
        if self.settings.version.exact_version().is_none() {
            let version = get_version(&self.settings.releases_url, &self.settings.version).await?;
            self.settings.version = version.exact_version_req()?;
            self.settings.installation_dir =
                self.settings.installation_dir.join(version.to_string());
        }

        if self.settings.installation_dir.exists() {
            debug!("Installation directory already exists");
            return Ok(());
        }

        let url = &self.settings.releases_url;

        // When the `bundled` feature is enabled, use the bundled archive instead of downloading it
        // from the internet.
        #[cfg(feature = "bundled")]
        let bytes = {
            debug!("Using bundled installation archive");
            crate::settings::ARCHIVE.to_vec()
        };
        #[cfg(not(feature = "bundled"))]
        let bytes = {
            let (version, bytes) = get_archive(url, &self.settings.version).await?;
            self.settings.version = version.exact_version_req()?;
            bytes
        };

        extract(url, &bytes, &self.settings.installation_dir).await?;
        debug!(
            "Installed PostgreSQL version {} to {}",
            self.settings.version,
            self.settings.installation_dir.to_string_lossy()
        );

        Ok(())
    }

    /// Initialize the database in the data directory. This will create the necessary files and
    /// directories to start the database.
    #[instrument(skip(self))]
    async fn initialize(&mut self) -> Result<()> {
        // initdb reads the superuser password from a file (--pwfile); write it
        // out once if it does not already exist.
        // NOTE(review): the file is created with default permissions; consider
        // restricting it to 0o600 on Unix — confirm against initdb requirements.
        if !self.settings.password_file.exists() {
            let mut file = std::fs::File::create(&self.settings.password_file)?;
            file.write_all(self.settings.password.as_bytes())?;
        }

        debug!(
            "Initializing database {}",
            self.settings.data_dir.to_string_lossy()
        );

        // Bootstrap the cluster as the "postgres" superuser with password auth
        // and UTF8 encoding.
        let initdb = InitDbBuilder::from(&self.settings)
            .pgdata(&self.settings.data_dir)
            .username(BOOTSTRAP_SUPERUSER)
            .auth("password")
            .pwfile(&self.settings.password_file)
            .encoding("UTF8");

        match self.execute_command(initdb).await {
            Ok((_stdout, _stderr)) => {
                debug!(
                    "Initialized database {}",
                    self.settings.data_dir.to_string_lossy()
                );
                Ok(())
            }
            // Command failures are surfaced as a domain-specific error carrying
            // the command output/message.
            Err(error) => Err(DatabaseInitializationError(error.to_string())),
        }
    }

    /// Start the database and wait for the startup to complete.
    /// If the port is set to `0`, the database will be started on a random port.
    /// If `socket_dir` is configured, the server will also listen on a Unix socket.
    ///
    /// # Errors
    ///
    /// If the database fails to start, an error will be returned.
    #[instrument(skip(self))]
    pub async fn start(&mut self) -> Result<()> {
        // Port 0 means "pick a free port": bind an ephemeral TCP listener just
        // to discover a free port number, then release it immediately.
        // NOTE(review): there is a small race window between dropping the
        // listener and PostgreSQL binding the port.
        if self.settings.port == 0 {
            let listener = TcpListener::bind(("0.0.0.0", 0))?;
            self.settings.port = listener.local_addr()?.port();
        }
        // Create the socket directory if configured and it doesn't exist
        #[cfg(unix)]
        if let Some(ref socket_dir) = self.settings.socket_dir
            && !socket_dir.exists()
        {
            std::fs::create_dir_all(socket_dir)?;
        }
        debug!(
            "Starting database {} on port {}{}",
            self.settings.data_dir.to_string_lossy(),
            self.settings.port,
            self.settings
                .socket_dir
                .as_ref()
                .map_or(String::new(), |d| format!(
                    " with socket dir {}",
                    d.to_string_lossy()
                ))
        );
        let start_log = self.settings.data_dir.join("start.log");
        // Server options passed through pg_ctl -o: `-F` disables fsync (faster,
        // suitable for ephemeral instances), `-p` sets the TCP port.
        let mut options = Vec::new();
        options.push(format!("-F -p {}", self.settings.port));
        // `-k` sets the Unix-domain socket directory (Unix only).
        #[cfg(unix)]
        if let Some(ref socket_dir) = self.settings.socket_dir {
            options.push(format!("-k {}", socket_dir.to_string_lossy()));
        }
        // Arbitrary server configuration parameters are forwarded via `-c`.
        for (key, value) in &self.settings.configuration {
            options.push(format!("-c {key}={value}"));
        }
        // PGDATABASE is cleared so ambient environment cannot change behavior.
        let pg_ctl = PgCtlBuilder::from(&self.settings)
            .env(PGDATABASE, "")
            .mode(Start)
            .pgdata(&self.settings.data_dir)
            .log(start_log)
            .options(options.as_slice())
            .wait();

        match self.execute_command(pg_ctl).await {
            Ok((_stdout, _stderr)) => {
                debug!(
                    "Started database {} on port {}{}",
                    self.settings.data_dir.to_string_lossy(),
                    self.settings.port,
                    self.settings
                        .socket_dir
                        .as_ref()
                        .map_or(String::new(), |d| format!(
                            " with socket dir {}",
                            d.to_string_lossy()
                        ))
                );
                Ok(())
            }
            Err(error) => Err(DatabaseStartError(error.to_string())),
        }
    }

    /// Stop the database (fast shutdown mode: active transactions are rolled back
    /// and clients are disconnected) and wait for the shutdown to complete.
    ///
    /// # Errors
    ///
    /// If the database fails to stop, an error will be returned.
#[instrument(skip(self))] pub async fn stop(&self) -> Result<()> { debug!( "Stopping database {}", self.settings.data_dir.to_string_lossy() ); let pg_ctl = PgCtlBuilder::from(&self.settings) .mode(Stop) .pgdata(&self.settings.data_dir) .shutdown_mode(Fast) .wait(); match self.execute_command(pg_ctl).await { Ok((_stdout, _stderr)) => { debug!( "Stopped database {}", self.settings.data_dir.to_string_lossy() ); Ok(()) } Err(error) => Err(DatabaseStopError(error.to_string())), } } /// Get a connection pool to the bootstrap database. async fn get_pool(&self) -> Result { let mut settings = self.settings.clone(); settings.username = BOOTSTRAP_SUPERUSER.to_string(); let database_url = settings.url(BOOTSTRAP_DATABASE); let pool = PgPool::connect(database_url.as_str()).await?; Ok(pool) } /// Create a new database with the given name. /// /// # Errors /// /// If the database creation fails, an error will be returned. #[instrument(skip(self))] pub async fn create_database(&self, database_name: S) -> Result<()> where S: AsRef + std::fmt::Debug, { let database_name = database_name.as_ref(); debug!( "Creating database {database_name} for {host}:{port}", host = self.settings.host, port = self.settings.port ); let pool = self.get_pool().await?; sqlx::query(format!("CREATE DATABASE \"{database_name}\"").as_str()) .execute(&pool) .await .map_err(|error| CreateDatabaseError(error.to_string()))?; pool.close().await; debug!( "Created database {database_name} for {host}:{port}", host = self.settings.host, port = self.settings.port ); Ok(()) } /// Check if a database with the given name exists. /// /// # Errors /// /// If the query fails, an error will be returned. 
#[instrument(skip(self))] pub async fn database_exists(&self, database_name: S) -> Result where S: AsRef + std::fmt::Debug, { let database_name = database_name.as_ref(); debug!( "Checking if database {database_name} exists for {host}:{port}", host = self.settings.host, port = self.settings.port ); let pool = self.get_pool().await?; let row = sqlx::query("SELECT COUNT(*) FROM pg_database WHERE datname = $1") .bind(database_name.to_string()) .fetch_one(&pool) .await .map_err(|error| DatabaseExistsError(error.to_string()))?; let count: i64 = row.get(0); pool.close().await; Ok(count == 1) } /// Drop a database with the given name. /// /// # Errors /// /// If the database does not exist or if the drop command fails, an error will be returned. #[instrument(skip(self))] pub async fn drop_database(&self, database_name: S) -> Result<()> where S: AsRef + std::fmt::Debug, { let database_name = database_name.as_ref(); debug!( "Dropping database {database_name} for {host}:{port}", host = self.settings.host, port = self.settings.port ); let pool = self.get_pool().await?; sqlx::query(format!("DROP DATABASE IF EXISTS \"{database_name}\"").as_str()) .execute(&pool) .await .map_err(|error| DropDatabaseError(error.to_string()))?; pool.close().await; debug!( "Dropped database {database_name} for {host}:{port}", host = self.settings.host, port = self.settings.port ); Ok(()) } #[cfg(not(feature = "tokio"))] /// Execute a command and return the stdout and stderr as strings. #[instrument(level = "debug", skip(self, command_builder), fields(program = ?command_builder.get_program()))] async fn execute_command( &self, command_builder: B, ) -> postgresql_commands::Result<(String, String)> { let mut command = command_builder.build(); command.execute() } #[cfg(feature = "tokio")] /// Execute a command and return the stdout and stderr as strings. 
#[instrument(level = "debug", skip(self, command_builder), fields(program = ?command_builder.get_program()))] async fn execute_command( &self, command_builder: B, ) -> postgresql_commands::Result<(String, String)> { let mut command = command_builder.build_tokio(); command.execute(self.settings.timeout).await } } /// Default `PostgreSQL` server impl Default for PostgreSQL { fn default() -> Self { Self::new(Settings::default()) } } /// Stop the `PostgreSQL` server and remove the data directory if it is marked as temporary. impl Drop for PostgreSQL { fn drop(&mut self) { if self.status() == Status::Started { let mut pg_ctl = PgCtlBuilder::from(&self.settings) .mode(Stop) .pgdata(&self.settings.data_dir) .shutdown_mode(Fast) .wait() .build(); let _ = pg_ctl.output(); } if self.settings.temporary { let _ = remove_dir_all(&self.settings.data_dir); let _ = remove_file(&self.settings.password_file); if let Some(ref socket_dir) = self.settings.socket_dir { let _ = remove_dir_all(socket_dir); } } } } ================================================ FILE: postgresql_embedded/src/settings.rs ================================================ use crate::error::{Error, Result}; use postgresql_archive::VersionReq; #[cfg(feature = "bundled")] use postgresql_archive::{ExactVersionReq, Version}; use rand::RngExt; use rand::distr::Alphanumeric; use std::collections::HashMap; use std::env; use std::env::{current_dir, home_dir}; use std::ffi::OsString; use std::path::PathBuf; #[cfg(feature = "bundled")] use std::sync::LazyLock; use std::time::Duration; use url::Url; #[cfg(feature = "bundled")] #[expect(clippy::unwrap_used)] pub(crate) static ARCHIVE_VERSION: LazyLock = LazyLock::new(|| { let version_string = include_str!(concat!(std::env!("OUT_DIR"), "/postgresql.version")); let version = Version::parse(version_string).unwrap(); let version_req = version.exact_version_req().unwrap(); tracing::debug!("Bundled installation archive version {version_string}"); version_req }); #[cfg(feature = 
"bundled")]
pub(crate) const ARCHIVE: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/postgresql.tar.gz"));

/// `PostgreSQL` superuser
pub const BOOTSTRAP_SUPERUSER: &str = "postgres";
/// `PostgreSQL` database
pub const BOOTSTRAP_DATABASE: &str = "postgres";

/// Database settings
#[derive(Clone, Debug, PartialEq)]
pub struct Settings {
    /// URL for the releases location of the `PostgreSQL` installation archives
    pub releases_url: String,
    /// Version requirement of `PostgreSQL` to install
    pub version: VersionReq,
    /// `PostgreSQL` installation directory
    pub installation_dir: PathBuf,
    /// `PostgreSQL` password file
    pub password_file: PathBuf,
    /// `PostgreSQL` data directory
    pub data_dir: PathBuf,
    /// `PostgreSQL` host
    pub host: String,
    /// `PostgreSQL` port
    pub port: u16,
    /// `PostgreSQL` user name
    pub username: String,
    /// `PostgreSQL` password
    pub password: String,
    /// Temporary database
    pub temporary: bool,
    /// Command execution Timeout
    pub timeout: Option<Duration>,
    /// Server configuration options
    pub configuration: HashMap<String, String>,
    /// Skip installation and inference of the installation dir. Trust what the user provided.
    pub trust_installation_dir: bool,
    /// Unix socket directory. When set, the server will listen on a Unix socket in this directory
    /// in addition to (or instead of) TCP/IP. Unix-only; ignored on Windows.
pub socket_dir: Option, } /// Settings implementation impl Settings { /// Create a new instance of [`Settings`] pub fn new() -> Self { let home_dir = home_dir().unwrap_or_else(|| env::current_dir().unwrap_or_default()); let password_file_name = ".pgpass"; let password_file = if let Ok(dir) = tempfile::tempdir() { dir.keep().join(password_file_name) } else { let current_dir = current_dir().unwrap_or(PathBuf::from(".")); current_dir.join(password_file_name) }; let data_dir = if let Ok(dir) = tempfile::tempdir() { dir.keep() } else { let temp_dir: String = rand::rng() .sample_iter(&Alphanumeric) .take(16) .map(char::from) .collect(); let data_dir = current_dir().unwrap_or(PathBuf::from(".")); data_dir.join(temp_dir) }; let password = rand::rng() .sample_iter(&Alphanumeric) .take(16) .map(char::from) .collect(); #[cfg(feature = "theseus")] let releases_url = postgresql_archive::configuration::theseus::URL.to_string(); #[cfg(not(feature = "theseus"))] let releases_url = String::new(); Self { releases_url, version: default_version(), installation_dir: home_dir.join(".theseus").join("postgresql"), password_file, data_dir, host: "localhost".to_string(), port: 0, username: BOOTSTRAP_SUPERUSER.to_string(), password, temporary: true, timeout: Some(Duration::from_secs(5)), configuration: HashMap::new(), trust_installation_dir: false, socket_dir: None, } } /// Returns the binary directory for the configured `PostgreSQL` installation. #[must_use] pub fn binary_dir(&self) -> PathBuf { self.installation_dir.join("bin") } /// Return the `PostgreSQL` URL for the given database name. /// /// When `socket_dir` is set, the URL will use the Unix socket path /// (e.g. `postgresql://user:pass@localhost:5432/db?host=%2Fpath%2Fto%2Fsocket`). /// When `socket_dir` is `None`, a standard TCP URL is returned. 
pub fn url>(&self, database_name: S) -> String { match &self.socket_dir { Some(socket_dir) => { let socket_str = socket_dir.to_string_lossy(); let encoded: String = url::form_urlencoded::byte_serialize(socket_str.as_bytes()).collect(); format!( "postgresql://{}:{}@{}:{}/{}?host={}", self.username, self.password, self.host, self.port, database_name.as_ref(), encoded ) } None => { format!( "postgresql://{}:{}@{}:{}/{}", self.username, self.password, self.host, self.port, database_name.as_ref() ) } } } /// Create a new instance of [`Settings`] from the given URL. /// /// # Errors /// /// Returns an error if the URL is invalid. pub fn from_url>(url: S) -> Result { let parsed_url = match Url::parse(url.as_ref()) { Ok(parsed_url) => parsed_url, Err(error) => { return Err(Error::InvalidUrl { url: url.as_ref().to_string(), message: error.to_string(), }); } }; let query_parameters: HashMap = parsed_url.query_pairs().into_owned().collect(); let mut settings = Self::default(); if let Some(releases_url) = query_parameters.get("releases_url") { settings.releases_url = releases_url.to_string(); } if let Some(version) = query_parameters.get("version") { settings.version = VersionReq::parse(version)?; } if let Some(installation_dir) = query_parameters.get("installation_dir") { settings.installation_dir = PathBuf::from(installation_dir); } if let Some(password_file) = query_parameters.get("password_file") { settings.password_file = PathBuf::from(password_file); } if let Some(data_dir) = query_parameters.get("data_dir") { settings.data_dir = PathBuf::from(data_dir); } if let Some(host) = parsed_url.host() { settings.host = host.to_string(); } if let Some(port) = parsed_url.port() { settings.port = port; } if !parsed_url.username().is_empty() { settings.username = parsed_url.username().to_string(); } if let Some(password) = parsed_url.password() { settings.password = password.to_string(); } if let Some(temporary) = query_parameters.get("temporary") { settings.temporary = temporary == 
"true";
        }
        if let Some(timeout) = query_parameters.get("timeout") {
            // Timeout is expressed in whole seconds; a non-numeric value is an error.
            settings.timeout = match timeout.parse::<u64>() {
                Ok(timeout) => Some(Duration::from_secs(timeout)),
                Err(error) => {
                    return Err(Error::InvalidUrl {
                        url: url.as_ref().to_string(),
                        message: error.to_string(),
                    });
                }
            };
        }
        if let Some(trust_installation_dir) = query_parameters.get("trust_installation_dir") {
            settings.trust_installation_dir = trust_installation_dir == "true";
        }
        if let Some(socket_dir) = query_parameters.get("socket_dir") {
            settings.socket_dir = Some(PathBuf::from(socket_dir));
        }
        // Any `configuration.<key>=<value>` parameter becomes a server
        // configuration option.
        let configuration_prefix = "configuration.";
        for (key, value) in &query_parameters {
            if key.starts_with(configuration_prefix)
                && let Some(configuration_key) = key.strip_prefix(configuration_prefix)
            {
                settings
                    .configuration
                    .insert(configuration_key.to_string(), value.to_string());
            }
        }
        Ok(settings)
    }
}

/// Implement the [`Settings`] trait for [`Settings`]
impl postgresql_commands::Settings for Settings {
    fn get_binary_dir(&self) -> PathBuf {
        // `binary_dir()` already returns an owned PathBuf; no clone needed.
        self.binary_dir()
    }

    fn get_host(&self) -> OsString {
        self.host.parse().expect("host")
    }

    fn get_port(&self) -> u16 {
        self.port
    }

    fn get_username(&self) -> OsString {
        self.username.parse().expect("username")
    }

    fn get_password(&self) -> OsString {
        self.password.parse().expect("password")
    }

    fn get_socket_dir(&self) -> Option<PathBuf> {
        self.socket_dir.clone()
    }
}

/// Default implementation for [`Settings`]
impl Default for Settings {
    fn default() -> Self {
        Self::new()
    }
}

/// Builder for constructing [`Settings`] with a fluent API.
/// /// # Examples /// /// ```no_run /// use postgresql_embedded::SettingsBuilder; /// /// let settings = SettingsBuilder::new() /// .host("127.0.0.1") /// .port(5433) /// .username("admin") /// .password("secret") /// .temporary(false) /// .build(); /// ``` /// /// To configure a Unix socket: /// /// ```no_run /// use postgresql_embedded::SettingsBuilder; /// use std::path::PathBuf; /// /// let settings = SettingsBuilder::new() /// .socket_dir(PathBuf::from("/tmp/pg_socket")) /// .build(); /// ``` #[derive(Clone, Debug)] pub struct SettingsBuilder { settings: Settings, } impl SettingsBuilder { /// Create a new [`SettingsBuilder`] starting from the default [`Settings`]. #[must_use] pub fn new() -> Self { Self { settings: Settings::new(), } } /// Set the releases URL for downloading PostgreSQL archives. #[must_use] pub fn releases_url>(mut self, releases_url: S) -> Self { self.settings.releases_url = releases_url.into(); self } /// Set the PostgreSQL version requirement. #[must_use] pub fn version(mut self, version: VersionReq) -> Self { self.settings.version = version; self } /// Set the installation directory. #[must_use] pub fn installation_dir>(mut self, dir: P) -> Self { self.settings.installation_dir = dir.into(); self } /// Set the password file path. #[must_use] pub fn password_file>(mut self, path: P) -> Self { self.settings.password_file = path.into(); self } /// Set the data directory. #[must_use] pub fn data_dir>(mut self, dir: P) -> Self { self.settings.data_dir = dir.into(); self } /// Set the host name or IP address. #[must_use] pub fn host>(mut self, host: S) -> Self { self.settings.host = host.into(); self } /// Set the TCP port number. #[must_use] pub fn port(mut self, port: u16) -> Self { self.settings.port = port; self } /// Set the database username. #[must_use] pub fn username>(mut self, username: S) -> Self { self.settings.username = username.into(); self } /// Set the database password. 
#[must_use] pub fn password>(mut self, password: S) -> Self { self.settings.password = password.into(); self } /// Set whether the database is temporary (cleaned up on drop). #[must_use] pub fn temporary(mut self, temporary: bool) -> Self { self.settings.temporary = temporary; self } /// Set the command execution timeout. #[must_use] pub fn timeout(mut self, timeout: Option) -> Self { self.settings.timeout = timeout; self } /// Set server configuration options. #[must_use] pub fn configuration(mut self, configuration: HashMap) -> Self { self.settings.configuration = configuration; self } /// Add a single server configuration option. #[must_use] pub fn config, V: Into>(mut self, key: K, value: V) -> Self { self.settings.configuration.insert(key.into(), value.into()); self } /// Set whether to trust the installation directory as-is. #[must_use] pub fn trust_installation_dir(mut self, trust: bool) -> Self { self.settings.trust_installation_dir = trust; self } /// Set the Unix socket directory. When set, the server will listen on a Unix socket in this directory. This is only /// supported on Unix platforms. #[must_use] pub fn socket_dir>(mut self, dir: P) -> Self { self.settings.socket_dir = Some(dir.into()); self } /// Consume the builder and return the configured [`Settings`]. 
    #[must_use]
    pub fn build(self) -> Settings {
        self.settings
    }
}

impl Default for SettingsBuilder {
    fn default() -> Self {
        Self::new()
    }
}

/// Get the default version used if not otherwise specified
#[must_use]
fn default_version() -> VersionReq {
    // With the bundled archive the default is pinned to that archive's version;
    // otherwise any version ("*") is acceptable.
    #[cfg(feature = "bundled")]
    {
        ARCHIVE_VERSION.clone()
    }
    #[cfg(not(feature = "bundled"))]
    {
        VersionReq::STAR
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use test_log::test;

    #[test]
    #[cfg(feature = "bundled")]
    fn test_archive_version() {
        assert!(!super::ARCHIVE_VERSION.to_string().is_empty());
    }

    // Verifies the defaults produced by Settings::new(): ephemeral port,
    // random password, temp-based paths, 5s timeout.
    #[test]
    fn test_settings_new() {
        let settings = Settings::new();
        assert!(
            !settings
                .installation_dir
                .to_str()
                .unwrap_or_default()
                .is_empty()
        );
        assert!(settings.password_file.ends_with(".pgpass"));
        assert!(!settings.data_dir.to_str().unwrap_or_default().is_empty());
        assert_eq!(0, settings.port);
        assert_eq!(BOOTSTRAP_SUPERUSER, settings.username);
        assert!(!settings.password.is_empty());
        assert_ne!("password", settings.password);
        assert!(settings.binary_dir().ends_with("bin"));
        assert_eq!(
            "postgresql://postgres:password@localhost:0/test",
            settings
                .url("test")
                .replace(settings.password.as_str(), "password")
        );
        assert_eq!(Some(Duration::from_secs(5)), settings.timeout);
        assert!(settings.configuration.is_empty());
        assert!(settings.socket_dir.is_none());
    }

    // The socket dir must appear percent-encoded in the `host` query parameter.
    #[test]
    fn test_settings_url_with_socket_dir() {
        let mut settings = Settings::new();
        settings.username = "user".to_string();
        settings.password = "pass".to_string();
        settings.host = "localhost".to_string();
        settings.port = 5432;
        settings.socket_dir = Some(PathBuf::from("/tmp/pg_socket"));
        assert_eq!(
            "postgresql://user:pass@localhost:5432/test?host=%2Ftmp%2Fpg_socket",
            settings.url("test")
        );
    }

    // Round-trips every supported query parameter through from_url.
    #[test]
    fn test_settings_from_url() -> Result<()> {
        let base_url = "postgresql://postgres:password@localhost:5432/test";
        let releases_url = "releases_url=https%3A%2F%2Fgithub.com";
        let version = "version=%3D16.4.0";
        let installation_dir = "installation_dir=/tmp/postgresql";
        let password_file =
"password_file=/tmp/.pgpass";
        let data_dir = "data_dir=/tmp/data";
        let temporary = "temporary=false";
        let trust_installation_dir = "trust_installation_dir=true";
        let timeout = "timeout=10";
        let configuration = "configuration.max_connections=42";
        let url = format!(
            "{base_url}?{releases_url}&{version}&{installation_dir}&{password_file}&{data_dir}&{temporary}&{trust_installation_dir}&{timeout}&{configuration}"
        );
        let settings = Settings::from_url(url)?;

        assert_eq!("https://github.com", settings.releases_url);
        assert_eq!(VersionReq::parse("=16.4.0")?, settings.version);
        assert_eq!(PathBuf::from("/tmp/postgresql"), settings.installation_dir);
        assert_eq!(PathBuf::from("/tmp/.pgpass"), settings.password_file);
        assert_eq!(PathBuf::from("/tmp/data"), settings.data_dir);
        assert_eq!("localhost", settings.host);
        assert_eq!(5432, settings.port);
        assert_eq!(BOOTSTRAP_SUPERUSER, settings.username);
        assert_eq!("password", settings.password);
        assert!(!settings.temporary);
        assert!(settings.trust_installation_dir);
        assert_eq!(Some(Duration::from_secs(10)), settings.timeout);
        let configuration = HashMap::from([("max_connections".to_string(), "42".to_string())]);
        assert_eq!(configuration, settings.configuration);
        assert!(settings.socket_dir.is_none());
        // url() regenerates only the base URL; query parameters are settings-only.
        assert_eq!(base_url, settings.url("test"));
        Ok(())
    }

    #[test]
    fn test_settings_from_url_with_socket_dir() -> Result<()> {
        let url =
            "postgresql://postgres:password@localhost:5432/test?socket_dir=%2Ftmp%2Fpg_socket";
        let settings = Settings::from_url(url)?;
        assert_eq!(Some(PathBuf::from("/tmp/pg_socket")), settings.socket_dir);
        assert_eq!("localhost", settings.host);
        assert_eq!(5432, settings.port);
        assert_eq!(
            "postgresql://postgres:password@localhost:5432/test?host=%2Ftmp%2Fpg_socket",
            settings.url("test")
        );
        Ok(())
    }

    #[test]
    fn test_settings_from_url_invalid_url() {
        assert!(Settings::from_url("^`~").is_err());
    }

    #[test]
    fn test_settings_from_url_invalid_version() {
        assert!(Settings::from_url("postgresql://?version=foo").is_err());
    }
    #[test]
    fn test_settings_from_url_invalid_timeout() {
        assert!(Settings::from_url("postgresql://?timeout=foo").is_err());
    }

    // A fresh builder mirrors Settings::new() defaults.
    #[test]
    fn test_settings_builder_defaults() {
        let settings = SettingsBuilder::new().build();
        assert_eq!("localhost", settings.host);
        assert_eq!(0, settings.port);
        assert_eq!(BOOTSTRAP_SUPERUSER, settings.username);
        assert!(settings.temporary);
        assert!(settings.socket_dir.is_none());
        assert_eq!(Some(Duration::from_secs(5)), settings.timeout);
    }

    // Exercises every builder setter in one chain.
    #[test]
    fn test_settings_builder_all_fields() {
        let configuration = HashMap::from([("max_connections".to_string(), "100".to_string())]);
        let settings = SettingsBuilder::new()
            .releases_url("https://example.com")
            .version(VersionReq::STAR)
            .installation_dir("/tmp/install")
            .password_file("/tmp/.pgpass")
            .data_dir("/tmp/data")
            .host("127.0.0.1")
            .port(5433)
            .username("admin")
            .password("secret")
            .temporary(false)
            .timeout(Some(Duration::from_secs(30)))
            .configuration(configuration.clone())
            .trust_installation_dir(true)
            .socket_dir(PathBuf::from("/tmp/pg_socket"))
            .build();
        assert_eq!("https://example.com", settings.releases_url);
        assert_eq!(PathBuf::from("/tmp/install"), settings.installation_dir);
        assert_eq!(PathBuf::from("/tmp/.pgpass"), settings.password_file);
        assert_eq!(PathBuf::from("/tmp/data"), settings.data_dir);
        assert_eq!("127.0.0.1", settings.host);
        assert_eq!(5433, settings.port);
        assert_eq!("admin", settings.username);
        assert_eq!("secret", settings.password);
        assert!(!settings.temporary);
        assert_eq!(Some(Duration::from_secs(30)), settings.timeout);
        assert_eq!(configuration, settings.configuration);
        assert!(settings.trust_installation_dir);
        assert_eq!(Some(PathBuf::from("/tmp/pg_socket")), settings.socket_dir);
    }

    // config() accumulates individual options into the configuration map.
    #[test]
    fn test_settings_builder_config_method() {
        let settings = SettingsBuilder::new()
            .config("max_connections", "42")
            .config("shared_buffers", "128MB")
            .build();
        assert_eq!(
            Some(&"42".to_string()),
            settings.configuration.get("max_connections")
        );
        assert_eq!(
Some(&"128MB".to_string()),
            settings.configuration.get("shared_buffers")
        );
    }

    #[test]
    fn test_settings_builder_socket_dir() {
        let settings = SettingsBuilder::new()
            .socket_dir(PathBuf::from("/tmp/pg_socket"))
            .build();
        assert_eq!(Some(PathBuf::from("/tmp/pg_socket")), settings.socket_dir);
    }

    #[test]
    fn test_settings_builder_default() {
        let builder = SettingsBuilder::default();
        let settings = builder.build();
        assert_eq!("localhost", settings.host);
        assert_eq!(0, settings.port);
    }
}

================================================
FILE: postgresql_embedded/tests/blocking.rs
================================================
#[cfg(feature = "blocking")]
use postgresql_embedded::blocking::PostgreSQL;
#[cfg(feature = "blocking")]
use postgresql_embedded::{Result, Status};
#[cfg(feature = "blocking")]
use test_log::test;

// End-to-end lifecycle (setup/start/create/drop/stop) via the blocking API.
#[cfg(feature = "blocking")]
#[test]
fn test_embedded_blocking_lifecycle() -> Result<()> {
    let mut postgresql = PostgreSQL::default();
    let settings = postgresql.settings();

    // Verify that an ephemeral instance is created by default
    assert_eq!(0, settings.port);
    assert!(settings.temporary);

    let initial_statuses = [Status::NotInstalled, Status::Installed, Status::Stopped];
    assert!(initial_statuses.contains(&postgresql.status()));

    postgresql.setup()?;
    assert_eq!(Status::Stopped, postgresql.status());

    postgresql.start()?;
    assert_eq!(Status::Started, postgresql.status());

    let database_name = "test";
    assert!(!postgresql.database_exists(database_name)?);
    postgresql.create_database(database_name)?;
    assert!(postgresql.database_exists(database_name)?);
    postgresql.drop_database(database_name)?;

    postgresql.stop()?;
    assert_eq!(Status::Stopped, postgresql.status());

    Ok(())
}

================================================
FILE: postgresql_embedded/tests/dump_command.rs
================================================
use postgresql_commands::pg_dump::PgDumpBuilder;
use postgresql_commands::psql::PsqlBuilder;
use postgresql_commands::{CommandBuilder, CommandExecutor};
use postgresql_embedded::PostgreSQL; use std::fs; use tempfile::NamedTempFile; use test_log::test; #[test(tokio::test)] async fn dump_command() -> anyhow::Result<()> { let mut postgresql = PostgreSQL::default(); postgresql.setup().await?; postgresql.start().await?; let settings = postgresql.settings(); let database_name = "test"; postgresql.create_database(database_name).await?; let mut psql = PsqlBuilder::from(settings) .command("CREATE TABLE person42 (id INTEGER, name VARCHAR(20))") .dbname(database_name) .no_psqlrc() .no_align() .tuples_only() .build(); let (_stdout, _stderr) = psql.execute()?; let temp_file = NamedTempFile::new()?; let file = temp_file.as_ref(); let mut pgdump = PgDumpBuilder::from(settings) .dbname(database_name) .schema_only() .file(file.to_string_lossy().to_string()) .build(); let (_stdout, _stderr) = pgdump.execute()?; let contents = fs::read_to_string(file)?; assert!(contents.contains("person42")); Ok(()) } ================================================ FILE: postgresql_embedded/tests/environment_variables.rs ================================================ use postgresql_embedded::{PostgreSQL, Status}; use std::env; use test_log::test; #[test(tokio::test)] async fn lifecycle() -> anyhow::Result<()> { // Explicitly set PGDATABASE environment variable to verify that the library behavior // is not affected by the environment unsafe { env::set_var("PGDATABASE", "foodb"); } let mut postgresql = PostgreSQL::default(); postgresql.setup().await?; postgresql.start().await?; let database_name = "test"; assert!(!postgresql.database_exists(database_name).await?); postgresql.create_database(database_name).await?; assert!(postgresql.database_exists(database_name).await?); postgresql.drop_database(database_name).await?; postgresql.stop().await?; assert_eq!(Status::Stopped, postgresql.status()); Ok(()) } ================================================ FILE: postgresql_embedded/tests/postgresql.rs ================================================ use 
postgresql_commands::CommandBuilder; use postgresql_commands::psql::PsqlBuilder; use postgresql_embedded::{PostgreSQL, Result, Settings, Status}; use std::fs::{remove_dir_all, remove_file}; use test_log::test; async fn lifecycle() -> Result<()> { let mut postgresql = PostgreSQL::default(); let settings = postgresql.settings(); // Verify that an ephemeral instance is created by default assert_eq!(0, settings.port); assert!(settings.temporary); let initial_statuses = [Status::NotInstalled, Status::Installed, Status::Stopped]; assert!(initial_statuses.contains(&postgresql.status())); postgresql.setup().await?; assert_eq!(Status::Stopped, postgresql.status()); postgresql.start().await?; assert_eq!(Status::Started, postgresql.status()); let database_name = "test"; assert!(!postgresql.database_exists(database_name).await?); postgresql.create_database(database_name).await?; assert!(postgresql.database_exists(database_name).await?); postgresql.drop_database(database_name).await?; postgresql.stop().await?; assert_eq!(Status::Stopped, postgresql.status()); Ok(()) } #[test(tokio::test)] async fn test_embedded_async_lifecycle() -> Result<()> { lifecycle().await } #[test(tokio::test)] async fn test_temporary_database() -> Result<()> { let settings = Settings::default(); let data_dir = settings.data_dir.clone(); let password_file = settings.password_file.clone(); assert!(settings.temporary); { let mut postgresql = PostgreSQL::new(settings); postgresql.setup().await?; postgresql.start().await?; assert!(data_dir.exists()); assert!(password_file.exists()); } // Verify that the data directory and password file are removed automatically when PostgreSQL is dropped assert!(!data_dir.exists()); assert!(!password_file.exists()); Ok(()) } #[test(tokio::test)] async fn test_persistent_database() -> Result<()> { let mut settings = Settings::default(); let data_dir = settings.data_dir.clone(); let password_file = settings.password_file.clone(); settings.temporary = false; { let mut 
postgresql = PostgreSQL::new(settings); postgresql.setup().await?; postgresql.start().await?; assert!(data_dir.exists()); assert!(password_file.exists()); } // Verify that the data directory and password file are retained when PostgreSQL is dropped assert!(data_dir.exists()); assert!(password_file.exists()); let _ = remove_dir_all(&data_dir); let _ = remove_file(&password_file); Ok(()) } #[test(tokio::test)] async fn test_persistent_database_reuse() -> Result<()> { let database_name = "test"; let mut settings = Settings::default(); let data_dir = settings.data_dir.clone(); let password = settings.password.clone(); let password_file = settings.password_file.clone(); settings.temporary = false; { let mut postgresql = PostgreSQL::new(settings); postgresql.setup().await?; postgresql.start().await?; postgresql.create_database(database_name).await?; assert!(postgresql.database_exists(database_name).await?); postgresql.stop().await?; } // Verify that the data directory and password file are retained when PostgreSQL is dropped assert!(data_dir.exists()); assert!(password_file.exists()); let settings = Settings { data_dir: data_dir.clone(), password: password.clone(), password_file: password_file.clone(), temporary: false, ..Default::default() }; { let mut postgresql = PostgreSQL::new(settings); postgresql.setup().await?; postgresql.start().await?; assert!(postgresql.database_exists(database_name).await?); postgresql.stop().await?; } let _ = remove_dir_all(&data_dir); let _ = remove_file(&password_file); Ok(()) } #[test(tokio::test)] async fn postgres_concurrency() -> Result<()> { let handle1 = tokio::spawn(lifecycle()); let handle2 = tokio::spawn(lifecycle()); let handle3 = tokio::spawn(lifecycle()); match tokio::try_join!(handle1, handle2, handle3) { Ok(_) => {} Err(error) => { assert_eq!("", error.to_string()); } } Ok(()) } #[test(tokio::test)] async fn test_authentication_success() -> Result<()> { let mut postgresql = PostgreSQL::default(); postgresql.setup().await?; 
postgresql.start().await?; let mut psql = PsqlBuilder::from(postgresql.settings()) .command("SELECT 1") .no_psqlrc() .tuples_only() .build(); let output = psql.output()?; assert!(output.status.success()); Ok(()) } #[test(tokio::test)] async fn test_authentication_invalid_username() -> Result<()> { let mut postgresql = PostgreSQL::default(); postgresql.setup().await?; postgresql.start().await?; let mut psql = PsqlBuilder::from(postgresql.settings()) .command("SELECT 1") .username("invalid") .no_psqlrc() .tuples_only() .build(); let output = psql.output()?; assert!(!output.status.success()); Ok(()) } #[test(tokio::test)] async fn test_authentication_invalid_password() -> Result<()> { let mut postgresql = PostgreSQL::default(); postgresql.setup().await?; postgresql.start().await?; let mut psql = PsqlBuilder::from(postgresql.settings()) .command("SELECT 1") .pg_password("invalid") .no_psqlrc() .tuples_only() .build(); let output = psql.output()?; assert!(!output.status.success()); Ok(()) } #[test(tokio::test)] async fn test_username_setting() -> Result<()> { let settings = Settings { username: "admin".to_string(), ..Default::default() }; let mut postgresql = PostgreSQL::new(settings); postgresql.setup().await?; postgresql.start().await?; let database_name = "test"; postgresql.create_database(database_name).await?; let database_exists = postgresql.database_exists(database_name).await?; assert!(database_exists); postgresql.drop_database(database_name).await?; let database_exists = postgresql.database_exists(database_name).await?; assert!(!database_exists); Ok(()) } ================================================ FILE: postgresql_embedded/tests/start_config.rs ================================================ use postgresql_embedded::{BOOTSTRAP_DATABASE, PostgreSQL, Settings}; use sqlx::{PgPool, Row}; use std::collections::HashMap; use test_log::test; #[test(tokio::test)] async fn start_config() -> anyhow::Result<()> { let configuration = 
HashMap::from([("max_connections".to_string(), "42".to_string())]); let settings = Settings { configuration, ..Default::default() }; let mut postgresql = PostgreSQL::new(settings); postgresql.setup().await?; postgresql.start().await?; let settings = postgresql.settings(); let database_url = settings.url(BOOTSTRAP_DATABASE); let pool = PgPool::connect(database_url.as_str()).await?; let row = sqlx::query("SELECT setting FROM pg_settings WHERE name = $1") .bind("max_connections".to_string()) .fetch_one(&pool) .await?; let max_connections: String = row.get(0); pool.close().await; assert_eq!("42".to_string(), max_connections); Ok(()) } ================================================ FILE: postgresql_embedded/tests/unix_socket.rs ================================================ #[cfg(unix)] mod unix_socket_tests { use postgresql_embedded::{PostgreSQL, Result, SettingsBuilder, Status}; use sqlx::{PgPool, Row}; use std::path::PathBuf; use test_log::test; #[test(tokio::test)] async fn test_unix_socket_lifecycle() -> Result<()> { let socket_dir = tempfile::tempdir().expect("failed to create temp dir for socket"); let socket_path = socket_dir.path().to_path_buf(); let settings = SettingsBuilder::new() .socket_dir(socket_path.clone()) .build(); let mut postgresql = PostgreSQL::new(settings); postgresql.setup().await?; postgresql.start().await?; assert_eq!(Status::Started, postgresql.status()); // Verify the socket file exists (PostgreSQL creates .s.PGSQL. 
in the socket dir) let port = postgresql.settings().port; let socket_file = socket_path.join(format!(".s.PGSQL.{port}")); assert!( socket_file.exists(), "Expected socket file at {socket_file:?}" ); let database_name = "test"; assert!(!postgresql.database_exists(database_name).await?); postgresql.create_database(database_name).await?; assert!(postgresql.database_exists(database_name).await?); postgresql.drop_database(database_name).await?; assert!(!postgresql.database_exists(database_name).await?); postgresql.stop().await?; assert_eq!(Status::Stopped, postgresql.status()); Ok(()) } #[test(tokio::test)] async fn test_unix_socket_with_builder() -> Result<()> { let socket_dir = tempfile::tempdir().expect("failed to create temp dir for socket"); let socket_path = socket_dir.path().to_path_buf(); let settings = SettingsBuilder::new() .socket_dir(socket_path.clone()) .config("max_connections", "50") .build(); assert_eq!(Some(socket_path), settings.socket_dir); assert_eq!( Some(&"50".to_string()), settings.configuration.get("max_connections") ); let mut postgresql = PostgreSQL::new(settings); postgresql.setup().await?; postgresql.start().await?; let database_name = "builder_test"; postgresql.create_database(database_name).await?; assert!(postgresql.database_exists(database_name).await?); postgresql.drop_database(database_name).await?; postgresql.stop().await?; Ok(()) } #[test(tokio::test)] async fn test_unix_socket_temporary_cleanup() -> Result<()> { let socket_dir = tempfile::tempdir().expect("failed to create temp dir for socket"); let socket_path = socket_dir.keep(); let settings = SettingsBuilder::new() .socket_dir(socket_path.clone()) .temporary(true) .build(); let data_dir = settings.data_dir.clone(); let password_file = settings.password_file.clone(); { let mut postgresql = PostgreSQL::new(settings); postgresql.setup().await?; postgresql.start().await?; assert!(socket_path.exists()); } // Verify that socket dir, data dir, and password file are cleaned up 
assert!(!data_dir.exists()); assert!(!password_file.exists()); assert!(!socket_path.exists()); Ok(()) } #[test] fn test_unix_socket_url_format() { let settings = SettingsBuilder::new() .host("localhost") .port(5432) .username("user") .password("pass") .socket_dir(PathBuf::from("/tmp/pg_socket")) .build(); assert_eq!( "postgresql://user:pass@localhost:5432/test?host=%2Ftmp%2Fpg_socket", settings.url("test") ); } #[test(tokio::test)] async fn test_connection_type_tcp_vs_unix_socket() -> Result<()> { let socket_dir = tempfile::tempdir().expect("failed to create temp dir for socket"); let socket_path = socket_dir.path().to_path_buf(); let settings = SettingsBuilder::new() .socket_dir(socket_path.clone()) .build(); let mut postgresql = PostgreSQL::new(settings); postgresql.setup().await?; postgresql.start().await?; let database_name = "conn_type_test"; postgresql.create_database(database_name).await?; let settings = postgresql.settings(); // Connect via TCP (construct URL without socket_dir query parameter) let tcp_url = format!( "postgresql://{}:{}@{}:{}/{}", settings.username, settings.password, settings.host, settings.port, database_name ); let tcp_pool = PgPool::connect(tcp_url.as_str()).await.unwrap(); let tcp_row = sqlx::query( "SELECT client_addr::TEXT, client_port \ FROM pg_stat_activity \ WHERE pid = pg_backend_pid()", ) .fetch_one(&tcp_pool) .await .unwrap(); let tcp_client_addr: Option = tcp_row.get("client_addr"); let tcp_client_port: Option = tcp_row.get("client_port"); tcp_pool.close().await; // TCP connections have a non-null client_addr and a positive client_port assert!( tcp_client_addr.is_some(), "TCP connection should have a client_addr, got None" ); assert!( tcp_client_port.is_some_and(|p| p > 0), "TCP connection should have a positive client_port, got {tcp_client_port:?}" ); // Connect via Unix socket (URL includes ?host=) let socket_url = settings.url(database_name); let socket_pool = PgPool::connect(socket_url.as_str()).await.unwrap(); let 
socket_row = sqlx::query( "SELECT client_addr::TEXT, client_port \ FROM pg_stat_activity \ WHERE pid = pg_backend_pid()", ) .fetch_one(&socket_pool) .await?; let socket_client_addr: Option = socket_row.get("client_addr"); let socket_client_port: Option = socket_row.get("client_port"); socket_pool.close().await; // Unix socket connections have null client_addr and client_port of -1 assert!( socket_client_addr.is_none(), "Unix socket connection should have null client_addr, got {socket_client_addr:?}" ); assert_eq!( socket_client_port, Some(-1), "Unix socket connection should have client_port of -1, got {socket_client_port:?}" ); postgresql.stop().await?; Ok(()) } } ================================================ FILE: postgresql_embedded/tests/zonky.rs ================================================ #[cfg(feature = "zonky")] use postgresql_archive::configuration::zonky; #[cfg(feature = "zonky")] use postgresql_embedded::{PostgreSQL, Result, Settings, Status}; #[tokio::test] #[cfg(feature = "zonky")] async fn test_zonky() -> Result<()> { let settings = Settings { releases_url: zonky::URL.to_string(), ..Default::default() }; let mut postgresql = PostgreSQL::new(settings); let settings = postgresql.settings(); // Verify that an ephemeral instance is created by default assert_eq!(0, settings.port); assert!(settings.temporary); let initial_statuses = [Status::NotInstalled, Status::Installed, Status::Stopped]; assert!(initial_statuses.contains(&postgresql.status())); postgresql.setup().await?; assert_eq!(Status::Stopped, postgresql.status()); postgresql.start().await?; assert_eq!(Status::Started, postgresql.status()); let database_name = "test"; assert!(!postgresql.database_exists(database_name).await?); postgresql.create_database(database_name).await?; assert!(postgresql.database_exists(database_name).await?); postgresql.drop_database(database_name).await?; postgresql.stop().await?; assert_eq!(Status::Stopped, postgresql.status()); Ok(()) } 
================================================ FILE: postgresql_extensions/Cargo.toml ================================================ [package] authors.workspace = true categories.workspace = true description = "A library for managing PostgreSQL extensions" edition.workspace = true keywords.workspace = true license.workspace = true name = "postgresql_extensions" repository = "https://github.com/theseus-rs/postgresql-embedded" rust-version.workspace = true version.workspace = true [dependencies] async-trait = { workspace = true } postgresql_archive = { path = "../postgresql_archive", version = "0.20.2", default-features = false } postgresql_commands = { path = "../postgresql_commands", version = "0.20.2", default-features = false } regex-lite = { workspace = true } reqwest = { workspace = true, default-features = false, features = ["json"] } semver = { workspace = true, features = ["serde"] } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true, optional = true } target-triple = { workspace = true, optional = true } tempfile = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = ["full"], optional = true } tracing = { workspace = true, features = ["log"] } url = { workspace = true } [dev-dependencies] anyhow = { workspace = true } postgresql_embedded = { path = "../postgresql_embedded", version = "0.20.2" } test-log = { workspace = true } tokio = { workspace = true, features = ["full"] } [features] default = [ "native-tls", "portal-corp", "steampipe", "tensor-chord", ] blocking = ["tokio"] portal-corp = [ "dep:target-triple", "postgresql_archive/github", "postgresql_archive/zip", ] steampipe = [ "dep:serde_json", "postgresql_archive/github", "postgresql_archive/tar-gz", ] tensor-chord = [ "dep:target-triple", "postgresql_archive/github", "postgresql_archive/zip", ] tokio = [ "postgresql_commands/tokio", "dep:tokio" ] native-tls = [ "postgresql_archive/native-tls", "reqwest/native-tls", ] 
rustls = [
    "postgresql_archive/rustls",
    "reqwest/rustls",
]

[package.metadata.cargo-machete]
ignored = ["reqwest"]


================================================
FILE: postgresql_extensions/README.md
================================================
# PostgreSQL Extensions

[![ci](https://github.com/theseus-rs/postgresql-embedded/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/theseus-rs/postgresql-embedded/actions/workflows/ci.yml)
[![Documentation](https://docs.rs/postgresql_extensions/badge.svg)](https://docs.rs/postgresql_extensions)
[![Code Coverage](https://codecov.io/gh/theseus-rs/postgresql-embedded/branch/main/graph/badge.svg)](https://codecov.io/gh/theseus-rs/postgresql-embedded)
[![Benchmarks](https://img.shields.io/badge/%F0%9F%90%B0_bencher-enabled-6ec241)](https://bencher.dev/perf/theseus-rs-postgresql-embedded)
[![Latest version](https://img.shields.io/crates/v/postgresql_extensions.svg)](https://crates.io/crates/postgresql_extensions)
[![License](https://img.shields.io/crates/l/postgresql_extensions?)](https://github.com/theseus-rs/postgresql-embedded/tree/main/postgresql_extensions#license)
[![Semantic Versioning](https://img.shields.io/badge/%E2%9A%99%EF%B8%8F_SemVer-2.0.0-blue)](https://semver.org/spec/v2.0.0.html)

A configurable library for managing PostgreSQL extensions.

## Examples

### Asynchronous API

```rust
use postgresql_extensions::{get_available_extensions, Result};

#[tokio::main]
async fn main() -> Result<()> {
    let extensions = get_available_extensions().await?;
    Ok(())
}
```

### Synchronous API

```rust
use postgresql_extensions::Result;
use postgresql_extensions::blocking::get_available_extensions;

fn main() -> Result<()> {
    let extensions = get_available_extensions()?;
    Ok(())
}
```

## Feature flags

postgresql_extensions uses [feature flags] to address compile time and binary size uses.

The following features are available:

| Name | Description | Default?
| |--------------|----------------------------|----------| | `blocking` | Enables the blocking API | No | | `native-tls` | Enables native-tls support | Yes | | `rustls-tls` | Enables rustls-tls support | No | ### Repositories | Name | Description | Default? | |----------------|-------------------------------------------|----------| | `portal-corp` | Enables PortalCorp PostgreSQL extensions | Yes | | `steampipe` | Enables Steampipe PostgreSQL extensions | Yes | | `tensor-chord` | Enables TensorChord PostgreSQL extensions | Yes | ## Supported platforms `postgresql_extensions` provides implementations for the following: * [steampipe/repositories](https://github.com/orgs/turbot/repositories) * [tensor-chord/pgvecto.rs](https://github.com/tensor-chord/pgvecto.rs) ## License Licensed under either of * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0) * MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT) at your option. ## Contribution Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions. ================================================ FILE: postgresql_extensions/src/blocking/extensions.rs ================================================ #![allow(dead_code)] use crate::model::AvailableExtension; use crate::{InstalledExtension, Result}; use postgresql_commands::Settings; use semver::VersionReq; use std::sync::LazyLock; use tokio::runtime::Runtime; static RUNTIME: LazyLock = LazyLock::new(|| Runtime::new().unwrap()); /// Gets the available extensions. /// /// # Errors /// * If an error occurs while getting the extensions. pub fn get_available_extensions() -> Result> { RUNTIME .handle() .block_on(async move { crate::get_available_extensions().await }) } /// Gets the installed extensions. 
/// /// # Errors /// * If an error occurs while getting the installed extensions. pub fn get_installed_extensions(settings: &impl Settings) -> Result> { RUNTIME .handle() .block_on(async move { crate::get_installed_extensions(settings).await }) } /// Installs the extension with the specified `namespace`, `name`, and `version`. /// /// # Errors /// * If an error occurs while installing the extension. pub fn install( settings: &impl Settings, namespace: &str, name: &str, version: &VersionReq, ) -> Result<()> { RUNTIME .handle() .block_on(async move { crate::install(settings, namespace, name, version).await }) } /// Uninstalls the extension with the specified `namespace` and `name`. /// /// # Errors /// * If an error occurs while uninstalling the extension. pub fn uninstall(settings: &impl Settings, namespace: &str, name: &str) -> Result<()> { RUNTIME .handle() .block_on(async move { crate::uninstall(settings, namespace, name).await }) } #[cfg(test)] mod tests { use super::*; use crate::TestSettings; #[test] fn test_get_installed_extensions() -> Result<()> { let extensions = get_installed_extensions(&TestSettings)?; assert!(extensions.is_empty()); Ok(()) } } ================================================ FILE: postgresql_extensions/src/blocking/mod.rs ================================================ mod extensions; pub use extensions::{get_available_extensions, get_installed_extensions, install, uninstall}; ================================================ FILE: postgresql_extensions/src/error.rs ================================================ use std::sync::PoisonError; /// PostgreSQL extensions result type pub type Result = core::result::Result; /// PostgreSQL extensions errors #[derive(Debug, thiserror::Error)] pub enum Error { /// Archive error #[error(transparent)] ArchiveError(#[from] postgresql_archive::Error), /// Error when a command fails #[error(transparent)] CommandError(#[from] postgresql_commands::Error), /// Extension not found #[error("extension not 
found '{0}'")] ExtensionNotFound(String), /// Error when an IO operation fails #[error("{0}")] IoError(String), /// Poisoned lock #[error("poisoned lock '{0}'")] PoisonedLock(String), /// Error when a regex operation fails #[error(transparent)] RegexError(#[from] regex_lite::Error), /// Error when a deserialization or serialization operation fails #[error(transparent)] SerdeError(#[from] serde_json::Error), /// Unsupported namespace #[error("unsupported namespace '{0}'")] UnsupportedNamespace(String), } /// Converts a [`std::sync::PoisonError`] into a [`ParseError`](Error::PoisonedLock) impl From> for Error { fn from(value: PoisonError) -> Self { Error::PoisonedLock(value.to_string()) } } #[cfg(test)] mod tests { use super::*; #[test] fn test_from_poison_error() { let error = Error::from(std::sync::PoisonError::new(())); assert!(matches!(error, Error::PoisonedLock(_))); assert!(error.to_string().contains("poisoned lock")); } } ================================================ FILE: postgresql_extensions/src/extensions.rs ================================================ use crate::Error::IoError; use crate::model::AvailableExtension; use crate::repository::registry; use crate::repository::registry::get_repositories; use crate::{InstalledConfiguration, InstalledExtension, Result}; #[cfg(feature = "tokio")] use postgresql_commands::AsyncCommandExecutor; use postgresql_commands::CommandBuilder; #[cfg(not(feature = "tokio"))] use postgresql_commands::CommandExecutor; use postgresql_commands::Settings; use postgresql_commands::pg_config::PgConfigBuilder; use postgresql_commands::postgres::PostgresBuilder; use regex_lite::Regex; use semver::VersionReq; use std::path::PathBuf; use tracing::{debug, instrument}; const CONFIGURATION_FILE: &str = "postgresql_extensions.json"; /// Gets the available extensions. /// /// # Errors /// * If an error occurs while getting the extensions. 
#[instrument(level = "debug")] pub async fn get_available_extensions() -> Result> { let mut extensions = Vec::new(); for repository in get_repositories()? { for extension in repository.get_available_extensions().await? { extensions.push(extension); } } Ok(extensions) } /// Gets the installed extensions. /// /// # Errors /// * If an error occurs while getting the installed extensions. #[instrument(level = "debug", skip(settings))] pub async fn get_installed_extensions(settings: &impl Settings) -> Result> { let configuration_file = get_configuration_file(settings).await?; if !configuration_file.exists() { debug!("No configuration file found: {configuration_file:?}"); return Ok(Vec::new()); } let configuration = InstalledConfiguration::read(configuration_file).await?; let extensions = configuration.extensions(); Ok(extensions.clone()) } /// Installs the extension with the specified `namespace`, `name`, and `version`. /// /// # Errors /// * If an error occurs while installing the extension. 
#[instrument(level = "debug", skip(settings))]
pub async fn install(
    settings: &impl Settings,
    namespace: &str,
    name: &str,
    version: &VersionReq,
) -> Result<()> {
    // If the extension is already recorded as installed, remove it before
    // installing again.
    let extensions = get_installed_extensions(settings).await?;
    if extensions
        .iter()
        .any(|extension| extension.namespace() == namespace && extension.name() == name)
    {
        // Attempt to uninstall the extension first
        uninstall(settings, namespace, name).await?;
    }
    // Resolve the archive that matches the running server version and the
    // requested version requirement; the repository returns the concrete
    // version it selected.
    let postgresql_version = get_postgresql_version(settings).await?;
    let repository = registry::get(namespace)?;
    let (version, archive) = repository
        .get_archive(postgresql_version.as_str(), name, version)
        .await?;
    let library_dir = get_library_path(settings).await?;
    let extension_dir = get_extension_path(settings).await?;
    let files = repository
        .install(name, library_dir, extension_dir, &archive)
        .await?;
    // Record the installation in the configuration file, creating the file on
    // first install.
    let configuration_file = get_configuration_file(settings).await?;
    let mut configuration = if configuration_file.exists() {
        InstalledConfiguration::read(&configuration_file).await?
    } else {
        debug!("No configuration file found: {configuration_file:?}; creating new file");
        InstalledConfiguration::default()
    };
    let installed_extension = InstalledExtension::new(namespace, name, version, files);
    configuration.extensions_mut().push(installed_extension);
    configuration.write(configuration_file).await?;
    Ok(())
}

/// Uninstalls the extension with the specified `namespace` and `name`.
///
/// # Errors
/// * If an error occurs while uninstalling the extension.
#[instrument(level = "debug", skip(settings))] pub async fn uninstall(settings: &impl Settings, namespace: &str, name: &str) -> Result<()> { let configuration_file = get_configuration_file(settings).await?; if !configuration_file.exists() { debug!("No configuration file found: {configuration_file:?}; nothing to uninstall"); return Ok(()); } let configuration = &mut InstalledConfiguration::read(&configuration_file).await?; let mut extensions = Vec::new(); for extension in configuration.extensions() { if extension.namespace() != namespace || extension.name() != name { extensions.push(extension.clone()); } for file in extension.files() { if file.exists() { debug!("Removing file: {file:?}"); #[cfg(feature = "tokio")] tokio::fs::remove_file(file) .await .map_err(|error| IoError(error.to_string()))?; #[cfg(not(feature = "tokio"))] std::fs::remove_file(file) .map_err(|error| crate::error::Error::IoError(error.to_string()))?; } } } let configuration = InstalledConfiguration::new(extensions); configuration.write(configuration_file).await?; Ok(()) } /// Gets the configuration file. /// /// # Errors /// * If an error occurs while getting the configuration file. async fn get_configuration_file(settings: &dyn Settings) -> Result { let shared_path = get_shared_path(settings).await?; let file = shared_path.join(CONFIGURATION_FILE); Ok(file) } /// Gets the library path. /// /// # Errors /// * If an error occurs while getting the library path. 
async fn get_library_path(settings: &dyn Settings) -> Result { let command = PgConfigBuilder::from(settings).libdir(); match execute_command(command).await { Ok((stdout, _stderr)) => Ok(PathBuf::from(stdout.trim())), Err(error) => { debug!("Failed to get library path using pg_config: {error:?}"); let binary_dir = settings.get_binary_dir(); let install_dir = if let Some(parent) = binary_dir.parent() { parent.to_path_buf() } else { debug!( "Failed to get parent directory of binary directory; defaulting to current directory" ); PathBuf::from(".") }; let library_dir = install_dir.join("lib"); debug!("Using library directory: {library_dir:?}"); Ok(library_dir) } } } /// Gets the shared path. /// /// # Errors /// * If an error occurs while getting the shared path. async fn get_shared_path(settings: &dyn Settings) -> Result { let command = PgConfigBuilder::from(settings).sharedir(); match execute_command(command).await { Ok((stdout, _stderr)) => Ok(PathBuf::from(stdout.trim())), Err(error) => { debug!("Failed to get shared path using pg_config: {error:?}"); let binary_dir = settings.get_binary_dir(); let install_dir = if let Some(parent) = binary_dir.parent() { parent.to_path_buf() } else { debug!( "Failed to get parent directory of binary directory; defaulting to current directory" ); PathBuf::from(".") }; let share_dir = install_dir.join("share"); debug!("Using share directory: {share_dir:?}"); Ok(share_dir) } } } /// Gets the extension path. /// /// # Errors /// * If an error occurs while getting the extension path. async fn get_extension_path(settings: &dyn Settings) -> Result { let shared_path = get_shared_path(settings).await?; let extension_path = shared_path.join("extension"); Ok(extension_path) } /// Gets the PostgreSQL version. /// /// # Errors /// * If an error occurs while getting the PostgreSQL version. 
async fn get_postgresql_version(settings: &dyn Settings) -> Result { let command = PostgresBuilder::new() .program_dir(settings.get_binary_dir()) .version(); let (stdout, _stderr) = execute_command(command).await?; let re = Regex::new(r"PostgreSQL\)\s(\d+\.\d+)")?; let Some(captures) = re.captures(&stdout) else { return Err(IoError(format!( "Failed to obtain postgresql version from {stdout}" ))); }; let Some(version) = captures.get(1) else { return Err(IoError(format!( "Failed to match postgresql version from {stdout}" ))); }; let version = version.as_str(); debug!("Obtained PostgreSQL version from postgres command: {version}"); Ok(version.to_string()) } #[cfg(not(feature = "tokio"))] /// Execute a command and return the stdout and stderr as strings. #[instrument(level = "debug", skip(command_builder), fields(program = ?command_builder.get_program()))] async fn execute_command( command_builder: B, ) -> postgresql_commands::Result<(String, String)> { let mut command = command_builder.build(); command.execute() } #[cfg(feature = "tokio")] /// Execute a command and return the stdout and stderr as strings. #[instrument(level = "debug", skip(command_builder), fields(program = ?command_builder.get_program()))] async fn execute_command( command_builder: B, ) -> postgresql_commands::Result<(String, String)> { let mut command = command_builder.build_tokio(); command.execute(None).await } #[cfg(test)] mod tests { use super::*; use crate::TestSettings; #[tokio::test] async fn test_get_installed_extensions() -> Result<()> { let extensions = get_installed_extensions(&TestSettings).await?; assert!(extensions.is_empty()); Ok(()) } } ================================================ FILE: postgresql_extensions/src/lib.rs ================================================ //! # PostgreSQL Extensions //! //! 
[![ci](https://github.com/theseus-rs/postgresql-embedded/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/theseus-rs/postgresql-embedded/actions/workflows/ci.yml) //! [![Documentation](https://docs.rs/postgresql_extensions/badge.svg)](https://docs.rs/postgresql_extensions) //! [![Code Coverage](https://codecov.io/gh/theseus-rs/postgresql-embedded/branch/main/graph/badge.svg)](https://codecov.io/gh/theseus-rs/postgresql-embedded) //! [![Benchmarks](https://img.shields.io/badge/%F0%9F%90%B0_bencher-enabled-6ec241)](https://bencher.dev/perf/theseus-rs-postgresql-embedded) //! [![Latest version](https://img.shields.io/crates/v/postgresql_extensions.svg)](https://crates.io/crates/postgresql_extensions) //! [![License](https://img.shields.io/crates/l/postgresql_extensions?)](https://github.com/theseus-rs/postgresql-embedded/tree/main/postgresql_extensions#license) //! [![Semantic Versioning](https://img.shields.io/badge/%E2%9A%99%EF%B8%8F_SemVer-2.0.0-blue)](https://semver.org/spec/v2.0.0.html) //! //! A configurable library for managing PostgreSQL extensions. //! //! ## Examples //! //! ### Asynchronous API //! //! ```rust //! use postgresql_extensions::{get_available_extensions, Result}; //! //! #[tokio::main] //! async fn main() -> Result<()> { //! let extensions = get_available_extensions().await?; //! Ok(()) //! } //! ``` //! //! ### Synchronous API //! //! ```rust //! #[cfg(feature = "blocking")] { //! use postgresql_extensions::Result; //! use postgresql_extensions::blocking::get_available_extensions; //! //! let extensions = get_available_extensions().unwrap(); //! } //! ``` //! //! ## Feature flags //! //! postgresql_extensions uses [feature flags] to address compile time and binary size //! uses. //! //! The following features are available: //! //! | Name | Description | Default? | //! |--------------|----------------------------|----------| //! | `blocking` | Enables the blocking API | No | //! 
| `native-tls` | Enables native-tls support | Yes |
//! | `rustls-tls` | Enables rustls-tls support | No |
//!
//! ### Repositories
//!
//! | Name | Description | Default? |
//! |----------------|-------------------------------------------|----------|
//! | `portal-corp` | Enables PortalCorp PostgreSQL extensions | Yes |
//! | `steampipe` | Enables Steampipe PostgreSQL extensions | Yes |
//! | `tensor-chord` | Enables TensorChord PostgreSQL extensions | Yes |
//!
//! ## Supported platforms
//!
//! `postgresql_extensions` provides implementations for the following:
//!
//! * [steampipe/repositories](https://github.com/orgs/turbot/repositories)
//! * [tensor-chord/pgvecto.rs](https://github.com/tensor-chord/pgvecto.rs)
//!
//! ## Safety
//!
//! This crate uses `#![forbid(unsafe_code)]` to ensure everything is implemented in 100% safe Rust.
//!
//! ## License
//!
//! Licensed under either of
//!
//! * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or <https://www.apache.org/licenses/LICENSE-2.0>)
//! * MIT license ([LICENSE-MIT](LICENSE-MIT) or <https://opensource.org/licenses/MIT>)
//!
//! at your option.
//!
//! ## Contribution
//!
//! Unless you explicitly state otherwise, any contribution intentionally submitted
//! for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any
//! additional terms or conditions.
#[cfg(feature = "blocking")]
pub mod blocking;
mod error;
pub mod extensions;
mod matcher;
mod model;
pub mod repository;

pub use error::{Error, Result};
pub use extensions::{get_available_extensions, get_installed_extensions, install, uninstall};
pub use matcher::{matcher, tar_gz_matcher, zip_matcher};
#[cfg(test)]
pub use model::TestSettings;
pub use model::{AvailableExtension, InstalledConfiguration, InstalledExtension};
pub use semver::{Version, VersionReq};

================================================
FILE: postgresql_extensions/src/matcher.rs
================================================

use postgresql_archive::Result;
use regex_lite::Regex;
use semver::Version;
use std::collections::HashMap;
use std::env::consts;
use url::Url;

/// .tar.gz asset matcher that matches the asset name to the postgresql major version, target triple
/// or OS/CPU architecture.
///
/// # Errors
/// * If the asset matcher fails.
pub fn tar_gz_matcher(url: &str, name: &str, version: &Version) -> Result<bool> {
    // Delegate the platform/version checks, then narrow by file extension.
    if !matcher(url, name, version)? {
        return Ok(false);
    }
    Ok(name.ends_with(".tar.gz"))
}

/// .zip asset matcher that matches the asset name to the postgresql major version, target triple or
/// OS/CPU architecture.
///
/// # Errors
/// * If the asset matcher fails.
#[expect(clippy::case_sensitive_file_extension_comparisons)]
pub fn zip_matcher(url: &str, name: &str, version: &Version) -> Result<bool> {
    // Delegate the platform/version checks, then narrow by file extension.
    if !matcher(url, name, version)? {
        return Ok(false);
    }
    Ok(name.ends_with(".zip"))
}

/// Default asset matcher that matches the asset name to the postgresql major version, target triple
/// or OS/CPU architecture.
///
/// # Errors
/// * If the asset matcher fails.
pub fn matcher(url: &str, name: &str, _version: &Version) -> Result<bool> {
    // An unparseable URL can never match; it is not an error.
    let Ok(url) = Url::parse(url) else {
        return Ok(false);
    };
    let query_parameters: HashMap<String, String> = url.query_pairs().into_owned().collect();
    // The caller encodes the target server version as a query parameter,
    // e.g. "?postgresql_version=16.3".
    let Some(postgresql_version) = query_parameters.get("postgresql_version") else {
        return Ok(false);
    };
    let postgresql_major_version = match postgresql_version.split_once('.') {
        None => return Ok(false),
        Some((major, _)) => major,
    };
    // Assets advertise the server major version as e.g. "pg16".
    let postgresql_version = format!("pg{postgresql_major_version}");
    let postgresql_version_re = regex(postgresql_version.as_str())?;
    if !postgresql_version_re.is_match(name) {
        return Ok(false);
    }

    // A full target-triple match (e.g. "x86_64-unknown-linux-gnu") is sufficient.
    let target_re = regex(target_triple::TARGET)?;
    if target_re.is_match(name) {
        return Ok(true);
    }

    // Otherwise require both the OS and the CPU architecture to appear,
    // accepting common aliases ("darwin", "amd64", "arm64").
    let os = consts::OS;
    let os_re = regex(os)?;
    let matches_os = match os {
        "macos" => {
            let darwin_re = regex("darwin")?;
            os_re.is_match(name) || darwin_re.is_match(name)
        }
        _ => os_re.is_match(name),
    };

    let arch = consts::ARCH;
    let arch_re = regex(arch)?;
    let matches_arch = match arch {
        "x86_64" => {
            let amd64_re = regex("amd64")?;
            arch_re.is_match(name) || amd64_re.is_match(name)
        }
        "aarch64" => {
            let arm64_re = regex("arm64")?;
            arch_re.is_match(name) || arm64_re.is_match(name)
        }
        _ => arch_re.is_match(name),
    };

    if matches_os && matches_arch {
        return Ok(true);
    }
    Ok(false)
}

/// Creates a new regex for the specified key.
///
/// # Arguments
/// * `key` - The key to create the regex for.
///
/// # Returns
/// * The regex.
///
/// # Errors
/// * If the regex cannot be created.
fn regex(key: &str) -> Result<Regex> {
    // Require a non-word character or underscore on both sides of the key so
    // that, e.g., "pg16" does not match inside "pg160".
    let regex = Regex::new(format!(r"[\W_]{key}[\W_]").as_str())?;
    Ok(regex)
}

#[cfg(test)]
mod tests {
    use super::*;
    use anyhow::Result;

    #[test]
    fn test_invalid_url() -> Result<()> {
        let url = "^";
        assert!(!matcher(url, "", &Version::new(0, 0, 0))?);
        Ok(())
    }

    #[test]
    fn test_no_version() -> Result<()> {
        assert!(!matcher("https://foo", "", &Version::new(0, 0, 0))?);
        Ok(())
    }

    #[test]
    fn test_invalid_version() -> Result<()> {
        assert!(!matcher(
            "https://foo?postgresql_version=16",
            "",
            &Version::new(0, 0, 0)
        )?);
        Ok(())
    }

    #[test]
    fn test_tar_gz_matcher() -> Result<()> {
        let postgresql_major_version = 16;
        let url = format!("https://foo?postgresql_version={postgresql_major_version}.3");
        let version = Version::parse("1.2.3")?;
        let target = target_triple::TARGET;
        let valid_name = format!("postgresql-pg{postgresql_major_version}-{target}.tar.gz");
        let invalid_name = format!("postgresql-pg{postgresql_major_version}-{target}.zip");
        assert!(
            tar_gz_matcher(url.as_str(), valid_name.as_str(), &version)?,
            "{}",
            valid_name
        );
        assert!(
            !tar_gz_matcher(url.as_str(), invalid_name.as_str(), &version)?,
            "{}",
            invalid_name
        );
        Ok(())
    }

    #[test]
    fn test_zip_matcher() -> Result<()> {
        let postgresql_major_version = 16;
        let url = format!("https://foo?postgresql_version={postgresql_major_version}.3");
        let version = Version::parse("1.2.3")?;
        let target = target_triple::TARGET;
        let valid_name = format!("postgresql-pg{postgresql_major_version}-{target}.zip");
        let invalid_name = format!("postgresql-pg{postgresql_major_version}-{target}.tar.gz");
        assert!(
            zip_matcher(url.as_str(), valid_name.as_str(), &version)?,
            "{}",
            valid_name
        );
        assert!(
            !zip_matcher(url.as_str(), invalid_name.as_str(), &version)?,
            "{}",
            invalid_name
        );
        Ok(())
    }

    #[test]
    fn test_matcher_success() -> Result<()> {
        let postgresql_major_version = 16;
        let url = format!("https://foo?postgresql_version={postgresql_major_version}.3");
        let version = Version::parse("1.2.3")?;
        let target = target_triple::TARGET;
        let os = consts::OS;
        let arch = consts::ARCH;
        let names = vec![
            format!("postgresql-pg{postgresql_major_version}-{target}.zip"),
            format!("postgresql-pg{postgresql_major_version}-{os}-{arch}.zip"),
            format!("postgresql-pg{postgresql_major_version}-{target}.tar.gz"),
            format!("postgresql-pg{postgresql_major_version}-{os}-{arch}.tar.gz"),
            format!("foo.{target}.pg{postgresql_major_version}.tar.gz"),
            format!("foo.{os}.{arch}.pg{postgresql_major_version}.tar.gz"),
            format!("foo-{arch}-{os}-pg{postgresql_major_version}.tar.gz"),
            format!("foo_{arch}_{os}_pg{postgresql_major_version}.tar.gz"),
        ];
        for name in names {
            assert!(matcher(url.as_str(), name.as_str(), &version)?, "{}", name);
        }
        Ok(())
    }

    #[test]
    fn test_matcher_errors() -> Result<()> {
        let postgresql_major_version = 16;
        let url = format!("https://foo?postgresql_version={postgresql_major_version}.3");
        let version = Version::parse("1.2.3")?;
        let target = target_triple::TARGET;
        let os = consts::OS;
        let arch = consts::ARCH;
        let names = vec![
            format!("foo-pg{postgresql_major_version}.tar.gz"),
            format!("foo-{target}.tar.gz"),
            format!("foo-pg{postgresql_major_version}-{os}.tar.gz"),
            format!("foo-pg{postgresql_major_version}-{arch}.tar.gz"),
            format!("foo-pg{postgresql_major_version}{os}-{arch}.tar"),
            format!("foo-pg{postgresql_major_version}-{os}{arch}.tar.gz"),
        ];
        for name in names {
            assert!(!matcher(url.as_str(), name.as_str(), &version)?, "{}", name);
        }
        Ok(())
    }
}

================================================
FILE: postgresql_extensions/src/model.rs
================================================

use crate::Error::IoError;
use crate::Result;
use semver::Version;
use serde::{Deserialize, Serialize};
#[cfg(test)]
use std::ffi::OsString;
use std::fmt::Display;
#[cfg(not(feature = "tokio"))]
use std::io::Write;
use std::path::PathBuf;
#[cfg(feature = "tokio")]
use tokio::io::{AsyncReadExt, AsyncWriteExt};

/// A struct representing an available extension.
#[derive(Debug)]
pub struct AvailableExtension {
    namespace: String,
    name: String,
    description: String,
}

impl AvailableExtension {
    /// Creates a new available extension.
    #[must_use]
    pub fn new(namespace: &str, name: &str, description: &str) -> Self {
        Self {
            namespace: namespace.to_string(),
            name: name.to_string(),
            description: description.to_string(),
        }
    }

    /// Gets the namespace of the extension.
    #[must_use]
    pub fn namespace(&self) -> &str {
        &self.namespace
    }

    /// Gets the name of the extension.
    #[must_use]
    pub fn name(&self) -> &str {
        &self.name
    }

    /// Gets the description of the extension.
    #[must_use]
    pub fn description(&self) -> &str {
        &self.description
    }
}

impl Display for AvailableExtension {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}:{} {}", self.namespace, self.name, self.description)
    }
}

/// A struct representing an installed configuration.
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)]
pub struct InstalledConfiguration {
    extensions: Vec<InstalledExtension>,
}

impl InstalledConfiguration {
    /// Creates a new installed configuration.
    #[must_use]
    pub fn new(extensions: Vec<InstalledExtension>) -> Self {
        Self { extensions }
    }

    /// Reads the configuration from the specified `path`.
    ///
    /// # Errors
    /// * If an error occurs while reading the configuration.
    pub async fn read<P: Into<PathBuf>>(path: P) -> Result<Self> {
        #[cfg(feature = "tokio")]
        {
            let mut file = tokio::fs::File::open(path.into())
                .await
                .map_err(|error| IoError(error.to_string()))?;
            let mut contents = vec![];
            file.read_to_end(&mut contents)
                .await
                .map_err(|error| IoError(error.to_string()))?;
            let config = serde_json::from_slice(&contents)?;
            Ok(config)
        }
        #[cfg(not(feature = "tokio"))]
        {
            let file =
                std::fs::File::open(path.into()).map_err(|error| IoError(error.to_string()))?;
            let reader = std::io::BufReader::new(file);
            let config =
                serde_json::from_reader(reader).map_err(|error| IoError(error.to_string()))?;
            Ok(config)
        }
    }

    /// Writes the configuration to the specified `path`.
    ///
    /// # Errors
    /// * If an error occurs while writing the configuration.
    pub async fn write<P: Into<PathBuf>>(&self, path: P) -> Result<()> {
        let content = serde_json::to_string_pretty(&self)?;
        #[cfg(feature = "tokio")]
        {
            let mut file = tokio::fs::File::create(path.into())
                .await
                .map_err(|error| IoError(error.to_string()))?;
            file.write_all(content.as_bytes())
                .await
                .map_err(|error| IoError(error.to_string()))?;
        }
        #[cfg(not(feature = "tokio"))]
        {
            let mut file =
                std::fs::File::create(path.into()).map_err(|error| IoError(error.to_string()))?;
            file.write_all(content.as_bytes())
                .map_err(|error| IoError(error.to_string()))?;
        }
        Ok(())
    }

    /// Gets the extensions of the configuration.
    #[must_use]
    pub fn extensions(&self) -> &Vec<InstalledExtension> {
        &self.extensions
    }

    /// Gets the extensions of the configuration.
    #[must_use]
    pub fn extensions_mut(&mut self) -> &mut Vec<InstalledExtension> {
        &mut self.extensions
    }
}

/// A struct representing an installed extension.
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub struct InstalledExtension {
    namespace: String,
    name: String,
    version: Version,
    files: Vec<PathBuf>,
}

impl InstalledExtension {
    /// Creates a new installed extension.
    #[must_use]
    pub fn new(namespace: &str, name: &str, version: Version, files: Vec<PathBuf>) -> Self {
        Self {
            namespace: namespace.to_string(),
            name: name.to_string(),
            version,
            files,
        }
    }

    /// Gets the namespace of the extension.
    #[must_use]
    pub fn namespace(&self) -> &str {
        &self.namespace
    }

    /// Gets the name of the extension.
    #[must_use]
    pub fn name(&self) -> &str {
        &self.name
    }

    /// Gets the version of the extension.
    #[must_use]
    pub fn version(&self) -> &Version {
        &self.version
    }

    /// Gets the files of the extension.
#[must_use] pub fn files(&self) -> &Vec { &self.files } } impl Display for InstalledExtension { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}:{}:{}", self.namespace, self.name, self.version) } } #[cfg(test)] pub struct TestSettings; #[cfg(test)] impl postgresql_commands::Settings for TestSettings { fn get_binary_dir(&self) -> PathBuf { PathBuf::from(".") } fn get_host(&self) -> OsString { "localhost".into() } fn get_port(&self) -> u16 { 5432 } fn get_username(&self) -> OsString { "postgres".into() } fn get_password(&self) -> OsString { "password".into() } } #[cfg(test)] mod tests { use super::*; use postgresql_commands::Settings; #[test] fn test_settings() { let settings = TestSettings; assert_eq!(settings.get_binary_dir(), PathBuf::from(".")); assert_eq!(settings.get_host(), "localhost"); assert_eq!(settings.get_port(), 5432); assert_eq!(settings.get_username(), "postgres"); assert_eq!(settings.get_password(), "password"); } #[test] fn test_available_extension() { let available_extension = AvailableExtension::new("namespace", "name", "description"); assert_eq!(available_extension.namespace(), "namespace"); assert_eq!(available_extension.name(), "name"); assert_eq!(available_extension.description(), "description"); assert_eq!( available_extension.to_string(), "namespace:name description" ); } #[test] fn test_installed_configuration() { let installed_configuration = InstalledConfiguration::new(vec![]); assert!(installed_configuration.extensions.is_empty()); } #[cfg(target_os = "linux")] #[tokio::test] async fn test_installed_configuration_io() -> Result<()> { let temp_file = tempfile::NamedTempFile::new().map_err(|error| IoError(error.to_string()))?; let file = temp_file.as_ref(); let extensions = vec![InstalledExtension::new( "namespace", "name", Version::new(1, 0, 0), vec![PathBuf::from("file")], )]; let expected_configuration = InstalledConfiguration::new(extensions); expected_configuration.write(file).await?; let 
configuration = InstalledConfiguration::read(file).await?; assert_eq!(expected_configuration, configuration); tokio::fs::remove_file(file) .await .map_err(|error| IoError(error.to_string()))?; Ok(()) } #[test] fn test_installed_extension() { let installed_extension = InstalledExtension::new( "namespace", "name", Version::new(1, 0, 0), vec![PathBuf::from("file")], ); assert_eq!(installed_extension.namespace(), "namespace"); assert_eq!(installed_extension.name(), "name"); assert_eq!(installed_extension.version(), &Version::new(1, 0, 0)); assert_eq!(installed_extension.files(), &vec![PathBuf::from("file")]); assert_eq!(installed_extension.to_string(), "namespace:name:1.0.0"); } } ================================================ FILE: postgresql_extensions/src/repository/mod.rs ================================================ pub mod model; #[cfg(feature = "portal-corp")] pub mod portal_corp; pub mod registry; #[cfg(feature = "steampipe")] pub mod steampipe; #[cfg(feature = "tensor-chord")] pub mod tensor_chord; pub use model::Repository; ================================================ FILE: postgresql_extensions/src/repository/model.rs ================================================ use crate::Result; use crate::model::AvailableExtension; use async_trait::async_trait; use semver::{Version, VersionReq}; use std::fmt::Debug; use std::path::PathBuf; /// A trait for archive repository implementations. #[async_trait] pub trait Repository: Debug + Send + Sync { /// Gets the name of the repository. fn name(&self) -> &str; /// Gets the available extensions. /// /// # Errors /// * if an error occurs while getting the extensions. async fn get_available_extensions(&self) -> Result>; /// Gets the archive for the extension with the specified `name` and `version`. /// /// # Errors /// * if an error occurs while getting the archive. 
async fn get_archive( &self, postgresql_version: &str, name: &str, version: &VersionReq, ) -> Result<(Version, Vec)>; /// Installs the extension with the specified `name` and `version`. /// /// # Errors /// * if an error occurs while installing the extension. async fn install( &self, name: &str, library_dir: PathBuf, extension_dir: PathBuf, archive: &[u8], ) -> Result>; } ================================================ FILE: postgresql_extensions/src/repository/portal_corp/mod.rs ================================================ pub mod repository; pub const URL: &str = "https://github.com/portalcorp"; ================================================ FILE: postgresql_extensions/src/repository/portal_corp/repository.rs ================================================ use crate::Result; use crate::matcher::zip_matcher; use crate::model::AvailableExtension; use crate::repository::Repository; use crate::repository::portal_corp::URL; use async_trait::async_trait; use postgresql_archive::extractor::{ExtractDirectories, zip_extract}; use postgresql_archive::get_archive; use postgresql_archive::repository::github::repository::GitHub; use regex_lite::Regex; use semver::{Version, VersionReq}; use std::fmt::Debug; use std::path::PathBuf; /// PortalCorp repository. #[derive(Debug)] pub struct PortalCorp; impl PortalCorp { /// Creates a new PortalCorp repository. /// /// # Errors /// * If the repository cannot be created #[expect(clippy::new_ret_no_self)] pub fn new() -> Result> { Ok(Box::new(Self)) } /// Initializes the repository. /// /// # Errors /// * If the repository cannot be initialized. 
pub fn initialize() -> Result<()> { postgresql_archive::matcher::registry::register( |url| Ok(url.starts_with(URL)), zip_matcher, )?; postgresql_archive::repository::registry::register( |url| Ok(url.starts_with(URL)), Box::new(GitHub::new), )?; Ok(()) } } #[async_trait] impl Repository for PortalCorp { fn name(&self) -> &'static str { "portal-corp" } async fn get_available_extensions(&self) -> Result> { let extensions = vec![AvailableExtension::new( self.name(), "pgvector_compiled", "Precompiled OS packages for pgvector", )]; Ok(extensions) } async fn get_archive( &self, postgresql_version: &str, name: &str, version: &VersionReq, ) -> Result<(Version, Vec)> { let url = format!("{URL}/{name}?postgresql_version={postgresql_version}"); let archive = get_archive(url.as_str(), version).await?; Ok(archive) } async fn install( &self, _name: &str, library_dir: PathBuf, extension_dir: PathBuf, archive: &[u8], ) -> Result> { let mut extract_directories = ExtractDirectories::default(); extract_directories.add_mapping(Regex::new(r"\.(dll|dylib|so)$")?, library_dir); extract_directories.add_mapping(Regex::new(r"\.(control|sql)$")?, extension_dir); let bytes = &archive.to_vec(); let files = zip_extract(bytes, &extract_directories)?; Ok(files) } } #[cfg(test)] mod tests { use super::*; use crate::repository::Repository; #[test] fn test_name() { let repository = PortalCorp; assert_eq!("portal-corp", repository.name()); } #[tokio::test] async fn test_get_available_extensions() -> Result<()> { let repository = PortalCorp; let extensions = repository.get_available_extensions().await?; let extension = &extensions[0]; assert_eq!("pgvector_compiled", extension.name()); assert_eq!( "Precompiled OS packages for pgvector", extension.description() ); Ok(()) } } ================================================ FILE: postgresql_extensions/src/repository/registry.rs ================================================ use crate::Error::UnsupportedNamespace; use crate::Result; use 
crate::repository::model::Repository; #[cfg(feature = "portal-corp")] use crate::repository::portal_corp::repository::PortalCorp; #[cfg(feature = "steampipe")] use crate::repository::steampipe::repository::Steampipe; #[cfg(feature = "tensor-chord")] use crate::repository::tensor_chord::repository::TensorChord; use std::collections::HashMap; use std::sync::{Arc, LazyLock, Mutex, RwLock}; static REGISTRY: LazyLock>> = LazyLock::new(|| Arc::new(Mutex::new(RepositoryRegistry::default()))); type NewFn = dyn Fn() -> Result> + Send + Sync; /// Singleton struct to store repositories struct RepositoryRegistry { repositories: HashMap>>, } impl RepositoryRegistry { /// Creates a new repository registry. fn new() -> Self { Self { repositories: HashMap::new(), } } /// Registers a repository. Newly registered repositories take precedence over existing ones. fn register(&mut self, namespace: &str, new_fn: Box) { let namespace = namespace.to_string(); self.repositories .insert(namespace, Arc::new(RwLock::new(new_fn))); } /// Gets a repository that supports the specified namespace /// /// # Errors /// * If the namespace is not supported. fn get(&self, namespace: &str) -> Result> { let namespace = namespace.to_string(); let Some(new_fn) = self.repositories.get(&namespace) else { return Err(UnsupportedNamespace(namespace.to_string())); }; let new_function = new_fn.read()?; new_function() } } impl Default for RepositoryRegistry { /// Creates a new repository registry with the default repositories registered. 
fn default() -> Self { let mut registry = Self::new(); #[cfg(feature = "portal-corp")] { registry.register("portal-corp", Box::new(PortalCorp::new)); let _ = PortalCorp::initialize(); } #[cfg(feature = "steampipe")] { registry.register("steampipe", Box::new(Steampipe::new)); let _ = Steampipe::initialize(); } #[cfg(feature = "tensor-chord")] { registry.register("tensor-chord", Box::new(TensorChord::new)); let _ = TensorChord::initialize(); } registry } } /// Registers a repository. Newly registered repositories can override existing ones. /// /// # Errors /// * If the registry is poisoned. pub fn register(namespace: &str, new_fn: Box) -> Result<()> { REGISTRY.lock()?.register(namespace, new_fn); Ok(()) } /// Gets a repository that supports the specified namespace /// /// # Errors /// * If the namespace is not supported. pub fn get(namespace: &str) -> Result> { REGISTRY.lock()?.get(namespace) } /// Gets the namespaces of the registered repositories. /// /// # Errors /// * If the registry is poisoned. pub fn get_namespaces() -> Result> { Ok(REGISTRY.lock()?.repositories.keys().cloned().collect()) } /// Gets all the registered repositories. /// /// # Errors /// * If the registry is poisoned. pub fn get_repositories() -> Result>> { let mut repositories = Vec::new(); for namespace in get_namespaces()? 
{ let repository = get(&namespace)?; repositories.push(repository); } Ok(repositories) } #[cfg(test)] mod tests { use super::*; use crate::model::AvailableExtension; use async_trait::async_trait; use semver::{Version, VersionReq}; use std::path::PathBuf; #[derive(Debug)] struct TestRepository; impl TestRepository { #[expect(clippy::new_ret_no_self)] #[expect(clippy::unnecessary_wraps)] fn new() -> Result> { Ok(Box::new(Self)) } } #[async_trait] impl Repository for TestRepository { fn name(&self) -> &'static str { "test" } async fn get_available_extensions(&self) -> Result> { Ok(Vec::new()) } async fn get_archive( &self, _postgresql_version: &str, _name: &str, _version: &VersionReq, ) -> Result<(Version, Vec)> { Ok((Version::new(1, 0, 0), Vec::new())) } async fn install( &self, _name: &str, _library_dir: PathBuf, _extension_dir: PathBuf, _archive: &[u8], ) -> Result> { Ok(Vec::new()) } } #[tokio::test] async fn test_register() -> Result<()> { let namespace = "test"; register(namespace, Box::new(TestRepository::new))?; let repository = get(namespace)?; assert_eq!("test", repository.name()); assert!(repository.get_available_extensions().await.is_ok()); Ok(()) } #[test] fn test_get_error() { let error = get("foo").unwrap_err(); assert_eq!("unsupported namespace 'foo'", error.to_string()); } #[test] #[cfg(feature = "portal-corp")] fn test_get_portal_corp_extensions() { assert!(get("portal-corp").is_ok()); } #[test] #[cfg(feature = "steampipe")] fn test_get_steampipe_extensions() { assert!(get("steampipe").is_ok()); } #[test] #[cfg(feature = "tensor-chord")] fn test_get_tensor_chord_extensions() { assert!(get("tensor-chord").is_ok()); } #[test] fn test_get_namespaces() { let namespaces = get_namespaces().unwrap(); #[cfg(feature = "portal-corp")] assert!(namespaces.contains(&"portal-corp".to_string())); #[cfg(feature = "steampipe")] assert!(namespaces.contains(&"steampipe".to_string())); #[cfg(feature = "tensor-chord")] 
assert!(namespaces.contains(&"tensor-chord".to_string())); } #[test] fn test_get_repositories() { let repositories = get_repositories().unwrap(); #[cfg(feature = "steampipe")] assert!( repositories .iter() .any(|repository| repository.name() == "steampipe") ); #[cfg(feature = "tensor-chord")] assert!( repositories .iter() .any(|repository| repository.name() == "tensor-chord") ); } } ================================================ FILE: postgresql_extensions/src/repository/steampipe/extensions.rs ================================================ use std::sync::LazyLock; static EXTENSIONS: LazyLock> = LazyLock::new(init_extensions); #[expect(clippy::too_many_lines)] fn init_extensions() -> Vec { vec![ SteampipeExtension::new( "abuseipdb", "Steampipe plugin to query IP address abuse data and more from AbuseIPDB.", "https://github.com/turbot/steampipe-plugin-abuseipdb", ), SteampipeExtension::new( "airtable", "Steampipe plugin for querying Airtable.", "https://github.com/francois2metz/steampipe-plugin-airtable", ), SteampipeExtension::new( "aiven", "Steampipe plugin to query accounts, projects, teams, users and more from Aiven.", "https://github.com/turbot/steampipe-plugin-aiven", ), SteampipeExtension::new( "algolia", "Steampipe plugin for querying Algolia indexes, logs and more.", "https://github.com/turbot/steampipe-plugin-algolia", ), SteampipeExtension::new( "alicloud", "Steampipe plugin for querying Alibaba Cloud servers, databases, networks, and other resources.", "https://github.com/turbot/steampipe-plugin-alicloud", ), SteampipeExtension::new( "ansible", "Steampipe plugin to query configurations from the Ansible playbooks.", "https://github.com/turbot/steampipe-plugin-ansible", ), SteampipeExtension::new( "auth0", "Use SQL to query users, clients, connections, keys and more from Auth0.", "https://github.com/turbot/steampipe-plugin-auth0", ), SteampipeExtension::new( "aws", "Steampipe plugin for querying instances, buckets, databases and more from AWS.", 
"https://github.com/turbot/steampipe-plugin-aws", ), SteampipeExtension::new( "awscfn", "Steampipe plugin to query data from AWS CloudFormation template files.", "https://github.com/turbot/steampipe-plugin-awscfn", ), SteampipeExtension::new( "azure", "Steampipe plugin for querying resource groups, virtual machines, storage accounts and more from Azure.", "https://github.com/turbot/steampipe-plugin-azure", ), SteampipeExtension::new( "azuread", "Steampipe plugin for querying resource users, groups, applications and more from Azure Active Directory.", "https://github.com/turbot/steampipe-plugin-azuread", ), SteampipeExtension::new( "azuredevops", "Steampipe plugin to query projects, groups, builds and more from Azure DevOps.", "https://github.com/turbot/steampipe-plugin-azuredevops", ), SteampipeExtension::new( "baleen", "Steampipe plugin for querying Baleen.", "https://github.com/francois2metz/steampipe-plugin-baleen", ), SteampipeExtension::new( "bitbucket", "Steampipe plugin for querying repositories, issues, pull requests and more from Bitbucket.", "https://github.com/turbot/steampipe-plugin-bitbucket", ), SteampipeExtension::new( "bitfinex", "Steampipe plugin for querying data from bitfinex", "https://github.com/kaggrwal/steampipe-plugin-bitfinex", ), SteampipeExtension::new( "btp", "Steampipe plugin to query the account details of your SAP Business Technology Platform account.", "https://github.com/ajmaradiaga/steampipe-plugin-btp", ), SteampipeExtension::new( "buildkite", "Steampipe plugin to query Buildkite pipelines, builds, users and more.", "https://github.com/turbot/steampipe-plugin-buildkite", ), SteampipeExtension::new( "chaos", "Steampipe plugin to cause chaos for testing Steampipe with the craziest edge cases we can think of.", "https://github.com/turbot/steampipe-plugin-chaos", ), SteampipeExtension::new( "chaosdynamic", "Steampipe plugin to test aggregation of dynamic plugin connections.", "https://github.com/turbot/steampipe-plugin-chaosdynamic", 
), SteampipeExtension::new( "circleci", "Steampipe plugin for querying resource projects, pipelines, builds and more from CircleCI.", "https://github.com/turbot/steampipe-plugin-circleci", ), SteampipeExtension::new( "clickup", "Steampipe plugin for querying ClickUp Tasks, Lists and other resources.", "https://github.com/theapsgroup/steampipe-plugin-clickup", ), SteampipeExtension::new( "cloudflare", "Steampipe plugin for querying Cloudflare databases, networks, and other resources.", "https://github.com/turbot/steampipe-plugin-cloudflare", ), SteampipeExtension::new( "code", "Steampipe plugin to query secrets and more from Code.", "https://github.com/turbot/steampipe-plugin-code", ), SteampipeExtension::new( "cohereai", "Steampipe plugin to query generations, classifications and more from CohereAI.", "https://github.com/mr-destructive/steampipe-plugin-cohereai", ), SteampipeExtension::new( "config", "Steampipe plugin to query data from various types of files like INI, JSON, YML and more.", "https://github.com/turbot/steampipe-plugin-config", ), SteampipeExtension::new( "confluence", "Steampipe plugin for querying pages, spaces, and more from Confluence.", "https://github.com/ellisvalentiner/steampipe-plugin-confluence", ), SteampipeExtension::new( "consul", "Steampipe plugin to query nodes, ACLs, services and more from Consul.", "https://github.com/turbot/steampipe-plugin-consul", ), SteampipeExtension::new( "crowdstrike", "Steampipe plugin to query resources from CrowdStrike.", "https://github.com/turbot/steampipe-plugin-crowdstrike", ), SteampipeExtension::new( "crtsh", "Steampipe plugin to query certificates, logs and more from the crt.sh certificate transparency database.", "https://github.com/turbot/steampipe-plugin-crtsh", ), SteampipeExtension::new( "csv", "Steampipe plugin to query data from CSV files.", "https://github.com/turbot/steampipe-plugin-csv", ), SteampipeExtension::new( "databricks", "Steampipe plugin to query clusters, jobs, users, and more 
from Databricks.", "https://github.com/turbot/steampipe-plugin-databricks", ), SteampipeExtension::new( "datadog", "Steampipe plugin for querying dashboards, users, roles and more from Datadog.", "https://github.com/turbot/steampipe-plugin-datadog", ), SteampipeExtension::new( "digitalocean", "Steampipe plugin for querying DigitalOcean databases, networks, and other resources.", "https://github.com/turbot/steampipe-plugin-digitalocean", ), SteampipeExtension::new( "docker", "Steampipe plugin to query Dockerfile commands and more from Docker.", "https://github.com/turbot/steampipe-plugin-docker", ), SteampipeExtension::new( "dockerhub", "Steampipe plugin for querying Docker Hub repositories, tags and other resources.", "https://github.com/turbot/steampipe-plugin-dockerhub", ), SteampipeExtension::new( "doppler", "Steampipe plugin to query projects, environments, secrets and more from Doppler.", "https://github.com/turbot/steampipe-plugin-doppler", ), SteampipeExtension::new( "duo", "Steampipe plugin for querying Duo Security users, logs and more.", "https://github.com/turbot/steampipe-plugin-duo", ), SteampipeExtension::new( "env0", "Steampipe plugin to query projects, teams, users and more from env0.", "https://github.com/turbot/steampipe-plugin-env0", ), SteampipeExtension::new( "equinix", "Steampipe plugin for querying Equinix Metal servers, networks, facilities and more.", "https://github.com/turbot/steampipe-plugin-equinix", ), SteampipeExtension::new( "exec", "Steampipe plugin to run & query shell commands on local and remote servers.", "https://github.com/turbot/steampipe-plugin-exec", ), SteampipeExtension::new( "fastly", "Steampipe plugin to query services, acls, domains and more from Fastly.", "https://github.com/turbot/steampipe-plugin-fastly", ), SteampipeExtension::new( "finance", "Steampipe plugin to query financial data including quotes and public company information.", "https://github.com/turbot/steampipe-plugin-finance", ), SteampipeExtension::new( 
"fly", "Steampipe plugin to query applications, volumes, databases, and more from your Fly organization.", "https://github.com/turbot/steampipe-plugin-fly", ), SteampipeExtension::new( "freshping", "Steampipe plugin for querying Freshping.", "https://github.com/francois2metz/steampipe-plugin-freshping", ), SteampipeExtension::new( "freshservice", "Steampipe plugin for querying FreshService agents, assets, tickets and other resources.", "https://github.com/theapsgroup/steampipe-plugin-freshservice", ), SteampipeExtension::new( "gandi", "Steampipe plugin for querying domains, mailboxes, certificates and more from Gandi.", "https://github.com/francois2metz/steampipe-plugin-gandi", ), SteampipeExtension::new( "gcp", "Steampipe plugin for querying buckets, instances, functions and more from GCP.", "https://github.com/turbot/steampipe-plugin-gcp", ), SteampipeExtension::new( "gitguardian", "Steampipe plugin for querying incidents from GitGuardian.", "https://github.com/francois2metz/steampipe-plugin-gitguardian", ), SteampipeExtension::new( "github", "Steampipe plugin for querying GitHub Repositories, Organizations, and other resources.", "https://github.com/turbot/steampipe-plugin-github", ), SteampipeExtension::new( "gitlab", "Steampipe plugin for querying GitLab Repositories, Users and other resources.", "https://github.com/theapsgroup/steampipe-plugin-gitlab", ), SteampipeExtension::new( "godaddy", "Steampipe plugin to query domains, orders, certificates and more from GoDaddy.", "https://github.com/turbot/steampipe-plugin-godaddy", ), SteampipeExtension::new( "googledirectory", "Steampipe plugin for querying users, groups, org units and more from your Google Workspace directory.", "https://github.com/turbot/steampipe-plugin-googledirectory", ), SteampipeExtension::new( "googlesearchconsole", "Steampipe plugin for query data from Google Search Console (GSC).", "https://github.com/turbot/steampipe-plugin-googlesearchconsole", ), SteampipeExtension::new( "googlesheets", 
"Steampipe plugin for query data from Google Sheets.", "https://github.com/turbot/steampipe-plugin-googlesheets", ), SteampipeExtension::new( "googleworkspace", "Steampipe plugin for querying users, groups, org units and more from your Google Workspace.", "https://github.com/turbot/steampipe-plugin-googleworkspace", ), SteampipeExtension::new( "grafana", "Steampipe plugin to query dashboards, data sources and more from Grafana.", "https://github.com/turbot/steampipe-plugin-grafana", ), SteampipeExtension::new( "guardrails", "Steampipe plugin to query resources, controls, policies and more from Turbot Guardrails.", "https://github.com/turbot/steampipe-plugin-guardrails", ), SteampipeExtension::new( "hackernews", "Steampipe plugin to query stories, items and users from Hacker News.", "https://github.com/turbot/steampipe-plugin-hackernews", ), SteampipeExtension::new( "hcloud", "Steampipe plugin to query servers, networks and more from Hetzner Cloud.", "https://github.com/turbot/steampipe-plugin-hcloud", ), SteampipeExtension::new( "heroku", "Steampipe plugin to query apps, dynos and more from Heroku.", "https://github.com/turbot/steampipe-plugin-heroku", ), SteampipeExtension::new( "hibp", "Steampipe plugin to query breaches, account breaches, pastes and passwords from Have I Been Pwned.", "https://github.com/turbot/steampipe-plugin-hibp", ), SteampipeExtension::new( "hubspot", "Steampipe plugin to query contacts, deals, tickets and more from HubSpot.", "https://github.com/turbot/steampipe-plugin-hubspot", ), SteampipeExtension::new( "hypothesis", "Steampipe plugin to query Hypothesis annotations.", "https://github.com/turbot/steampipe-plugin-hypothesis", ), SteampipeExtension::new( "ibm", "Steampipe plugin to query resources, users and more from IBM Cloud.", "https://github.com/turbot/steampipe-plugin-ibm", ), SteampipeExtension::new( "imap", "Steampipe plugin to query mailboxes and messages using IMAP.", "https://github.com/turbot/steampipe-plugin-imap", ), 
SteampipeExtension::new( "ip2locationio", "Steampipe plugin to query IP geolocation or WHOIS information from ip2location.io.", "https://github.com/ip2location/steampipe-plugin-ip2locationio", ), SteampipeExtension::new( "ipinfo", "Steampipe plugin to query IP address information from ipinfo.io.", "https://github.com/turbot/steampipe-plugin-ipinfo", ), SteampipeExtension::new( "ipstack", "Steampipe plugin for querying location, currency, timezone and security information about an IP address from ipstack.", "https://github.com/turbot/steampipe-plugin-ipstack", ), SteampipeExtension::new( "jenkins", "Steampipe plugin for querying resource jobs, builds, nodes, plugin and more from Jenkins.", "https://github.com/turbot/steampipe-plugin-jenkins", ), SteampipeExtension::new( "jira", "Steampipe plugin for querying sprints, issues, epics and more from Jira.", "https://github.com/turbot/steampipe-plugin-jira", ), SteampipeExtension::new( "jumpcloud", "Steampipe plugin to query servers, applications, user groups, and more from your JumpCloud organization.", "https://github.com/turbot/steampipe-plugin-jumpcloud", ), SteampipeExtension::new( "keycloak", "Steampipe plugin for querying Keycloak users, groups and other resources.", "https://github.com/theapsgroup/steampipe-plugin-keycloak", ), SteampipeExtension::new( "kolide", "Kolide gives you accurate, valuable and complete fleet visibility across Mac, Windows and Linux endpoints", "https://github.com/grendel-consulting/steampipe-plugin-kolide", ), SteampipeExtension::new( "kubernetes", "Steampipe plugin for Kubernetes components.", "https://github.com/turbot/steampipe-plugin-kubernetes", ), SteampipeExtension::new( "launchdarkly", "Steampipe plugin to query projects, teams, metrics, flags and more from LaunchDarkly.", "https://github.com/turbot/steampipe-plugin-launchdarkly", ), SteampipeExtension::new( "ldap", "Steampipe plugin for querying users, groups, organizational units and more from LDAP.", 
"https://github.com/turbot/steampipe-plugin-ldap", ), SteampipeExtension::new( "linear", "Steampipe plugin to query issues, teams, users and more from Linear.", "https://github.com/turbot/steampipe-plugin-linear", ), SteampipeExtension::new( "linkedin", "Steampipe plugin to query LinkedIn profiles.", "https://github.com/turbot/steampipe-plugin-linkedin", ), SteampipeExtension::new( "linode", "Steampipe plugin to query resources, users and more from Linode.", "https://github.com/turbot/steampipe-plugin-linode", ), SteampipeExtension::new( "mailchimp", "Steampipe plugin to query audiences, automation workflows, campaigns, and more from Mailchimp.", "https://github.com/turbot/steampipe-plugin-mailchimp", ), SteampipeExtension::new( "make", "Make plugin for exploring your automations in depth.", "https://github.com/marekjalovec/steampipe-plugin-make", ), SteampipeExtension::new( "mastodon", "Use SQL to instantly query Mastodon timelines, accounts, followers and more.", "https://github.com/turbot/steampipe-plugin-mastodon", ), SteampipeExtension::new( "microsoft365", "Steampipe plugin for querying calendars, contacts, drives, mailboxes and more from Microsoft 365.", "https://github.com/turbot/steampipe-plugin-microsoft365", ), SteampipeExtension::new( "mongodbatlas", "Steampipe plugin for querying clusters, users, teams and more from MongoDB Atlas.", "https://github.com/turbot/steampipe-plugin-mongodbatlas", ), SteampipeExtension::new( "namecheap", "Steampipe plugin to query domains, DNS host records and more from Namecheap.", "https://github.com/turbot/steampipe-plugin-namecheap", ), SteampipeExtension::new( "net", "Steampipe plugin for querying DNS records, certificates and other network information.", "https://github.com/turbot/steampipe-plugin-net", ), SteampipeExtension::new( "newrelic", "Steampipe plugin for querying New Relic Alerts, Events and other resources.", "https://github.com/turbot/steampipe-plugin-newrelic", ), SteampipeExtension::new( "nomad", 
"Steampipe plugin to query nodes, jobs, deployments and more from Nomad.", "https://github.com/turbot/steampipe-plugin-nomad", ), SteampipeExtension::new( "oci", "Steampipe plugin for Oracle Cloud Infrastructure services and resource types.", "https://github.com/turbot/steampipe-plugin-oci", ), SteampipeExtension::new( "okta", "Steampipe plugin for querying resource users, groups, applications and more from Okta.", "https://github.com/turbot/steampipe-plugin-okta", ), SteampipeExtension::new( "onepassword", "Steampipe plugin to query vaults, items, files and more from 1Password.", "https://github.com/turbot/steampipe-plugin-onepassword", ), SteampipeExtension::new( "openai", "Steampipe plugin to query models, completions and more from OpenAI.", "https://github.com/turbot/steampipe-plugin-openai", ), SteampipeExtension::new( "openapi", "Steampipe plugin to query introspection of the OpenAPI definition.", "https://github.com/turbot/steampipe-plugin-openapi", ), SteampipeExtension::new( "openshift", "Steampipe plugin to query projects, routes, builds and more from OpenShift.", "https://github.com/turbot/steampipe-plugin-openshift", ), SteampipeExtension::new( "openstack", "Steampipe plugin to query cloud resource information from OpenStack deployments.", "https://github.com/ernw/steampipe-plugin-openstack", ), SteampipeExtension::new( "opsgenie", "Steampipe plugin for querying teams and alerts from Opsgenie.", "https://github.com/jplanckeel/steampipe-plugin-opsgenie", ), SteampipeExtension::new( "ovh", "Steampipe plugin for querying OVH.", "https://github.com/francois2metz/steampipe-plugin-ovh", ), SteampipeExtension::new( "pagerduty", "Steampipe plugin to query services, teams, escalation policies and more from your PagerDuty account.", "https://github.com/turbot/steampipe-plugin-pagerduty", ), SteampipeExtension::new( "panos", "Steampipe plugin to query PAN-OS firewalls, security policies and more.", "https://github.com/turbot/steampipe-plugin-panos", ), 
SteampipeExtension::new( "pipes", "Steampipe plugin for querying workspaces, connections and more from Turbot Pipes.", "https://github.com/turbot/steampipe-plugin-pipes", ), SteampipeExtension::new( "planetscale", "Steampipe plugin to query databases, logs and more from PlanetScale.", "https://github.com/turbot/steampipe-plugin-planetscale", ), SteampipeExtension::new( "prometheus", "Steampipe plugin to query metrics, labels, alerts and more from Prometheus.", "https://github.com/turbot/steampipe-plugin-prometheus", ), SteampipeExtension::new( "reddit", "Steampipe plugin to query Reddit users, posts, votes and more.", "https://github.com/turbot/steampipe-plugin-reddit", ), SteampipeExtension::new( "rss", "Steampipe plugin to query RSS channels & Atom feeds", "https://github.com/turbot/steampipe-plugin-rss", ), SteampipeExtension::new( "salesforce", "Steampipe plugin to query accounts, opportunities, users and more from your Salesforce instance.", "https://github.com/turbot/steampipe-plugin-salesforce", ), SteampipeExtension::new( "scaleway", "Steampipe plugin to query servers, networks, databases and more from your Scaleway project.", "https://github.com/turbot/steampipe-plugin-scaleway", ), SteampipeExtension::new( "scalingo", "Steampipe plugin for querying apps, addons and more from Scalingo.", "https://github.com/francois2metz/steampipe-plugin-scalingo", ), SteampipeExtension::new( "semgrep", "Steampipe plugin to query deployments, findings, and projects from Semgrep.", "https://github.com/gabrielsoltz/steampipe-plugin-semgrep", ), SteampipeExtension::new( "sentry", "Steampipe plugin to query organizations, projects, teams and more from Sentry.", "https://github.com/turbot/steampipe-plugin-sentry", ), SteampipeExtension::new( "servicenow", "Use SQL to query CMDB CI services, servers, incidents, objects and more from ServiceNow.", "https://github.com/turbot/steampipe-plugin-servicenow", ), SteampipeExtension::new( "shodan", "Steampipe plugin to query host, DNS 
and exploit information using Shodan.", "https://github.com/turbot/steampipe-plugin-shodan", ), SteampipeExtension::new( "shopify", "Steampipe plugin to query products, order, customers and more from Shopify.", "https://github.com/turbot/steampipe-plugin-shopify", ), SteampipeExtension::new( "slack", "Steampipe plugin for querying Slack Conversations, Groups, Users and other resources.", "https://github.com/turbot/steampipe-plugin-slack", ), SteampipeExtension::new( "snowflake", "Steampipe plugin for querying roles, databases, and more from Snowflake.", "https://github.com/turbot/steampipe-plugin-snowflake", ), SteampipeExtension::new( "solace", "Solace PubSub+ Cloud plugin for exploring your Solace Cloud configuration in depth.", "https://github.com/solacelabs/steampipe-plugin-solace", ), SteampipeExtension::new( "splunk", "Steampipe plugin to query apps, indexes, logs and more from Splunk.", "https://github.com/turbot/steampipe-plugin-splunk", ), SteampipeExtension::new( "steampipe", "Steampipe plugin for querying Steampipe components, such as the available plugins in the steampipe hub.", "https://github.com/turbot/steampipe-plugin-steampipe", ), SteampipeExtension::new( "steampipecloud", "Steampipe plugin for querying workspaces, connections and more from Steampipe Cloud.", "https://github.com/turbot/steampipe-plugin-steampipecloud", ), SteampipeExtension::new( "stripe", "Steampipe plugin for querying customers, products, invoices and more from Stripe.", "https://github.com/turbot/steampipe-plugin-stripe", ), SteampipeExtension::new( "supabase", "Steampipe plugin to query projects, functions, network restrictions, and more from your Supabase organization.", "https://github.com/turbot/steampipe-plugin-supabase", ), SteampipeExtension::new( "tailscale", "Steampipe plugin to query VPN networks, devices and more from tailscale.", "https://github.com/turbot/steampipe-plugin-tailscale", ), SteampipeExtension::new( "terraform", "Steampipe plugin to query data from 
Terraform files.", "https://github.com/turbot/steampipe-plugin-terraform", ), SteampipeExtension::new( "tfe", "Steampipe plugin to query resources, users and more from Terraform Enterprise.", "https://github.com/turbot/steampipe-plugin-tfe", ), SteampipeExtension::new( "tomba", "Steampipe plugin to query Domain or Email information from tomba.io.", "https://github.com/tomba-io/steampipe-plugin-tomba", ), SteampipeExtension::new( "trello", "Steampipe plugin to query boards, cards, lists, and more from Trello.", "https://github.com/turbot/steampipe-plugin-trello", ), SteampipeExtension::new( "trivy", "Steampipe plugin using Trivy to query advisories, vulnerabilities for containers, code and more.", "https://github.com/turbot/steampipe-plugin-trivy", ), SteampipeExtension::new( "turbot", "Steampipe plugin to query resources, controls, policies and more from Turbot.", "https://github.com/turbot/steampipe-plugin-turbot", ), SteampipeExtension::new( "twilio", "Steampipe plugin to query calls, messages and other communication functions from your Twilio project.", "https://github.com/turbot/steampipe-plugin-twilio", ), SteampipeExtension::new( "twitter", "Steampipe plugin to query tweets, users and followers from Twitter.", "https://github.com/turbot/steampipe-plugin-twitter", ), SteampipeExtension::new( "updown", "Steampipe plugin for querying updown.io checks, metrics and downtime data.", "https://github.com/turbot/steampipe-plugin-updown", ), SteampipeExtension::new( "uptimerobot", "Steampipe plugin to query monitors, alert contacts and more from UptimeRobot.", "https://github.com/turbot/steampipe-plugin-uptimerobot", ), SteampipeExtension::new( "urlscan", "Steampipe plugin to query URL scanning results including requests cookies, headers and more from urlscan.io.", "https://github.com/turbot/steampipe-plugin-urlscan", ), SteampipeExtension::new( "vanta", "Steampipe plugin to query users, policies, compliances, and more from your Vanta organization.", 
"https://github.com/turbot/steampipe-plugin-vanta", ), SteampipeExtension::new( "vault", "Steampipe plugin for querying available secret keys (not values), etc from Hashicorp Vault.", "https://github.com/theapsgroup/steampipe-plugin-vault", ), SteampipeExtension::new( "vercel", "Steampipe plugin to query projects, teams, domains and more from Vercel.", "https://github.com/turbot/steampipe-plugin-vercel", ), SteampipeExtension::new( "virustotal", "Steampipe plugin to query file, domain, URL and IP scanning results from VirusTotal.", "https://github.com/turbot/steampipe-plugin-virustotal", ), SteampipeExtension::new( "vsphere", "Steampipe plugin for querying data from a vsphere environment.", "https://github.com/theapsgroup/steampipe-plugin-vsphere", ), SteampipeExtension::new( "weatherkit", "Steampipe plugin for querying weather from WeatherKit.", "https://github.com/ellisvalentiner/steampipe-plugin-weatherkit", ), SteampipeExtension::new( "whois", "Steampipe plugin for querying domains, name servers and contact information from WHOIS.", "https://github.com/turbot/steampipe-plugin-whois", ), SteampipeExtension::new( "wiz", "Steampipe plugin to query security controls, findings, vulnerabilities, and more from your Wiz subscription.", "https://github.com/turbot/steampipe-plugin-wiz", ), SteampipeExtension::new( "workos", "Steampipe plugin to query directories, groups and more from WorkOS.", "https://github.com/turbot/steampipe-plugin-workos", ), SteampipeExtension::new( "zendesk", "Steampipe plugin for querying tickets, users, groups and more from Zendesk.", "https://github.com/turbot/steampipe-plugin-zendesk", ), SteampipeExtension::new( "zoom", "Steampipe plugin for querying Zoom meetings, webinars, users and more.", "https://github.com/turbot/steampipe-plugin-zoom", ), ] } #[derive(Debug)] pub struct SteampipeExtension { pub name: String, pub description: String, pub url: String, } impl SteampipeExtension { pub fn new(name: &str, description: &str, url: &str) -> 
SteampipeExtension {
        SteampipeExtension {
            name: name.to_string(),
            description: description.to_string(),
            url: url.to_string(),
        }
    }
}

/// Returns the full catalog of known Steampipe extensions.
///
/// NOTE(review): `EXTENSIONS` is defined earlier in this file (not visible in this
/// chunk); assumed to be a lazily-initialized `Vec<SteampipeExtension>` — confirm.
pub fn get<'a>() -> &'a Vec<SteampipeExtension> {
    &EXTENSIONS
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_get() {
        let extensions = get();
        // The catalog is a fixed list; this guards against accidental additions/removals.
        assert_eq!(143, extensions.len());
    }
}

================================================
FILE: postgresql_extensions/src/repository/steampipe/mod.rs
================================================
mod extensions;
pub mod repository;

pub const URL: &str = "https://github.com/turbot";

================================================
FILE: postgresql_extensions/src/repository/steampipe/repository.rs
================================================
use crate::Error::ExtensionNotFound;
use crate::Result;
use crate::matcher::tar_gz_matcher;
use crate::model::AvailableExtension;
use crate::repository::steampipe::URL;
use crate::repository::{Repository, steampipe};
use async_trait::async_trait;
use postgresql_archive::extractor::{ExtractDirectories, tar_gz_extract};
use postgresql_archive::get_archive;
use postgresql_archive::repository::github::repository::GitHub;
use regex_lite::Regex;
use semver::{Version, VersionReq};
use std::fmt::Debug;
use std::path::PathBuf;

/// Steampipe repository.
#[derive(Debug)]
pub struct Steampipe;

impl Steampipe {
    /// Creates a new Steampipe repository.
    ///
    /// # Errors
    /// * If the repository cannot be created
    #[expect(clippy::new_ret_no_self)]
    pub fn new() -> Result<Box<dyn Repository>> {
        Ok(Box::new(Self))
    }

    /// Initializes the repository.
    ///
    /// # Errors
    /// * If the repository cannot be initialized.
pub fn initialize() -> Result<()> { postgresql_archive::matcher::registry::register( |url| Ok(url.starts_with(URL)), tar_gz_matcher, )?; postgresql_archive::repository::registry::register( |url| Ok(url.starts_with(URL)), Box::new(GitHub::new), )?; Ok(()) } } #[async_trait] impl Repository for Steampipe { fn name(&self) -> &'static str { "steampipe" } async fn get_available_extensions(&self) -> Result> { let mut extensions = Vec::new(); for steampipe_extension in steampipe::extensions::get() { let extension = AvailableExtension::new( self.name(), steampipe_extension.name.as_str(), steampipe_extension.description.as_str(), ); extensions.push(extension); } Ok(extensions) } async fn get_archive( &self, postgresql_version: &str, name: &str, version: &VersionReq, ) -> Result<(Version, Vec)> { let Some(extension) = steampipe::extensions::get() .iter() .find(|extension| extension.name == name) else { let extension = format!("{}:{}:{}", self.name(), name, version); return Err(ExtensionNotFound(extension)); }; let url = format!("{}?postgresql_version={postgresql_version}", extension.url); let archive = get_archive(url.as_str(), version).await?; Ok(archive) } async fn install( &self, _name: &str, library_dir: PathBuf, extension_dir: PathBuf, archive: &[u8], ) -> Result> { let mut extract_directories = ExtractDirectories::default(); extract_directories.add_mapping(Regex::new(r"\.(dll|dylib|so)$")?, library_dir); extract_directories.add_mapping(Regex::new(r"\.(control|sql)$")?, extension_dir); let bytes = &archive.to_vec(); let files = tar_gz_extract(bytes, &extract_directories)?; Ok(files) } } #[cfg(test)] mod tests { use super::*; use crate::repository::Repository; #[test] fn test_name() { let repository = Steampipe; assert_eq!("steampipe", repository.name()); } #[tokio::test] async fn test_get_available_extensions() -> Result<()> { let repository = Steampipe; let extensions = repository.get_available_extensions().await?; let extension = &extensions[0]; 
assert_eq!("abuseipdb", extension.name());
        assert_eq!(
            "Steampipe plugin to query IP address abuse data and more from AbuseIPDB.",
            extension.description()
        );
        assert_eq!(143, extensions.len());
        Ok(())
    }

    #[tokio::test]
    async fn test_get_archive_error() -> anyhow::Result<()> {
        let repository = Steampipe;
        let postgresql_version = "15.7";
        let name = "does-not-exist";
        let version = VersionReq::parse("=0.12.0")?;
        let result = repository
            .get_archive(postgresql_version, name, &version)
            .await;
        // Unknown extension names must surface as an error, not a panic.
        assert!(result.is_err());
        Ok(())
    }
}

================================================
FILE: postgresql_extensions/src/repository/tensor_chord/mod.rs
================================================
pub mod repository;

pub const URL: &str = "https://github.com/tensorchord";

================================================
FILE: postgresql_extensions/src/repository/tensor_chord/repository.rs
================================================
use crate::Result;
use crate::matcher::zip_matcher;
use crate::model::AvailableExtension;
use crate::repository::Repository;
use crate::repository::tensor_chord::URL;
use async_trait::async_trait;
use postgresql_archive::extractor::{ExtractDirectories, zip_extract};
use postgresql_archive::get_archive;
use postgresql_archive::repository::github::repository::GitHub;
use regex_lite::Regex;
use semver::{Version, VersionReq};
use std::fmt::Debug;
use std::path::PathBuf;

/// TensorChord repository.
#[derive(Debug)]
pub struct TensorChord;

impl TensorChord {
    /// Creates a new TensorChord repository.
    ///
    /// # Errors
    /// * If the repository cannot be created
    #[expect(clippy::new_ret_no_self)]
    pub fn new() -> Result<Box<dyn Repository>> {
        Ok(Box::new(Self))
    }

    /// Initializes the repository.
    ///
    /// # Errors
    /// * If the repository cannot be initialized.
pub fn initialize() -> Result<()> { postgresql_archive::matcher::registry::register( |url| Ok(url.starts_with(URL)), zip_matcher, )?; postgresql_archive::repository::registry::register( |url| Ok(url.starts_with(URL)), Box::new(GitHub::new), )?; Ok(()) } } #[async_trait] impl Repository for TensorChord { fn name(&self) -> &'static str { "tensor-chord" } async fn get_available_extensions(&self) -> Result> { let extensions = vec![AvailableExtension::new( self.name(), "pgvecto.rs", "Scalable, Low-latency and Hybrid-enabled Vector Search", )]; Ok(extensions) } async fn get_archive( &self, postgresql_version: &str, name: &str, version: &VersionReq, ) -> Result<(Version, Vec)> { let url = format!("{URL}/{name}?postgresql_version={postgresql_version}"); let archive = get_archive(url.as_str(), version).await?; Ok(archive) } async fn install( &self, _name: &str, library_dir: PathBuf, extension_dir: PathBuf, archive: &[u8], ) -> Result> { let mut extract_directories = ExtractDirectories::default(); extract_directories.add_mapping(Regex::new(r"\.(dll|dylib|so)$")?, library_dir); extract_directories.add_mapping(Regex::new(r"\.(control|sql)$")?, extension_dir); let bytes = &archive.to_vec(); let files = zip_extract(bytes, &extract_directories)?; Ok(files) } } #[cfg(test)] mod tests { use super::*; use crate::repository::Repository; #[test] fn test_name() { let repository = TensorChord; assert_eq!("tensor-chord", repository.name()); } #[tokio::test] async fn test_get_available_extensions() -> Result<()> { let repository = TensorChord; let extensions = repository.get_available_extensions().await?; let extension = &extensions[0]; assert_eq!("pgvecto.rs", extension.name()); assert_eq!( "Scalable, Low-latency and Hybrid-enabled Vector Search", extension.description() ); Ok(()) } } ================================================ FILE: postgresql_extensions/tests/blocking.rs ================================================ #[cfg(feature = "blocking")] use test_log::test; #[cfg(feature 
= "blocking")]
#[test]
fn test_get_available_extensions() -> anyhow::Result<()> {
    let extensions = postgresql_extensions::blocking::get_available_extensions()?;
    #[cfg(feature = "steampipe")]
    assert!(
        extensions
            .iter()
            .any(|extension| extension.namespace() == "steampipe")
    );
    #[cfg(feature = "tensor-chord")]
    assert!(
        extensions
            .iter()
            .any(|extension| extension.namespace() == "tensor-chord")
    );
    Ok(())
}

#[cfg(all(target_os = "linux", feature = "blocking", feature = "tensor-chord"))]
#[test]
fn test_extensions_blocking_lifecycle() -> anyhow::Result<()> {
    // NOTE(review): the TempDir guard is dropped at the end of this statement, so the
    // directory itself is removed immediately and only its unique path is reused —
    // confirm this is intentional.
    let installation_dir = tempfile::tempdir()?.path().to_path_buf();
    let postgresql_version = semver::VersionReq::parse("=16.4.0")?;
    let settings = postgresql_embedded::Settings {
        version: postgresql_version.clone(),
        installation_dir: installation_dir.clone(),
        ..Default::default()
    };
    let mut postgresql = postgresql_embedded::blocking::PostgreSQL::new(settings);
    postgresql.setup()?;
    let settings = postgresql.settings();

    // Skip the test if the PostgreSQL version does not match; when testing with the 'bundled'
    // feature, the version may vary and the test will fail.
    if settings.version != postgresql_version {
        return Ok(());
    }

    let namespace = "tensor-chord";
    let name = "pgvecto.rs";
    let version = semver::VersionReq::parse("=0.3.0")?;

    // Install, verify, then uninstall and verify again.
    let installed_extensions = postgresql_extensions::blocking::get_installed_extensions(settings)?;
    assert!(installed_extensions.is_empty());

    postgresql_extensions::blocking::install(settings, namespace, name, &version)?;
    let installed_extensions = postgresql_extensions::blocking::get_installed_extensions(settings)?;
    assert!(!installed_extensions.is_empty());

    postgresql_extensions::blocking::uninstall(settings, namespace, name)?;
    let installed_extensions = postgresql_extensions::blocking::get_installed_extensions(settings)?;
    assert!(installed_extensions.is_empty());

    std::fs::remove_dir_all(&installation_dir)?;
    Ok(())
}

================================================
FILE: postgresql_extensions/tests/extensions.rs
================================================
use anyhow::Result;
use postgresql_extensions::get_available_extensions;

#[tokio::test]
async fn test_get_available_extensions() -> Result<()> {
    let extensions = get_available_extensions().await?;
    #[cfg(feature = "steampipe")]
    assert!(
        extensions
            .iter()
            .any(|extension| extension.namespace() == "steampipe")
    );
    #[cfg(feature = "tensor-chord")]
    assert!(
        extensions
            .iter()
            .any(|extension| extension.namespace() == "tensor-chord")
    );
    Ok(())
}

#[cfg(all(target_os = "linux", feature = "tensor-chord"))]
#[tokio::test]
async fn test_extensions_tensor_chord_lifecycle() -> Result<()> {
    let installation_dir = tempfile::tempdir()?.path().to_path_buf();
    let postgresql_version = semver::VersionReq::parse("=16.4.0")?;
    let settings = postgresql_embedded::Settings {
        version: postgresql_version.clone(),
        installation_dir: installation_dir.clone(),
        ..Default::default()
    };
    let mut postgresql = postgresql_embedded::PostgreSQL::new(settings);
    postgresql.setup().await?;
    let settings = postgresql.settings();

    // Skip the test if the PostgreSQL version does not match; when testing with the 'bundled'
    // feature, the version may vary and the test will fail.
    if settings.version != postgresql_version {
        return Ok(());
    }

    let namespace = "tensor-chord";
    let name = "pgvecto.rs";
    let version = semver::VersionReq::parse("=0.3.0")?;

    let installed_extensions = postgresql_extensions::get_installed_extensions(settings).await?;
    assert!(installed_extensions.is_empty());

    postgresql_extensions::install(settings, namespace, name, &version).await?;
    let installed_extensions = postgresql_extensions::get_installed_extensions(settings).await?;
    assert!(!installed_extensions.is_empty());

    postgresql_extensions::uninstall(settings, namespace, name).await?;
    let installed_extensions = postgresql_extensions::get_installed_extensions(settings).await?;
    assert!(installed_extensions.is_empty());

    tokio::fs::remove_dir_all(&installation_dir).await?;
    Ok(())
}

================================================
FILE: postgresql_extensions/tests/portal_corp.rs
================================================
#[cfg(not(any(
    all(target_os = "linux", target_arch = "aarch64"),
    all(target_os = "macos", target_arch = "x86_64")
)))]
#[cfg(feature = "portal-corp")]
#[tokio::test]
async fn test_extensions_portal_corp_lifecycle() -> anyhow::Result<()> {
    let installation_dir = tempfile::tempdir()?.path().to_path_buf();
    let postgresql_version = semver::VersionReq::parse("=16.4.0")?;
    let settings = postgresql_embedded::Settings {
        version: postgresql_version.clone(),
        installation_dir: installation_dir.clone(),
        ..Default::default()
    };
    let mut postgresql = postgresql_embedded::PostgreSQL::new(settings);
    postgresql.setup().await?;
    let settings = postgresql.settings();

    // Skip the test if the PostgreSQL version does not match; when testing with the 'bundled'
    // feature, the version may vary and the test will fail.
if settings.version != postgresql_version {
    return Ok(());
}

// Async lifecycle: install, verify, uninstall of the pgvector_compiled extension.
let namespace = "portal-corp";
let name = "pgvector_compiled";
let version = semver::VersionReq::parse("=0.16.12")?;

// Nothing installed yet.
let installed_extensions = postgresql_extensions::get_installed_extensions(settings).await?;
assert!(installed_extensions.is_empty());

// Install and confirm the extension is listed.
postgresql_extensions::install(settings, namespace, name, &version).await?;
let installed_extensions = postgresql_extensions::get_installed_extensions(settings).await?;
assert!(!installed_extensions.is_empty());

// Uninstall and confirm the list is empty again.
postgresql_extensions::uninstall(settings, namespace, name).await?;
let installed_extensions = postgresql_extensions::get_installed_extensions(settings).await?;
assert!(installed_extensions.is_empty());

// Clean up the temporary installation directory.
tokio::fs::remove_dir_all(&installation_dir).await?;
Ok(())
}

================================================
FILE: postgresql_extensions/tests/steampipe.rs
================================================

/// Lifecycle test for the steampipe `csv` extension; runs on Linux and macOS only.
#[cfg(any(target_os = "linux", target_os = "macos"))]
#[cfg(feature = "steampipe")]
#[tokio::test]
async fn test_extensions_steampipe_lifecycle() -> anyhow::Result<()> {
    // Install a pinned PostgreSQL version into a throwaway temp directory.
    let installation_dir = tempfile::tempdir()?.path().to_path_buf();
    let postgresql_version = semver::VersionReq::parse("=15.7.0")?;
    let settings = postgresql_embedded::Settings {
        version: postgresql_version.clone(),
        installation_dir: installation_dir.clone(),
        ..Default::default()
    };
    let mut postgresql = postgresql_embedded::PostgreSQL::new(settings);
    postgresql.setup().await?;
    let settings = postgresql.settings();
    // Skip the test if the PostgreSQL version does not match; when testing with the 'bundled'
    // feature, the version may vary and the test will fail.
if settings.version != postgresql_version {
    return Ok(());
}

// Async lifecycle: install, verify, uninstall of the steampipe csv extension.
let namespace = "steampipe";
let name = "csv";
let version = semver::VersionReq::parse("=0.12.0")?;

// Nothing installed yet.
let installed_extensions = postgresql_extensions::get_installed_extensions(settings).await?;
assert!(installed_extensions.is_empty());

// Install and confirm the extension is listed.
postgresql_extensions::install(settings, namespace, name, &version).await?;
let installed_extensions = postgresql_extensions::get_installed_extensions(settings).await?;
assert!(!installed_extensions.is_empty());

// Uninstall and confirm the list is empty again.
postgresql_extensions::uninstall(settings, namespace, name).await?;
let installed_extensions = postgresql_extensions::get_installed_extensions(settings).await?;
assert!(installed_extensions.is_empty());

// Clean up the temporary installation directory.
tokio::fs::remove_dir_all(&installation_dir).await?;
Ok(())
}

================================================
FILE: release-plz.toml
================================================

# Workspace-wide release-plz settings: one combined changelog, no per-crate
# git releases or tags by default.
[workspace]
changelog_path = "./CHANGELOG.md"
git_release_enable = false
git_tag_enable = false
pr_name = "postgresql-embedded-v{{ version }}"
release_always = false

# The top-level crate drives releases; its changelog folds in the sub-crates
# listed below, and it is the only package that gets a git release/tag.
[[package]]
name = "postgresql_embedded"
changelog_update = true
changelog_include = [
    "postgresql_archive",
    "postgresql_commands",
    "postgresql_extensions",
]
git_release_enable = true
git_release_name = "v{{ version }}"
git_tag_enable = true
git_tag_name = "v{{ version }}"

# Tera template for generated changelog entries; the string content below is
# consumed verbatim by release-plz and must not be reflowed.
[changelog]
body = """ ## `{{ package }}` - [{{ version | trim_start_matches(pat="v") }}]{%- if release_link -%}({{ release_link }}){% endif %} - {{ timestamp | date(format="%Y-%m-%d") }} {% for group, commits in commits | group_by(attribute="group") %} ### {{ group | upper_first }} {% for commit in commits %} {%- if commit.scope -%} - *({{commit.scope}})* {% if commit.breaking %}[**breaking**] {% endif %}{{ commit.message }}{%- if commit.links %} ({% for link in commit.links %}[{{link.text}}]({{link.href}}) {% endfor -%}){% endif %} {% else -%} - {% if commit.breaking %}[**breaking**] {% endif %}{{ commit.message }} {% endif -%} {% endfor -%} {% endfor
-%} """

================================================
FILE: rust-toolchain.toml
================================================

# Pin the Rust toolchain used to build the workspace so CI and local builds agree.
[toolchain]
channel = "1.92.0"
profile = "default"