Repository: awslabs/coldsnap Branch: develop Commit: e4941615550a Files: 18 Total size: 116.1 KB Directory structure: gitextract_pcjjmbic/ ├── .github/ │ ├── cache_bust │ ├── dependabot.yml │ └── workflows/ │ └── rust.yml ├── .gitignore ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Cargo.toml ├── LICENSE ├── NOTICE ├── README.md ├── deny.toml └── src/ ├── bin/ │ └── coldsnap/ │ └── main.rs ├── block_device.rs ├── download.rs ├── lib.rs ├── upload.rs └── wait.rs ================================================ FILE CONTENTS ================================================ ================================================ FILE: .github/cache_bust ================================================ # this file provides a manual way to clear out github actions caches. any change # to this file will cause all github action caches to miss. increment the number # below by 1 if you need to clear the caches. 3 ================================================ FILE: .github/dependabot.yml ================================================ version: 2 updates: - package-ecosystem: "cargo" directory: "/" schedule: interval: "daily" ignore: # For AWS SDK for Rust, ignore all (but one) updates # - dependency-name: "aws-config" - dependency-name: "aws-endpoint" - dependency-name: "aws-http" - dependency-name: "aws-hyper" - dependency-name: "aws-sig*" - dependency-name: "aws-sdk*" - dependency-name: "aws-smithy*" - dependency-name: "aws-types" - package-ecosystem: "github-actions" directory: "/" schedule: interval: "daily" ================================================ FILE: .github/workflows/rust.yml ================================================ name: Rust permissions: contents: read on: # triggers when a PR is posted pull_request: branches: [develop] paths-ignore: - "**.md" - ".github/dependabot.yml" # triggers when a PR is merged push: branches: [develop] paths-ignore: - "**.md" - ".github/dependabot.yml" jobs: build: strategy: matrix: include: - features: default - features: aws-sdk-rust-rustls additional_flags: --no-default-features env: CARGO_HOME: .cargo runs-on: ubuntu-latest steps: - uses: actions/checkout@v6 - uses: actions/cache@v5 with: path: | .cargo target # you can edit the .github/cache_bust file if you need to clear the cache key: ${{ hashFiles('.github/cache_bust') }}-${{ hashFiles('.github/workflows/rust.yml') }}-${{ runner.os }}-${{ matrix.features }}-${{ hashFiles('**/Cargo.lock') }} restore-keys: | ${{ hashFiles('.github/cache_bust') }}-${{ hashFiles('.github/workflows/rust.yml') }}-${{ runner.os }}-${{ matrix.features }} # print the current rustc. replace stable to pin to a specific toolchain version. 
- run: rustup default stable - run: rustup component add rustfmt - run: rustup component add clippy - run: cargo test --features ${{ matrix.features }} ${{ matrix.additional_flags }} --locked - run: cargo build --features ${{ matrix.features }} ${{ matrix.additional_flags }} --locked - run: cargo clippy --features ${{ matrix.features }} ${{ matrix.additional_flags }} --locked -- -D warnings --no-deps - run: cargo fmt -- --check - run: cargo install cargo-deny --locked - run: cargo deny --features ${{ matrix.features }} --no-default-features check --disable-fetch licenses bans sources ================================================ FILE: .gitignore ================================================ /target *.img ================================================ FILE: CHANGELOG.md ================================================ # Changelog All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). ## [0.10.0] - 2026-04-28 ### Added - Add support for STS assume role via default credential provider chains [#422] - Add resume capability for interrupted downloads (thanks @msolson) [#434] ### Fixed - Fix indefinite hangs on non-EC2 network paths (thanks @nkk0) [#438] ### Changed - Allow ISC and OpenSSL licenses in `deny.toml` [#439] - Update dependencies [#423], [#425], [#427], [#428], [#429], [#430], [#431], [#432], [#435], [#436], [#440], [#441], [#445] ## [0.9.0] - 2025-10-07 ### Added - Add support for specifying KMS key for snapshot encryption [#403] - Add support for credential-process method of AWS credential retrieval (thanks @ajrudzitis) [#415] ### Changed - Update dependencies [#401], [#404], [#405], [#407], [#411], [#412], [#416], [#417] - Limit permissions of GITHUB_TOKEN in actions [#414] ## [0.8.0] - 2025-07-23 ### Changed - (breaking) Include blocks of all zeroes by default [#396] - Add user agent to API requests [#397] - Update dependencies [#387], [#388], [#390], [#392], [#394] - Fix clippy lints [#398] - Update `deny.toml` to check Linux targets [#399] ## [0.7.0] - 2025-03-31 ### Changed - Update Dependencies [#364], [#365] - Add custom tags support for snapshot upload (thanks @jshwrig) [#378] ## [0.6.3] - 2024-12-19 ### Changed - Dependency updates [#363] ## [0.6.2] - 2024-08-01 ### Changed - Dependency updates [#346] ## [0.6.1] - 2024-03-15 ### Changed - Dependency updates - Improve performance of an atomic lock (thanks @wang384670111) [#309] ## [0.6.0] - 2023-08-22 ### Security Fix - Bump openssl from 0.10.48 to 0.10.55 [#257], [#271] - Bump h2 from 0.3.15 to 0.3.20 [#257], [#271] - Bump AWS SDK for Rust [#257] ### Changed - Update other dependencies ## [0.5.1] - 2023-04-11 ### Security Fix - Bump openssl from 0.10.45 to 0.10.48 [#247] ### Changed - Bump tokio from 1.25.0 to 1.26.0 [#239] ## [0.5.0] - 2023-03-08 ### Changed - Add debug logging to help with troubleshooting [#220] - Remove minor/patch versions from Cargo.tomls [#237] - Update dependencies ## ~~[0.4.3] - 2023-03-02~~ ### ~~Changed~~ - ~~Add debug logging to help with troubleshooting [#220]~~ - ~~Remove minor/patch versions from Cargo.tomls [#237]~~ - ~~Update dependencies~~ ⚠ This release was yanked and re-released as 0.5.0 due to breaking changes. ## [0.4.2] - 2022-10-03 ### Changed - Update dependencies [#197] ## [0.4.1] - 2022-08-12 ### Changed - Prevent integer overflows during offset calculations ([#186], thanks @okudajun!) 
- Update dependencies ## [0.4.0] - 2022-07-26 ### Changed - Limited nix features ([#143], thanks @rtzoeller!) - Removed Rusoto in favor of AWS SDK Rust [#145] - Added support for files over 2^31 bytes ([#171], thanks @grahamc and @cole-c!) - Update dependencies [#147], [#149], [#168], [#179] ## [0.3.3] - 2022-04-26 ### Changed - Add support for uploading from a block device. [#92] - Upgrade SNAFU. ([#115], thanks, @shepmaster!) - Unpin tokio. [#129] - Update dependencies. [#91], [#94], [#97], [#98], [#99], [#102], [#103], [#105], [#106], [#109], [#111], [#112], [#114], [#115], [#116], [#117], [#118], [#119], [#123], [#124], [#127], [#130], [#131], [#132], [#134], [#135] ## [0.3.2] - 2021-07-30 ### Changed - Update dependencies. [#61], [#63], [#64], [#66], [#67], [#73], [#77], [#82], [#87], [#88] - Update docs to recommend installing with `--locked`. [#79] - Add license check to CI runner. [#74] ## [0.3.1] - 2021-05-13 ### Changed - Add backoff-retry behavior to coldsnap uploads. [#56] - Update dependencies. [#48], [#50], [#51], [#54], [#55], [#58], [#60] - Fix clippy warnings for Rust 1.52. [#57] ## [0.3.0] - 2021-02-25 ### Breaking Changes - Updated tokio to v1, this is a breaking change when using coldsnap as a library. [#39] ### Changed - Fix an issue with download filepaths [#40] ## [0.2.0] - 2020-11-11 ### Changed - Added Cargo.toml features to switch between rusoto native-tls and rustls. [#18] ## [0.1.0] - 2020-08-05 ### Added - Everything! [Unreleased]: https://github.com/awslabs/coldsnap/compare/v0.10.0...develop [0.10.0]: https://github.com/awslabs/coldsnap/compare/v0.9.0...v0.10.0 [0.9.0]: https://github.com/awslabs/coldsnap/compare/v0.8.0...v0.9.0 [0.8.0]: https://github.com/awslabs/coldsnap/compare/v0.7.0...v0.8.0 [0.7.0]: https://github.com/awslabs/coldsnap/compare/v0.6.3...v0.7.0 [0.6.3]: https://github.com/awslabs/coldsnap/compare/v0.6.2...v0.6.3 [0.6.2]: https://github.com/awslabs/coldsnap/compare/v0.6.1...v0.6.2 [0.6.1]: https://github.com/awslabs/coldsnap/compare/v0.6.0...v0.6.1 [0.6.0]: https://github.com/awslabs/coldsnap/compare/v0.5.1...v0.6.0 [0.5.1]: https://github.com/awslabs/coldsnap/compare/v0.5.0...v0.5.1 [0.5.0]: https://github.com/awslabs/coldsnap/compare/v0.4.2...v0.5.0 [0.4.3]: https://github.com/awslabs/coldsnap/compare/v0.4.2...v0.4.3 [0.4.2]: https://github.com/awslabs/coldsnap/compare/v0.4.1...v0.4.2 [0.4.1]: https://github.com/awslabs/coldsnap/compare/v0.4.0...v0.4.1 [0.4.0]: https://github.com/awslabs/coldsnap/compare/v0.3.3...v0.4.0 [0.3.3]: https://github.com/awslabs/coldsnap/compare/v0.3.2...v0.3.3 [0.3.2]: https://github.com/awslabs/coldsnap/compare/v0.3.1...v0.3.2 [0.3.1]: https://github.com/awslabs/coldsnap/compare/v0.3.0...v0.3.1 [0.3.0]: https://github.com/awslabs/coldsnap/compare/v0.2.0...v0.3.0 [0.2.0]: https://github.com/awslabs/coldsnap/compare/v0.1.0...v0.2.0 [0.1.0]: https://github.com/awslabs/coldsnap/releases/tag/v0.1.0 [#18]: https://github.com/awslabs/coldsnap/pull/18 [#39]: https://github.com/awslabs/coldsnap/pull/39 [#40]: https://github.com/awslabs/coldsnap/pull/40 [#48]: https://github.com/awslabs/coldsnap/pull/48 [#50]: https://github.com/awslabs/coldsnap/pull/50 [#51]: https://github.com/awslabs/coldsnap/pull/51 [#54]: https://github.com/awslabs/coldsnap/pull/54 [#55]: https://github.com/awslabs/coldsnap/pull/55 [#56]: https://github.com/awslabs/coldsnap/pull/56 [#57]: https://github.com/awslabs/coldsnap/pull/57 [#58]: https://github.com/awslabs/coldsnap/pull/58 [#60]: https://github.com/awslabs/coldsnap/pull/60 [#61]: 
https://github.com/awslabs/coldsnap/pull/61 [#63]: https://github.com/awslabs/coldsnap/pull/63 [#64]: https://github.com/awslabs/coldsnap/pull/64 [#66]: https://github.com/awslabs/coldsnap/pull/66 [#67]: https://github.com/awslabs/coldsnap/pull/67 [#73]: https://github.com/awslabs/coldsnap/pull/73 [#74]: https://github.com/awslabs/coldsnap/pull/74 [#77]: https://github.com/awslabs/coldsnap/pull/77 [#79]: https://github.com/awslabs/coldsnap/pull/79 [#82]: https://github.com/awslabs/coldsnap/pull/82 [#87]: https://github.com/awslabs/coldsnap/pull/87 [#88]: https://github.com/awslabs/coldsnap/pull/88 [#91]: https://github.com/awslabs/coldsnap/pull/91 [#92]: https://github.com/awslabs/coldsnap/pull/92 [#94]: https://github.com/awslabs/coldsnap/pull/94 [#97]: https://github.com/awslabs/coldsnap/pull/97 [#98]: https://github.com/awslabs/coldsnap/pull/98 [#99]: https://github.com/awslabs/coldsnap/pull/99 [#102]: https://github.com/awslabs/coldsnap/pull/102 [#103]: https://github.com/awslabs/coldsnap/pull/103 [#105]: https://github.com/awslabs/coldsnap/pull/105 [#106]: https://github.com/awslabs/coldsnap/pull/106 [#109]: https://github.com/awslabs/coldsnap/pull/109 [#111]: https://github.com/awslabs/coldsnap/pull/111 [#112]: https://github.com/awslabs/coldsnap/pull/112 [#114]: https://github.com/awslabs/coldsnap/pull/114 [#115]: https://github.com/awslabs/coldsnap/pull/115 [#116]: https://github.com/awslabs/coldsnap/pull/116 [#117]: https://github.com/awslabs/coldsnap/pull/117 [#118]: https://github.com/awslabs/coldsnap/pull/118 [#119]: https://github.com/awslabs/coldsnap/pull/119 [#123]: https://github.com/awslabs/coldsnap/pull/123 [#124]: https://github.com/awslabs/coldsnap/pull/124 [#127]: https://github.com/awslabs/coldsnap/pull/127 [#129]: https://github.com/awslabs/coldsnap/pull/129 [#130]: https://github.com/awslabs/coldsnap/pull/130 [#131]: https://github.com/awslabs/coldsnap/pull/131 [#132]: https://github.com/awslabs/coldsnap/pull/132 [#134]: https://github.com/awslabs/coldsnap/pull/134 [#135]: https://github.com/awslabs/coldsnap/pull/135 [#143]: https://github.com/awslabs/coldsnap/pull/143 [#145]: https://github.com/awslabs/coldsnap/pull/145 [#147]: https://github.com/awslabs/coldsnap/pull/147 [#149]: https://github.com/awslabs/coldsnap/pull/149 [#168]: https://github.com/awslabs/coldsnap/pull/168 [#171]: https://github.com/awslabs/coldsnap/pull/171 [#179]: https://github.com/awslabs/coldsnap/pull/179 [#186]: https://github.com/awslabs/coldsnap/pull/186 [#197]: https://github.com/awslabs/coldsnap/pull/197 [#220]: https://github.com/awslabs/coldsnap/pull/220 [#237]: https://github.com/awslabs/coldsnap/pull/237 [#239]: https://github.com/awslabs/coldsnap/pull/239 [#247]: https://github.com/awslabs/coldsnap/pull/247 [#257]: https://github.com/awslabs/coldsnap/pull/257 [#271]: https://github.com/awslabs/coldsnap/pull/271 [#309]: https://github.com/awslabs/coldsnap/pull/309 [#346]: https://github.com/awslabs/coldsnap/pull/346 [#363]: https://github.com/awslabs/coldsnap/pull/363 [#364]: https://github.com/awslabs/coldsnap/pull/364 [#365]: https://github.com/awslabs/coldsnap/pull/365 [#378]: https://github.com/awslabs/coldsnap/pull/378 [#387]: https://github.com/awslabs/coldsnap/pull/387 [#388]: https://github.com/awslabs/coldsnap/pull/388 [#390]: https://github.com/awslabs/coldsnap/pull/390 [#392]: https://github.com/awslabs/coldsnap/pull/392 [#394]: https://github.com/awslabs/coldsnap/pull/394 [#396]: https://github.com/awslabs/coldsnap/pull/396 [#397]: 
https://github.com/awslabs/coldsnap/pull/397 [#398]: https://github.com/awslabs/coldsnap/pull/398 [#399]: https://github.com/awslabs/coldsnap/pull/399 [#401]: https://github.com/awslabs/coldsnap/pull/401 [#403]: https://github.com/awslabs/coldsnap/pull/403 [#404]: https://github.com/awslabs/coldsnap/pull/404 [#405]: https://github.com/awslabs/coldsnap/pull/405 [#407]: https://github.com/awslabs/coldsnap/pull/407 [#411]: https://github.com/awslabs/coldsnap/pull/411 [#412]: https://github.com/awslabs/coldsnap/pull/412 [#414]: https://github.com/awslabs/coldsnap/pull/414 [#415]: https://github.com/awslabs/coldsnap/pull/415 [#416]: https://github.com/awslabs/coldsnap/pull/416 [#417]: https://github.com/awslabs/coldsnap/pull/417 [#422]: https://github.com/awslabs/coldsnap/pull/422 [#423]: https://github.com/awslabs/coldsnap/pull/423 [#425]: https://github.com/awslabs/coldsnap/pull/425 [#427]: https://github.com/awslabs/coldsnap/pull/427 [#428]: https://github.com/awslabs/coldsnap/pull/428 [#429]: https://github.com/awslabs/coldsnap/pull/429 [#430]: https://github.com/awslabs/coldsnap/pull/430 [#431]: https://github.com/awslabs/coldsnap/pull/431 [#432]: https://github.com/awslabs/coldsnap/pull/432 [#434]: https://github.com/awslabs/coldsnap/pull/434 [#435]: https://github.com/awslabs/coldsnap/pull/435 [#436]: https://github.com/awslabs/coldsnap/pull/436 [#438]: https://github.com/awslabs/coldsnap/pull/438 [#439]: https://github.com/awslabs/coldsnap/pull/439 [#440]: https://github.com/awslabs/coldsnap/pull/440 [#441]: https://github.com/awslabs/coldsnap/pull/441 [#445]: https://github.com/awslabs/coldsnap/pull/445 ================================================ FILE: CODE_OF_CONDUCT.md ================================================ ## Code of Conduct This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact opensource-codeofconduct@amazon.com with any additional questions or comments. ================================================ FILE: CONTRIBUTING.md ================================================ # Contributing Guidelines Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional documentation, we greatly value feedback and contributions from our community. Please read through this document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your bug report or contribution. ## Reporting Bugs/Feature Requests We welcome you to use the GitHub issue tracker to report bugs or suggest features. When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: * A reproducible test case or series of steps * The version of our code being used * Any modifications you've made relevant to the bug * Anything unusual about your environment or deployment ## Contributing via Pull Requests Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 1. You are working against the latest source on the *master* branch. 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 3. 
You open an issue to discuss any significant work - we would hate for your time to be wasted.

To send us a pull request, please:

1. Fork the repository.
2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change.
3. Ensure local tests pass.
4. Commit to your fork using clear commit messages.
5. Send us a pull request, answering any default questions in the pull request interface.
6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.

GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and [creating a pull request](https://help.github.com/articles/creating-a-pull-request/).

## Finding contributions to work on

Looking at the existing issues is a great way to find something to contribute. Our projects use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), so looking at any 'help wanted' issues is a great place to start.

## Code of Conduct

This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact opensource-codeofconduct@amazon.com with any additional questions or comments.

## Security issue notifications

If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public GitHub issue.

## Licensing

See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes.

================================================
FILE: Cargo.toml
================================================

[package]
name = "coldsnap"
version = "0.10.0"
description = "A library and command-line interface for uploading and downloading Amazon EBS snapshots"
authors = ["Ben Cressey <bcressey@amazon.com>"]
license = "Apache-2.0"
edition = "2018"
readme = "README.md"
repository = "https://github.com/awslabs/coldsnap"
keywords = ["AWS", "Amazon", "EBS", "snapshot"]

[features]
# This feature is for backward compatibility but has no effect.
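# (Historical note: Cargo features originally selected a TLS implementation, first
# for rusoto and later for the AWS SDK for Rust; see CHANGELOG entries #18 and #145.
# The SDK now supplies HTTPS support through its "default-https-client" feature, so
# the feature below remains only so builds that pass `--features aws-sdk-rust-rustls`,
# as the CI matrix does, keep working.)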
default = ["aws-sdk-rust-rustls"] aws-sdk-rust-rustls = [ ] [dependencies] argh = "0.1" async-trait = "0.1" aws-config = { version = "1", default-features = false, features = ["behavior-version-latest", "credentials-process", "default-https-client", "rt-tokio"] } aws-sdk-ebs = { version = "1", default-features = false, features = ["default-https-client", "rt-tokio"] } aws-sdk-ec2 = { version = "1", default-features = false, features = ["default-https-client", "rt-tokio"] } aws-types = "1" base64 = "0.22" bytes = "1" env_logger = "0.11" futures = "0.3" indicatif = "0.18" log = "0.4" nix = { version = "0.31", default-features = false, features = ["ioctl"] } serde = { version = "1", features = ["derive"] } serde_json = "1" sha2 = "0.11" snafu = "0.9" tempfile = "3" tokio = { version = "1", features = ["fs", "io-util", "time", "macros", "rt-multi-thread"] } ================================================ FILE: LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. ================================================ FILE: NOTICE ================================================ Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
================================================
FILE: README.md
================================================

coldsnap
--------

`coldsnap` is a command-line interface that uses the Amazon EBS direct APIs to upload and download snapshots.
It does not need to launch an EC2 instance or manage EBS volume attachments.
It can be used to simplify snapshot handling in an automated pipeline.

## Usage

### Credentials

Coldsnap uses the same credential mechanisms as the AWS CLI.
For example, if you have credentials in `~/.aws/credentials`, these will be used.
You can specify the name of the profile to be used by adding `--profile profile-name`.
You can also define environment variables, for example:

```
$ export AWS_ACCESS_KEY_ID=EXAMPLEAKIAIOSFODNN7
$ export AWS_SECRET_ACCESS_KEY=EXAMPLEKEYwJalrXUtnFEMI/K7MDENG/bPxRfiCY
$ export AWS_DEFAULT_REGION=us-west-2
```

If the name of a profile is provided, then it will be used.
If not, then the default behavior of the AWS Rust SDK credential provider will be used.
[Here] is the description of the default behavior.

[Here]: https://docs.rs/aws-config/latest/aws_config/default_provider/credentials/struct.DefaultCredentialsChain.html

### Upload

Upload a local file into an EBS snapshot:
```
$ coldsnap upload disk.img
```

If you want to wait for the uploaded snapshot to be in "available" state, add `--wait`:
```
$ coldsnap upload --wait disk.img
```

Alternatively, you can use `coldsnap wait`, which offers more flexibility in terms of wait duration and behavior.
```
$ coldsnap wait snap-1234
```

If you want to add tags to the uploaded snapshot, add `--tag Key=k,Value=v` for each desired tag:
```
$ coldsnap upload disk.img --tag "Key=MyKeyName,Value=MyKeyValue" --tag "Key=MyOtherKeyName,Value=MyOtherKeyValue"
```

If you want to omit blocks of all zeroes when uploading a snapshot, add `--omit-zero-blocks`.
This was the historical behavior, but is usually incompatible with encrypted EBS snapshots.
Applications that write zeroes to blocks will expect to read zeroes back, and for that to work, the blocks must be present in the snapshot.
If unsure, avoid using this option.
```
$ coldsnap upload disk.img --omit-zero-blocks
```

If you want to encrypt with a specific KMS key when uploading a snapshot, add `--kms-key-id` and the desired ARN.
```
$ coldsnap upload disk.img --kms-key-id arn:aws:kms:us-west-2:444455556666:key/1a2b3c4d-5e6f-1a2b-3c4d-5e6f1a2b3c4d
```

### Download

Download an EBS snapshot into a local file:
```
$ coldsnap download snap-1234 disk.img
```

Run `coldsnap --help` to see more options.

## Installation

`coldsnap` can be installed using [`cargo`](https://rustup.rs/).
```
$ cargo install --locked coldsnap
```

## Security

See [CONTRIBUTING](CONTRIBUTING.md#security-issue-notifications) for more information.

## License

This project is licensed under the Apache-2.0 License.
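## Library use

`coldsnap` is a library as well as a CLI. The sketch below mirrors the CLI's upload path in `src/bin/coldsnap/main.rs`; it is a minimal example, not a tested recipe — the trailing `None` arguments assume the `upload_from_file` signature used there (volume size, description, tags, progress bar, zero-block handling, KMS key ID, worker count), and `disk.img` is an illustrative file name.

```rust
use aws_sdk_ebs::Client as EbsClient;
use coldsnap::SnapshotUploader;
use std::path::Path;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Region and credentials come from the environment, like the CLI's default path.
    let config = aws_config::load_from_env().await;
    let uploader = SnapshotUploader::new(EbsClient::new(&config));
    // All optional arguments are left at their defaults, roughly matching
    // `coldsnap upload --no-progress disk.img`.
    let snapshot_id = uploader
        .upload_from_file(Path::new("disk.img"), None, None, None, None, None, None, None)
        .await?;
    println!("{snapshot_id}");
    Ok(())
}
```

Downloading is symmetric: construct a `SnapshotDownloader` from an `EbsClient` and call `download_to_file` with the snapshot ID, destination path, optional progress bar, and optional checkpoint behavior.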
================================================
FILE: deny.toml
================================================

# Only check Linux target triples
[graph]
targets = [
    "x86_64-unknown-linux-gnu",
    "x86_64-unknown-linux-musl",
    "aarch64-unknown-linux-gnu",
    "aarch64-unknown-linux-musl",
]

[licenses]
# We want really high confidence when inferring licenses from text
confidence-threshold = 0.93
allow = [
    "Apache-2.0",
    #"BSD-2-Clause", # OK but currently unused; commenting to prevent warning
    "BSD-3-Clause",
    "BSL-1.0",
    #"CC0-1.0", # OK but currently unused; commenting to prevent warning
    "ISC",
    "MIT",
    #"OpenSSL", # OK but currently unused; commenting to prevent warning
    "Unicode-3.0",
    "Unlicense",
    #"Zlib", # OK but currently unused; commenting to prevent warning
    #"Unicode-DFS-2016", # OK but currently unused; commenting to prevent warning
]
exceptions = []

[[licenses.clarify]]
name = "ring"
expression = "MIT AND ISC AND OpenSSL"
license-files = [
    { path = "LICENSE", hash = 0xbd0eed23 },
]

[[licenses.clarify]]
name = "webpki"
expression = "ISC"
license-files = [
    { path = "LICENSE", hash = 0x001c7e6c },
]

[bans]
# Deny multiple versions or wildcard dependencies.
multiple-versions = "deny"
wildcards = "deny"
skip = [
    { name = "http", version = "0.2" },
    { name = "http-body", version = "0.4" },
]
skip-tree = []

[sources]
# Deny crates from unknown registries or git repositories.
unknown-registry = "deny"
unknown-git = "deny"

================================================
FILE: src/bin/coldsnap/main.rs
================================================

// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

/*!
`coldsnap` is a command-line interface that uses the Amazon EBS direct APIs to upload and download
snapshots.
*/

use argh::FromArgs;
use aws_config::default_provider::credentials::DefaultCredentialsChain;
use aws_config::default_provider::region::DefaultRegionChain;
use aws_config::retry::RetryConfig;
use aws_config::timeout::TimeoutConfig;
use aws_sdk_ebs::types::Tag;
use aws_sdk_ebs::Client as EbsClient;
use aws_sdk_ec2::Client as Ec2Client;
use aws_types::app_name::AppName;
use aws_types::region::Region;
use aws_types::SdkConfig;
use coldsnap::{
    CheckpointBehavior, SnapshotDownloader, SnapshotUploader, SnapshotWaiter, UploadZeroBlocks,
    WaitParams,
};
use env_logger::{Builder, Env};
use indicatif::{ProgressBar, ProgressStyle};
use log::{debug, LevelFilter};
use snafu::{ensure, ResultExt};
use std::path::PathBuf;
use std::time::Duration;

type Result<T> = std::result::Result<T, error::Error>;

#[tokio::main]
// Returning a Result from main makes it print a Debug representation of the error, but with Snafu
// we have nice Display representations of the error, so we wrap "main" (run) and print any error.
// https://github.com/shepmaster/snafu/issues/110
async fn main() {
    if let Err(e) = run().await {
        eprintln!("{e}");
        std::process::exit(1);
    }
}

async fn run() -> Result<()> {
    let args: Args = argh::from_env();
    init_logger(args.verbose);
    let client_config = build_client_config(args.region, args.profile, args.endpoint).await;
    match args.subcommand {
        SubCommand::Download(download_args) => {
            let client = EbsClient::new(&client_config);
            let downloader = SnapshotDownloader::new(client);
            ensure!(
                download_args.file.file_name().is_some(),
                error::ValidateFilenameSnafu {
                    path: download_args.file
                }
            );
            ensure!(
                download_args.force || !download_args.file.exists(),
                error::FileExistsSnafu {
                    path: download_args.file
                }
            );
            let progress_bar = build_progress_bar(download_args.no_progress, "Downloading");
            let checkpoint = match (download_args.checkpoint, download_args.keep_checkpoint) {
                (false, _) => None,
                (true, false) => Some(CheckpointBehavior::Enable),
                (true, true) => Some(CheckpointBehavior::EnableAndKeep),
            };
            debug!(
                "Downloading snapshot {} to {}",
                download_args.snapshot_id,
                download_args.file.display()
            );
            downloader
                .download_to_file(
                    &download_args.snapshot_id,
                    &download_args.file,
                    progress_bar?,
                    checkpoint,
                )
                .await
                .context(error::DownloadSnapshotSnafu)?;
        }
        SubCommand::Upload(upload_args) => {
            if upload_args.workers == Some(0) {
                eprintln!("Error: --workers must be greater than zero");
                std::process::exit(1);
            }
            if upload_args.client_shards == Some(0) {
                eprintln!("Error: --client-shards must be greater than zero");
                std::process::exit(1);
            }
            let num_shards = upload_args.client_shards.unwrap_or(1);
            let uploader = if num_shards <= 1 {
                SnapshotUploader::new(EbsClient::new(&client_config))
            } else {
                debug!("Creating {} EBS client shards", num_shards);
                let clients = (0..num_shards)
                    .map(|_| EbsClient::new(&client_config))
                    .collect();
                SnapshotUploader::with_client_shards(clients)
            };
            ensure!(
                upload_args.file.file_name().is_some(),
                error::ValidateFilenameSnafu {
                    path: upload_args.file
                }
            );
            ensure!(
                upload_args.file.exists(),
                error::FileDoesNotExistSnafu {
                    path: upload_args.file
                }
            );
            let progress_bar = build_progress_bar(upload_args.no_progress, "Uploading");
            let zero_blocks = upload_args
                .omit_zero_blocks
                .then_some(UploadZeroBlocks::Omit);
            debug!("Uploading {}", upload_args.file.display());
            let snapshot_id = uploader
                .upload_from_file(
                    &upload_args.file,
                    upload_args.volume_size,
                    upload_args.description.as_deref(),
                    Some(upload_args.tag),
                    progress_bar?,
                    zero_blocks,
                    upload_args.kms_key_id,
                    upload_args.workers,
                )
                .await
                .context(error::UploadSnapshotSnafu)?;
            println!("{snapshot_id}");
            if upload_args.wait {
                debug!(
                    "{} uploaded as snapshot {}, waiting for snapshot to be ready...",
                    upload_args.file.display(),
                    snapshot_id
                );
                let client = Ec2Client::new(&client_config);
                let waiter = SnapshotWaiter::new(client);
                waiter
                    .wait_for_completed(&snapshot_id)
                    .await
                    .context(error::WaitSnapshotSnafu)?;
            }
        }
        SubCommand::Wait(wait_args) => {
            let client = Ec2Client::new(&client_config);
            let waiter = SnapshotWaiter::new(client);
            let wait_params = WaitParams::new(
                wait_args.desired_status,
                wait_args.successes_required,
                wait_args.max_attempts,
                wait_args.seconds_between_attempts,
            );
            debug!(
                "Waiting for snapshot {} to reach '{}'...",
                wait_args.snapshot_id, wait_params.state
            );
            waiter
                .wait(wait_args.snapshot_id, wait_params)
                .await
                .context(error::WaitSnapshotSnafu)?;
        }
    }
    Ok(())
}

/// Create a progress bar to show status of snapshot blocks, if wanted.
fn build_progress_bar(no_progress: bool, verb: &str) -> Result<Option<ProgressBar>> {
    if no_progress {
        return Ok(None);
    }
    let progress_bar = ProgressBar::new(0);
    progress_bar.set_style(
        ProgressStyle::default_bar()
            .template(&[" ", verb, " [{bar:50.white/black}] {pos}/{len} ({eta})"].concat())
            .context(error::ProgressBarTemplateSnafu)?
            .progress_chars("=> "),
    );
    Ok(Some(progress_bar))
}

/// Create a config to build an AWS SDK client
async fn build_client_config(
    region: Option<String>,
    profile: Option<String>,
    endpoint: Option<String>,
) -> SdkConfig {
    let app_name =
        AppName::new(format!("coldsnap-{}", env!("CARGO_PKG_VERSION"))).expect("valid app name");
    let mut config = aws_config::from_env().app_name(app_name);
    config = match (region, &profile) {
        (Some(region), Some(profile)) => {
            let region = Region::new(region);
            config.region(region.clone()).credentials_provider(
                DefaultCredentialsChain::builder()
                    .profile_name(profile)
                    .region(region.clone())
                    .build()
                    .await,
            )
        }
        (Some(region), None) => {
            let region = Region::new(region);
            config.region(region.clone()).credentials_provider(
                DefaultCredentialsChain::builder()
                    .region(region.clone())
                    .build()
                    .await,
            )
        }
        (None, Some(profile)) => {
            let region = DefaultRegionChain::builder()
                .profile_name(profile)
                .build()
                .region()
                .await;
            config.region(region.clone()).credentials_provider(
                DefaultCredentialsChain::builder()
                    .profile_name(profile)
                    .region(region.clone())
                    .build()
                    .await,
            )
        }
        (None, None) => config,
    };
    if let Some(endpoint) = &endpoint {
        config = config.endpoint_url(endpoint);
    }
    // The AWS SDK does not set response or per-attempt timeouts by default.
    // Without these, a request that sends its body but never receives a response
    // will block the worker indefinitely.
    config = config
        .timeout_config(
            TimeoutConfig::builder()
                .read_timeout(Duration::from_secs(12))
                .operation_attempt_timeout(Duration::from_secs(20))
                .operation_timeout(Duration::from_secs(120))
                .build(),
        )
        // Disable SDK-level retries; coldsnap already has its own per-block retry
        // loop with backoff. Layering SDK retries on top of that leads to excessive
        // total attempts and unpredictable wall-clock time.
        .retry_config(RetryConfig::standard().with_max_attempts(1));
    config.load().await
}

/// Initializes the logger and sets logging level based on input.
fn init_logger(verbose: bool) {
    let log_level = if verbose {
        LevelFilter::Debug
    } else {
        LevelFilter::Info
    };
    // Set the default for everything to "error" unless RUST_LOG has been set to something else.
    Builder::from_env(Env::default().default_filter_or("error"))
        .format_timestamp(None)
        .format_target(false)
        // Set our own logging to what has been requested.
        .filter(Some("coldsnap"), log_level)
        .init();
}

#[derive(FromArgs, PartialEq, Debug)]
/// Work with snapshots through the Amazon EBS direct APIs.
struct Args {
    #[argh(subcommand)]
    subcommand: SubCommand,

    #[argh(option)]
    /// the region to use
    region: Option<String>,

    #[argh(option)]
    /// overrides the endpoint resolver used for all AWS Services
    endpoint: Option<String>,

    #[argh(option)]
    /// the profile to use
    profile: Option<String>,

    #[argh(switch)]
    /// enable verbose logging output
    verbose: bool,
}

#[derive(FromArgs, PartialEq, Debug)]
#[argh(subcommand)]
enum SubCommand {
    Download(DownloadArgs),
    Upload(UploadArgs),
    Wait(WaitArgs),
}

#[derive(FromArgs, PartialEq, Debug)]
#[argh(subcommand, name = "download")]
/// Download an EBS snapshot into a local file.
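// Example invocations (IDs and paths are illustrative):
//   coldsnap download snap-1234 disk.img
//   coldsnap download --checkpoint snap-1234 disk.img   (re-running resumes an interrupted download)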
struct DownloadArgs {
    #[argh(positional)]
    snapshot_id: String,

    #[argh(positional)]
    file: PathBuf,

    #[argh(switch)]
    /// overwrite an existing file
    force: bool,

    #[argh(switch)]
    /// disable the progress bar
    no_progress: bool,

    #[argh(switch)]
    /// enable checkpointing for resumable downloads
    checkpoint: bool,

    #[argh(switch)]
    /// keep checkpoint files after successful download (for debugging)
    keep_checkpoint: bool,
}

/// Turn a user-specified tag string into a Tag object. Tags must start with 'Key=' and
/// denote the tag value with ',Value='.
fn tag_from_str(input: &str) -> std::result::Result<Tag, String> {
    const KEY_DELIMITER: &str = "Key=";
    const VALUE_DELIMITER: &str = ",Value=";
    if !input.starts_with(KEY_DELIMITER) {
        return Err(format!("Tag inputs must start with '{KEY_DELIMITER}'").to_string());
    }
    if !input.contains(VALUE_DELIMITER) {
        return Err(
            format!("Tag inputs must contain value entry with '{VALUE_DELIMITER}'").to_string(),
        );
    }
    // We have already validated the input contains both delimiters so there is no panic risk for
    // unwrapping directly.
    let (key, val) = input
        .split_once(KEY_DELIMITER)
        .unwrap()
        .1
        .split_once(VALUE_DELIMITER)
        .unwrap();
    if key.is_empty() {
        return Err("Tag inputs must contain a non-empty key entry".to_string());
    }
    Ok(Tag::builder().key(key).value(val).build())
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn valid_tag_inputs() {
        for input in [
            "Key=A,Value=",
            "Key=A,Value=B",
            "Key=A C,Value=D C",
            "Key=Key=,Value=Value=",
            "Key=A1+-=._:/@,,Value=B1+-=._:/@,",
        ] {
            assert!(tag_from_str(input).is_ok());
        }
    }

    #[test]
    fn invalid_tag_inputs() {
        for input in [
            "Key=A",
            ",Value=B",
            "Key=,Value=B",
            "Kay=A,Value=B",
            "Key=A,value=B",
            "Kay=A,Key=A,Value=B",
        ] {
            assert!(tag_from_str(input).is_err());
        }
    }
}

#[derive(FromArgs, PartialEq, Debug)]
#[argh(subcommand, name = "upload")]
/// Upload a local file into an EBS snapshot.
struct UploadArgs {
    #[argh(positional)]
    file: PathBuf,

    #[argh(option)]
    /// the size of the volume
    volume_size: Option<i64>,

    #[argh(option)]
    /// the description for the snapshot
    description: Option<String>,

    #[argh(option, from_str_fn(tag_from_str))]
    /// a tag for the snapshot
    tag: Vec<Tag>,

    #[argh(option)]
    /// KMS key ARN to use for encryption.
    kms_key_id: Option<String>,

    #[argh(switch)]
    /// disable the progress bar
    no_progress: bool,

    #[argh(switch)]
    /// wait for snapshot to be in "completed" state
    wait: bool,

    #[argh(switch)]
    /// omit blocks of all zeros when uploading
    omit_zero_blocks: bool,

    #[argh(option)]
    /// number of concurrent upload workers (default: 64)
    workers: Option<usize>,

    #[argh(option)]
    /// number of independent EBS clients for higher-concurrency uploads (default: 1)
    client_shards: Option<usize>,
}

/// Turn a user-specified duration in seconds into a Duration object, for argh parsing.
fn seconds_from_str(input: &str) -> std::result::Result<Duration, String> {
    let seconds: u64 = input
        .parse()
        .map_err(|e: std::num::ParseIntError| e.to_string())?;
    Ok(Duration::from_secs(seconds))
}

#[derive(FromArgs, PartialEq, Debug)]
#[argh(subcommand, name = "wait")]
/// Wait for an EBS snapshot to be in a desired state.
struct WaitArgs {
    #[argh(positional)]
    /// the ID of the snapshot to wait for
    snapshot_id: String,

    #[argh(option)]
    /// the desired status to wait for, like "completed"
    desired_status: Option<String>,

    #[argh(option)]
    /// the number of successful checks in a row to consider the wait completed
    successes_required: Option<u8>,

    #[argh(option)]
    /// check at most this many times before giving up
    max_attempts: Option<u8>,

    #[argh(option, from_str_fn(seconds_from_str))]
    /// wait this many seconds between queries to check snapshot status
    seconds_between_attempts: Option<Duration>,
}

/// Potential errors during `coldsnap` execution.
mod error {
    use snafu::Snafu;

    #[derive(Debug, Snafu)]
    #[snafu(visibility(pub(super)))]
    pub(super) enum Error {
        #[snafu(display("Failed to download snapshot: {}", source))]
        DownloadSnapshot { source: coldsnap::DownloadError },

        #[snafu(display("Refusing to overwrite existing file '{}' without --force", path.display()))]
        FileExists { path: std::path::PathBuf },

        #[snafu(display("Snapshot source file '{}' does not exist", path.display()))]
        FileDoesNotExist { path: std::path::PathBuf },

        #[snafu(display("Failed to parse progress style template: {}", source))]
        ProgressBarTemplate {
            source: indicatif::style::TemplateError,
        },

        #[snafu(display("Failed to upload snapshot: {}", source))]
        UploadSnapshot { source: coldsnap::UploadError },

        #[snafu(display("Failed to validate filename '{}'", path.display()))]
        ValidateFilename { path: std::path::PathBuf },

        #[snafu(display("Failed to wait for snapshot: {}", source))]
        WaitSnapshot { source: coldsnap::WaitError },
    }
}

================================================
FILE: src/block_device.rs
================================================

// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

/*!
Block device helper functions.
*/

use snafu::{ensure, ResultExt, Snafu};
use std::convert::TryFrom;
use std::fs::OpenOptions;
use std::os::unix::io::AsRawFd;
use std::path::Path;

#[derive(Debug, Snafu)]
pub struct Error(error::Error);
type Result<T> = std::result::Result<T, Error>;

/// Generates the blkgetsize64 function, wrapping the Linux BLKGETSIZE64 ioctl
/// (0x12, 114), which reports a block device's size in bytes as a u64.
mod ioctl {
    use nix::ioctl_read;
    ioctl_read!(blkgetsize64, 0x12, 114, u64);
}

/// Find the size of a block device.
pub(crate) fn get_block_device_size(path: &Path) -> Result<i64> {
    let file = OpenOptions::new()
        .read(true)
        .open(path)
        .context(error::OpenFileSnafu { path })?;

    let mut block_device_size = 0;
    let result = unsafe { ioctl::blkgetsize64(file.as_raw_fd(), &mut block_device_size) }
        .context(error::GetBlockDeviceSizeSnafu { path })?;
    ensure!(result == 0, error::InvalidBlockDeviceSizeSnafu { result });

    let block_device_size =
        i64::try_from(block_device_size).with_context(|_| error::ConvertNumberSnafu {
            what: "block device size",
            number: block_device_size.to_string(),
            target: "i64",
        })?;

    Ok(block_device_size)
}

/// Potential errors from block device helper functions.
mod error {
    use snafu::Snafu;
    use std::path::PathBuf;

    #[derive(Debug, Snafu)]
    #[snafu(visibility(pub(super)))]
    pub(super) enum Error {
        #[snafu(display("Failed to open '{}': {}", path.display(), source))]
        OpenFile {
            path: PathBuf,
            source: std::io::Error,
        },

        #[snafu(display("Failed to get block device size for '{}': {}", path.display(), source))]
        GetBlockDeviceSize { path: PathBuf, source: nix::Error },

        #[snafu(display("Invalid block device size: {}", result))]
        InvalidBlockDeviceSize { result: i32 },

        #[snafu(display("Failed to convert {} {} to {}: {}", what, number, target, source))]
        ConvertNumber {
            what: String,
            number: String,
            target: String,
            source: std::num::TryFromIntError,
        },
    }
}

================================================
FILE: src/download.rs
================================================

// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

/*!
Download Amazon EBS snapshots.
*/

use crate::block_device::get_block_device_size;
use async_trait::async_trait;
use aws_sdk_ebs::Client as EbsClient;
use base64::engine::general_purpose::STANDARD as base64_engine;
use base64::Engine as _;
use futures::stream::{self, StreamExt};
use indicatif::ProgressBar;
use log::debug;
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
use snafu::{ensure, OptionExt, ResultExt, Snafu};
use std::collections::BTreeMap;
use std::convert::TryFrom;
use std::io::SeekFrom;
use std::os::unix::fs::FileTypeExt;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use tokio::fs::{self, OpenOptions};
use tokio::io::{AsyncSeekExt, AsyncWriteExt};

#[derive(Debug, Snafu)]
pub struct Error(error::Error);
type Result<T> = std::result::Result<T, Error>;

const GIBIBYTE: i64 = 1024 * 1024 * 1024;
const SNAPSHOT_BLOCK_WORKERS: usize = 64;
const SNAPSHOT_BLOCK_ATTEMPTS: u8 = 3;
const SHA256_ALGORITHM: &str = "SHA256";

// ListSnapshotBlocks allows us to specify how many blocks are returned in each
// query, from the default of 100 to the maximum of 10000. Since we fetch all
// the block information up front in a loop, we ask for the maximum so that we
// need fewer API calls.
const LIST_REQUEST_MAX_RESULTS: i32 = 10000;

const CHECKPOINT_FLUSH_INTERVAL: usize = 100;

/// Specify how checkpointing should be handled for resumable downloads.
#[derive(Copy, Clone, PartialEq)]
pub enum CheckpointBehavior {
    /// Disable checkpointing, ignore existing checkpoint.
    Disable,
    /// Enable checkpointing for resumable downloads.
    Enable,
    /// Enable checkpointing, keep checkpoint file after successful download.
    EnableAndKeep,
}

pub struct SnapshotDownloader {
    ebs_client: EbsClient,
}

impl SnapshotDownloader {
    pub fn new(ebs_client: EbsClient) -> Self {
        SnapshotDownloader { ebs_client }
    }

    /// Download a snapshot into the file at the specified path.
    /// * `snapshot_id` is the snapshot to download.
    /// * `path` is the destination file for the snapshot. It will be extended to the volume size
    ///   of the snapshot. If the snapshot is sparse, i.e. not all blocks are present, then the file
    ///   will contain holes that return zeroes when read.
    /// * `progress_bar` is optional, since output to the terminal may not be wanted.
    /// * `checkpoint` specifies checkpointing behavior for resumable downloads.
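    // Example (illustrative values):
    //   let downloader = SnapshotDownloader::new(ebs_client);
    //   downloader
    //       .download_to_file("snap-1234", "disk.img", None, Some(CheckpointBehavior::Enable))
    //       .await?;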
    pub async fn download_to_file<P: AsRef<Path>>(
        &self,
        snapshot_id: &str,
        path: P,
        progress_bar: Option<ProgressBar>,
        checkpoint: Option<CheckpointBehavior>,
    ) -> Result<()> {
        let checkpoint = checkpoint.unwrap_or(CheckpointBehavior::Disable);
        let path = path.as_ref();
        let _ = path
            .file_name()
            .context(error::ValidateFileNameSnafu { path })?;

        // Find the overall volume size, the block size, and the metadata we need for each block:
        // the index, which lets us calculate the offset into the volume; and the token, which we
        // need to retrieve it.
        let mut snapshot: Snapshot = self.list_snapshot_blocks(snapshot_id).await?;

        // Check for checkpoint file and filter out already-completed blocks
        let checkpoint_file = progress_path(path);
        let mut previously_completed: Vec<i32> = Vec::new();
        let mut resuming = false;
        let checkpoint_data = match checkpoint != CheckpointBehavior::Disable {
            true => tokio::fs::read_to_string(&checkpoint_file).await.ok(),
            false => None,
        }
        .and_then(|data| serde_json::from_str::(&data).ok())
        .filter(|p| p.snapshot_id == snapshot_id);

        if let Some(progress) = checkpoint_data {
            let completed: std::collections::BTreeSet<i32> =
                progress.completed_blocks.iter().copied().collect();
            let original_count = snapshot.blocks.len();
            snapshot.blocks.retain(|b| !completed.contains(&b.index));
            previously_completed = progress.completed_blocks;
            debug!(
                "Resuming download: {} of {} blocks remaining",
                snapshot.blocks.len(),
                original_count
            );
            resuming = true;
        }

        let mut target = if BlockDeviceTarget::is_valid(path).await? {
            BlockDeviceTarget::new_target(path)?
        } else {
            FileTarget::new_target(path)?
        };
        if !resuming {
            debug!("Writing {}G to {}...", snapshot.volume_size, path.display());
            target.grow(snapshot.volume_size * GIBIBYTE).await?;
        }

        self.write_snapshot_blocks(
            snapshot,
            target.write_path()?,
            path,
            progress_bar,
            checkpoint,
            previously_completed,
        )
        .await?;
        target.finalize().await?;

        // Clean up checkpoint file on success
        if checkpoint != CheckpointBehavior::EnableAndKeep {
            let _ = std::fs::remove_file(&checkpoint_file);
        }
        Ok(())
    }

    async fn write_snapshot_blocks(
        &self,
        snapshot: Snapshot,
        write_path: &Path,
        progress_path_base: &Path,
        progress_bar: Option<ProgressBar>,
        checkpoint: CheckpointBehavior,
        previously_completed: Vec<i32>,
    ) -> Result<()> {
        // Collect errors encountered while downloading blocks, since we can't
        // return a result directly through `for_each_concurrent`.
        let block_errors = Arc::new(Mutex::new(BTreeMap::new()));
        let completed_blocks = Arc::new(Mutex::new(previously_completed));
        let last_flush_count = Arc::new(Mutex::new(0usize));

        // We may have a progress bar to update.
        let progress_bar = match progress_bar {
            Some(pb) => {
                let pb_length = snapshot.blocks.len();
                let pb_length =
                    u64::try_from(pb_length).with_context(|_| error::ConvertNumberSnafu {
                        what: "progress bar length",
                        number: pb_length.to_string(),
                        target: "u64",
                    })?;
                pb.set_length(pb_length);
                Arc::new(Some(pb))
            }
            None => Arc::new(None),
        };

        let snapshot_id_for_flush = snapshot.snapshot_id.clone();
        let progress_path_for_flush = progress_path_base.to_path_buf();

        // Create a context for each block that can be moved to another thread.
        let mut block_contexts = Vec::new();
        for SnapshotBlock { index, token } in snapshot.blocks {
            block_contexts.push(BlockContext {
                path: write_path.to_path_buf(),
                block_index: index,
                block_token: token,
                block_size: snapshot.block_size,
                snapshot_id: snapshot.snapshot_id.clone(),
                block_errors: Arc::clone(&block_errors),
                progress_bar: Arc::clone(&progress_bar),
                ebs_client: self.ebs_client.clone(),
            });
        }

        // Distribute the work across a fixed number of concurrent workers.
        // New threads will be created by the runtime as needed, but we'll
        // only process this many blocks at once to limit resource usage.
        let download =
            stream::iter(block_contexts).for_each_concurrent(SNAPSHOT_BLOCK_WORKERS, |context| {
                let completed_blocks = Arc::clone(&completed_blocks);
                let last_flush_count = Arc::clone(&last_flush_count);
                let snapshot_id = snapshot_id_for_flush.clone();
                let progress_file_path = progress_path_for_flush.clone();
                async move {
                    for i in 0..SNAPSHOT_BLOCK_ATTEMPTS {
                        let block_result = self.download_block(&context).await;
                        {
                            let mut block_errors = context.block_errors.lock().expect("poisoned");
                            if let Err(e) = block_result {
                                debug!(
                                    "Error downloading block, attempt {} of {}",
                                    i + 1,
                                    SNAPSHOT_BLOCK_ATTEMPTS
                                );
                                block_errors.insert(context.block_index, e);
                                continue;
                            }
                            block_errors.remove(&context.block_index);
                        }

                        if checkpoint == CheckpointBehavior::Disable {
                            break;
                        }

                        // Track completion and flush checkpoint periodically
                        let completed_count = {
                            let mut completed = completed_blocks.lock().expect("poisoned");
                            completed.push(context.block_index);
                            completed.len()
                        };
                        let should_flush = {
                            let mut last_flush = last_flush_count.lock().expect("poisoned");
                            if completed_count - *last_flush >= CHECKPOINT_FLUSH_INTERVAL {
                                *last_flush = completed_count;
                                true
                            } else {
                                false
                            }
                        };
                        if should_flush {
                            let blocks: Vec<i32> =
                                completed_blocks.lock().expect("poisoned").clone();
                            write_progress(&progress_file_path, &snapshot_id, &blocks).await;
                        }
                        break;
                    }
                }
            });
        download.await;

        // At this point, all the concurrent jobs have finished, so all of the Arcs we copied have
        // been dropped. Hence there's exactly one strong reference and it's safe to `try_unwrap`
        // and `unwrap` the result to recover the contents. Any of the Mutexes inside are safe to
        // unwrap unless they've been poisoned by a panic, in which case we also panic.

        // Summarize any fatal errors.
        let block_errors = Arc::try_unwrap(block_errors)
            .expect("referenced")
            .into_inner()
            .expect("poisoned");
        let block_errors_count = block_errors.keys().len();
        if block_errors_count != 0 {
            // Final flush before returning error
            if checkpoint != CheckpointBehavior::Disable {
                let blocks: Vec<i32> = completed_blocks.lock().expect("poisoned").clone();
                write_progress(progress_path_base, &snapshot.snapshot_id, &blocks).await;
            }
            let failed_blocks: Vec<i32> = block_errors.keys().copied().collect();
            let error_report = format!("blocks {:?}", failed_blocks);
            error::GetSnapshotBlocksSnafu {
                error_count: block_errors_count,
                snapshot_id: snapshot.snapshot_id,
                error_report,
            }
            .fail()?;
        }
        Ok(())
    }

    /// Retrieve the index and token for all snapshot blocks.
    async fn list_snapshot_blocks(&self, snapshot_id: &str) -> Result<Snapshot> {
        let mut blocks = Vec::new();
        let max_results = LIST_REQUEST_MAX_RESULTS;
        let mut next_token = None;
        let mut volume_size;
        let mut block_size;

        loop {
            let response = self
                .ebs_client
                .list_snapshot_blocks()
                .snapshot_id(snapshot_id)
                .set_next_token(next_token)
                .max_results(max_results)
                .send()
                .await
                .context(error::ListSnapshotBlocksSnafu { snapshot_id })?;

            volume_size = response
                .volume_size
                .context(error::FindVolumeSizeSnafu { snapshot_id })?;
            block_size = response
                .block_size
                .context(error::FindBlockSizeSnafu { snapshot_id })?;

            for block in response.blocks.unwrap_or_default().iter() {
                let index = block
                    .block_index
                    .context(error::FindBlockIndexSnafu { snapshot_id })?;
                let token = String::from(block.block_token.as_ref().context(
                    error::FindBlockPropertySnafu {
                        snapshot_id,
                        block_index: index,
                        property: "token",
                    },
                )?);
                blocks.push(SnapshotBlock { index, token });
            }

            next_token = response.next_token;
            if next_token.is_none() {
                break;
            }
        }

        Ok(Snapshot {
            snapshot_id: snapshot_id.to_string(),
            volume_size,
            block_size,
            blocks,
        })
    }

    /// Download a single block from the snapshot in context and write it to the file.
    async fn download_block(&self, context: &BlockContext) -> Result<()> {
        let snapshot_id = &context.snapshot_id;
        let block_index = context.block_index;
        let block_token = &context.block_token;
        let block_size = context.block_size;
        let response = context
            .ebs_client
            .get_snapshot_block()
            .snapshot_id(snapshot_id)
            .block_index(block_index)
            .block_token(block_token)
            .send()
            .await
            .context(error::GetSnapshotBlockSnafu {
                snapshot_id,
                block_index,
            })?;

        let expected_hash = response.checksum.context(error::FindBlockPropertySnafu {
            snapshot_id,
            block_index,
            property: "checksum",
        })?;
        let checksum_algorithm = response
            .checksum_algorithm
            .context(error::FindBlockPropertySnafu {
                snapshot_id,
                block_index,
                property: "checksum algorithm",
            })?
            .as_str()
            .to_string();
        let data_length = response
            .data_length
            .context(error::FindBlockPropertySnafu {
                snapshot_id,
                block_index,
                property: "data length",
            })?;
        let block_data_stream = response
            .block_data
            .collect()
            .await
            .context(error::CollectByteStreamSnafu {
                snapshot_id,
                block_index,
                property: "data",
            })?;
        let block_data = block_data_stream.into_bytes();

        ensure!(
            checksum_algorithm == SHA256_ALGORITHM,
            error::UnexpectedBlockChecksumAlgorithmSnafu {
                snapshot_id,
                block_index,
                checksum_algorithm,
            }
        );

        let block_data_length = block_data.len();
        let block_data_length =
            i32::try_from(block_data_length).with_context(|_| error::ConvertNumberSnafu {
                what: "block data length",
                number: block_data_length.to_string(),
                target: "i32",
            })?;
        ensure!(
            data_length > 0 && data_length <= block_size && data_length == block_data_length,
            error::UnexpectedBlockDataLengthSnafu {
                snapshot_id,
                block_index,
                data_length,
            }
        );

        let mut block_digest = Sha256::new();
        block_digest.update(&block_data);
        let hash_bytes = block_digest.finalize();
        let block_hash = base64_engine.encode(hash_bytes);
        ensure!(
            block_hash == expected_hash,
            error::BadBlockChecksumSnafu {
                snapshot_id,
                block_index,
                block_hash,
                expected_hash,
            }
        );

        // Blocks of all zeroes can be omitted from the file.
        let sparse = block_data.iter().all(|&byte| byte == 0u8);
        if sparse {
            if let Some(ref progress_bar) = *context.progress_bar {
                progress_bar.inc(1);
            }
            return Ok(());
        }

        let path: &Path = context.path.as_ref();
        let mut f = OpenOptions::new()
            .write(true)
            .open(path)
            .await
            .context(error::OpenFileSnafu { path })?;

        // Calculate the offset to write the block into the target file.
        let block_index_u64 =
            u64::try_from(context.block_index).with_context(|_| error::ConvertNumberSnafu {
                what: "block index",
                number: context.block_index.to_string(),
                target: "u64",
            })?;
        let block_size_u64 =
            u64::try_from(block_size).with_context(|_| error::ConvertNumberSnafu {
                what: "block size",
                number: block_size.to_string(),
                target: "u64",
            })?;
        let offset = block_index_u64 * block_size_u64;
        f.seek(SeekFrom::Start(offset))
            .await
            .context(error::SeekFileOffsetSnafu { path, offset })?;

        let count = usize::try_from(data_length).with_context(|_| error::ConvertNumberSnafu {
            what: "byte count",
            number: data_length.to_string(),
            target: "usize",
        })?;
        f.write_all(&block_data)
            .await
            .context(error::WriteFileBytesSnafu { path, count })?;
        f.flush().await.context(error::FlushFileSnafu { path })?;

        if let Some(ref progress_bar) = *context.progress_bar {
            progress_bar.inc(1);
        }

        Ok(())
    }
}

/// Stores the metadata about the snapshot contents.
struct Snapshot {
    snapshot_id: String,
    volume_size: i64,
    block_size: i32,
    blocks: Vec<SnapshotBlock>,
}

/// Stores the metadata about a snapshot block.
struct SnapshotBlock {
    index: i32,
    token: String,
}

/// Stores the context needed to download a snapshot block.
struct BlockContext {
    path: PathBuf,
    block_index: i32,
    block_token: String,
    block_size: i32,
    snapshot_id: String,
    block_errors: Arc<Mutex<BTreeMap<i32, Error>>>,
    progress_bar: Arc<Option<ProgressBar>>,
    ebs_client: EbsClient,
}

/// Shared interface for write targets.
#[async_trait]
trait SnapshotWriteTarget {
    // Grow the target to the desired length.
    async fn grow(&mut self, length: i64) -> Result<()>;
    // Returns the file path to which blocks must be written.
    fn write_path(&self) -> Result<&Path>;
    // Persist the contents to disk.
    async fn finalize(&mut self) -> Result<()>;
}

/// Implements file operations for block devices.
struct BlockDeviceTarget {
    path: PathBuf,
}

impl BlockDeviceTarget {
    fn new_target<P: AsRef<Path>>(path: P) -> Result<Box<dyn SnapshotWriteTarget>> {
        let path = path.as_ref();
        Ok(Box::new(BlockDeviceTarget { path: path.into() }))
    }

    async fn is_valid<P: AsRef<Path>>(path: P) -> Result<bool> {
        let path = path.as_ref();
        if !path.exists() {
            return Ok(false);
        }
        let file_meta = fs::metadata(path)
            .await
            .context(error::ReadFileMetadataSnafu { path })?;
        Ok(file_meta.file_type().is_block_device())
    }
}

#[async_trait]
impl SnapshotWriteTarget for BlockDeviceTarget {
    // Ensures the existing size is >= length, but otherwise leaves the device untouched.
    async fn grow(&mut self, length: i64) -> Result<()> {
        let path = self.path.as_path();
        let block_device_size =
            get_block_device_size(path).context(error::GetBlockDeviceSizeSnafu)?;
        // Make sure the block device is big enough to hold the snapshot.
        ensure!(
            block_device_size >= length,
            error::BlockDeviceTooSmallSnafu {
                block_device_size: block_device_size / GIBIBYTE,
                needed: length / GIBIBYTE,
            }
        );
        Ok(())
    }

    // Returns the file path to which blocks must be written.
    fn write_path(&self) -> Result<&Path> {
        Ok(self.path.as_path())
    }

    // No-op: blocks were already written directly to the device.
    async fn finalize(&mut self) -> Result<()> {
        Ok(())
    }
}

/// Implements file operations for filesystem files.
struct FileTarget {
    path: PathBuf,
    partial_path: PathBuf,
}

impl FileTarget {
    fn new_target<P: AsRef<Path>>(path: P) -> Result<Box<dyn SnapshotWriteTarget>> {
        let path = path.as_ref();
        let mut partial_path = path.as_os_str().to_owned();
        partial_path.push(".partial");
        Ok(Box::new(FileTarget {
            path: path.into(),
            partial_path: PathBuf::from(partial_path),
        }))
    }
}

#[async_trait]
impl SnapshotWriteTarget for FileTarget {
    // Truncate the file to the desired size.
    async fn grow(&mut self, length: i64) -> Result<()> {
        let file_len = u64::try_from(length).with_context(|_| error::ConvertNumberSnafu {
            what: "file length",
            number: length.to_string(),
            target: "u64",
        })?;
        let file = std::fs::File::create(&self.partial_path).context(error::CreateFileSnafu {
            path: &self.partial_path,
        })?;
        file.set_len(file_len).context(error::ExtendFileSnafu {
            path: &self.partial_path,
        })?;
        Ok(())
    }

    fn write_path(&self) -> Result<&Path> {
        Ok(self.partial_path.as_path())
    }

    // Persist the file to its destination.
    async fn finalize(&mut self) -> Result<()> {
        tokio::fs::rename(&self.partial_path, &self.path)
            .await
            .context(error::RenameFileSnafu {
                from: &self.partial_path,
                to: &self.path,
            })?;
        Ok(())
    }
}

/// Checkpoint progress file for resumable downloads.
#[derive(Serialize, Deserialize)]
struct ProgressFile {
    snapshot_id: String,
    completed_blocks: Vec<i32>,
}

/// Returns the path to the checkpoint progress file for a given target path.
fn progress_path(target_path: &Path) -> PathBuf {
    let mut path = target_path.as_os_str().to_owned();
    path.push(".coldsnap-progress");
    PathBuf::from(path)
}

/// Writes checkpoint progress to disk. Failures are ignored; at worst a
/// resumed download re-fetches a few extra blocks.
async fn write_progress(target_path: &Path, snapshot_id: &str, completed_blocks: &[i32]) {
    let progress = ProgressFile {
        snapshot_id: snapshot_id.to_string(),
        completed_blocks: completed_blocks.to_vec(),
    };
    if let Ok(data) = serde_json::to_string(&progress) {
        let _ = tokio::fs::write(progress_path(target_path), data).await;
    }
}

/// Potential errors while downloading a snapshot and writing to a local file.
mod error {
    use aws_sdk_ebs::{
        self,
        operation::{
            get_snapshot_block::GetSnapshotBlockError,
            list_snapshot_blocks::ListSnapshotBlocksError,
        },
    };
    use snafu::Snafu;
    use std::path::PathBuf;

    #[derive(Debug, Snafu)]
    #[snafu(visibility(pub(super)))]
    pub(super) enum Error {
        #[snafu(display("Failed to read metadata for '{}': {}", path.display(), source))]
        ReadFileMetadata {
            path: PathBuf,
            source: std::io::Error,
        },

        #[snafu(display("{}", source))]
        GetBlockDeviceSize { source: crate::block_device::Error },

        #[snafu(display(
            "Block device too small: block device size {} GiB, needed at least {} GiB",
            block_device_size,
            needed
        ))]
        BlockDeviceTooSmall { block_device_size: i64, needed: i64 },

        #[snafu(display("Failed to validate file name '{}'", path.display()))]
        ValidateFileName { path: PathBuf },

        #[snafu(display("Failed to find parent directory for file name '{}'", path.display()))]
        ValidateParentDirectory { path: PathBuf },

        #[snafu(display("Failed to create file '{}': {}", path.display(), source))]
        CreateFile {
            path: PathBuf,
            source: std::io::Error,
        },

        #[snafu(display("Failed to extend file '{}': {}", path.display(), source))]
        ExtendFile {
            path: PathBuf,
            source: std::io::Error,
        },

        #[snafu(display("Failed to rename '{}' to '{}': {}", from.display(), to.display(), source))]
        RenameFile {
            from: PathBuf,
            to: PathBuf,
            source: std::io::Error,
        },

        #[snafu(display("Failed to list snapshot blocks '{snapshot_id}': {source}", source = crate::error_stack(source, 2)))]
        ListSnapshotBlocks {
            snapshot_id: String,
            #[snafu(source(from(aws_sdk_ebs::error::SdkError<ListSnapshotBlocksError>, Box::new)))]
            source: Box<aws_sdk_ebs::error::SdkError<ListSnapshotBlocksError>>,
        },

        #[snafu(display("Failed to find volume size for '{}'", snapshot_id))]
        FindVolumeSize { snapshot_id: String },

        #[snafu(display("Failed to find index for block in '{}'", snapshot_id))]
        FindBlockIndex { snapshot_id: String },

        #[snafu(display(
            "Failed to find {} for block {} in '{}'",
            property,
            block_index,
            snapshot_id
        ))]
        FindBlockProperty {
            snapshot_id: String,
            block_index: i32,
            property: String,
        },

        #[snafu(display(
            "Failed to collect {} for block {} in '{}'",
            property,
            block_index,
            snapshot_id
        ))]
        CollectByteStream {
            snapshot_id: String,
            block_index: i32,
            property: String,
            #[snafu(source(from(aws_sdk_ebs::primitives::ByteStreamError, Box::new)))]
            source: Box<aws_sdk_ebs::primitives::ByteStreamError>,
        },

        #[snafu(display("Failed to find block size for '{}'", snapshot_id))]
        FindBlockSize { snapshot_id: String },

        #[snafu(display(
            "Found unexpected checksum algorithm '{}' for block {} in '{}'",
            checksum_algorithm,
            block_index,
            snapshot_id
        ))]
        UnexpectedBlockChecksumAlgorithm {
            snapshot_id: String,
            block_index: i64,
            checksum_algorithm: String,
        },

        #[snafu(display(
            "Found unexpected data length {} for block {} in '{}'",
            data_length,
            block_index,
            snapshot_id
        ))]
        UnexpectedBlockDataLength {
            snapshot_id: String,
            block_index: i64,
            data_length: i64,
        },

        #[snafu(display(
            "Bad checksum for block {} in '{}': expected '{}', got '{}'",
            block_index,
            snapshot_id,
            expected_hash,
            block_hash,
        ))]
        BadBlockChecksum {
            snapshot_id: String,
            block_index: i64,
            block_hash: String,
            expected_hash: String,
        },

        #[snafu(display(
            "Failed to get block {} for snapshot '{}': {}",
            block_index,
            snapshot_id,
            source
        ))]
        GetSnapshotBlock {
            snapshot_id: String,
            block_index: i64,
            #[snafu(source(from(aws_sdk_ebs::error::SdkError<GetSnapshotBlockError>, Box::new)))]
            source: Box<aws_sdk_ebs::error::SdkError<GetSnapshotBlockError>>,
        },

        #[snafu(display(
            "Failed to get {} blocks for snapshot '{}': {}",
            error_count,
            snapshot_id,
            error_report
        ))]
        GetSnapshotBlocks {
            error_count: usize,
            snapshot_id: String,
            error_report: String,
        },

        #[snafu(display("Failed to flush '{}': {}", path.display(), source))]
        FlushFile {
            path: PathBuf,
            source: std::io::Error,
        },

        #[snafu(display("Failed to open '{}': {}", path.display(), source))]
        OpenFile {
            path: PathBuf,
            source: std::io::Error,
        },

        #[snafu(display("Failed to seek to {} in '{}': {}", offset, path.display(), source))]
        SeekFileOffset {
            path: PathBuf,
            offset: u64,
            source: std::io::Error,
        },

        #[snafu(display("Failed to write {} bytes to '{}': {}", count, path.display(), source))]
        WriteFileBytes {
            path: PathBuf,
            count: usize,
            source: std::io::Error,
        },

        #[snafu(display("Failed to convert {} {} to {}: {}", what, number, target, source))]
        ConvertNumber {
            what: String,
            number: String,
            target: String,
            source: std::num::TryFromIntError,
        },
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use tempfile::tempdir;

    #[test]
    fn progress_path_appends_suffix() {
        let path = Path::new("/tmp/disk.img");
        let progress = progress_path(path);
        assert_eq!(progress, PathBuf::from("/tmp/disk.img.coldsnap-progress"));
    }

    #[test]
    fn progress_file_roundtrip() {
        let dir = tempdir().unwrap();
        let target = dir.path().join("disk.img");
        let progress = ProgressFile {
            snapshot_id: "snap-123".to_string(),
            completed_blocks: vec![0, 5, 10],
        };
        let path = progress_path(&target);
        let data = serde_json::to_string(&progress).unwrap();
        std::fs::write(&path, &data).unwrap();
        let loaded: ProgressFile =
            serde_json::from_str(&std::fs::read_to_string(&path).unwrap()).unwrap();
        assert_eq!(loaded.snapshot_id, "snap-123");
        assert_eq!(loaded.completed_blocks, vec![0, 5, 10]);
    }

    #[test]
    fn progress_file_filters_completed_blocks() {
        let all_blocks = vec![
            SnapshotBlock {
                index: 0,
                token: "a".into(),
            },
            SnapshotBlock {
                index: 1,
                token: "b".into(),
            },
            SnapshotBlock {
                index: 2,
                token: "c".into(),
            },
            SnapshotBlock {
                index: 3,
                token: "d".into(),
            },
        ];
        let completed: std::collections::BTreeSet<i32> = vec![0, 2].into_iter().collect();
        let remaining: Vec<_> = all_blocks
            .into_iter()
            .filter(|b| !completed.contains(&b.index))
            .collect();
        assert_eq!(remaining.len(), 2);
        assert_eq!(remaining[0].index, 1);
        assert_eq!(remaining[1].index, 3);
    }

    #[test]
    fn progress_file_ignores_mismatched_snapshot_id() {
        let progress = ProgressFile {
            snapshot_id: "snap-different".to_string(),
            completed_blocks: vec![0, 1, 2],
        };
        let current_snapshot_id = "snap-123";
        let should_resume = progress.snapshot_id == current_snapshot_id;
        assert!(!should_resume);
    }
}



================================================
FILE: src/lib.rs
================================================
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

/*!
A library that uses the Amazon EBS direct APIs to work with snapshots.

# Examples

Downloading a snapshot into a disk image:

```
use coldsnap::SnapshotDownloader;
use aws_sdk_ebs::Client as EbsClient;
use std::path::Path;

# async fn doc() {
let client = EbsClient::new(&aws_config::from_env().region("us-west-2").load().await);
let downloader = SnapshotDownloader::new(client);
let path = Path::new("./disk.img");
downloader.download_to_file("snap-1234", &path, None, None)
    .await
    .expect("failed to download snapshot");
# }
```
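Downloads can also checkpoint their progress so that an interrupted transfer
resumes where it left off. A minimal sketch, assuming you want the checkpoint
file kept even after success (`CheckpointBehavior::EnableAndKeep`; see
`CheckpointBehavior` for the other variants):

```
use coldsnap::{CheckpointBehavior, SnapshotDownloader};
use aws_sdk_ebs::Client as EbsClient;
use std::path::Path;

# async fn doc() {
let client = EbsClient::new(&aws_config::from_env().region("us-west-2").load().await);
let downloader = SnapshotDownloader::new(client);
let path = Path::new("./disk.img");
// Progress is checkpointed next to the target file; rerunning the same call
// after an interruption skips blocks that already completed.
downloader.download_to_file("snap-1234", &path, None, Some(CheckpointBehavior::EnableAndKeep))
    .await
    .expect("failed to download snapshot");
# }
```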
Uploading a disk image into a snapshot:

```
use coldsnap::SnapshotUploader;
use aws_sdk_ebs::Client as EbsClient;
use std::path::Path;

# async fn doc() {
let client = EbsClient::new(&aws_config::from_env().region("us-west-2").load().await);
let uploader = SnapshotUploader::new(client);
let path = Path::new("./disk.img");
let snapshot_id = uploader.upload_from_file(&path, None, None, None, None, None, None, None)
    .await
    .expect("failed to upload snapshot");
# }
```
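To skip uploading blocks that are all zeroes, pass `UploadZeroBlocks::Omit` as
the `zero_blocks` argument. A minimal sketch with all other options left as
`None` (see `UploadZeroBlocks::Omit` for its caveat with encrypted snapshots):

```
use coldsnap::{SnapshotUploader, UploadZeroBlocks};
use aws_sdk_ebs::Client as EbsClient;
use std::path::Path;

# async fn doc() {
let client = EbsClient::new(&aws_config::from_env().region("us-west-2").load().await);
let uploader = SnapshotUploader::new(client);
let path = Path::new("./disk.img");
let snapshot_id = uploader
    .upload_from_file(&path, None, None, None, None, Some(UploadZeroBlocks::Omit), None, None)
    .await
    .expect("failed to upload snapshot");
# }
```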
Waiting for a snapshot to be completed:

```
use coldsnap::SnapshotWaiter;
use aws_sdk_ec2::Client as Ec2Client;

# async fn doc() {
let client = Ec2Client::new(&aws_config::from_env().region("us-west-2").load().await);
let waiter = SnapshotWaiter::new(client);
waiter.wait_for_completed("snap-1234")
    .await
    .expect("failed to wait for snapshot");
# }
```
*/

mod block_device;
mod download;
mod upload;
mod wait;

pub use download::CheckpointBehavior;
pub use download::Error as DownloadError;
pub use download::SnapshotDownloader;
pub use upload::Error as UploadError;
pub use upload::SnapshotUploader;
pub use upload::ZeroBlocks as UploadZeroBlocks;
pub use wait::Error as WaitError;
pub use wait::{SnapshotWaiter, WaitParams};

/// Errors from the AWS Rust SDK crate often swallow relevant information when they are printed
/// using `Display`, leaving messages without enough detail for the user to know what went wrong.
/// This function prints an error using `Display` and recursively appends up to `n` levels of
/// underlying source errors to the printed message.
pub(crate) fn error_stack(e: &dyn std::error::Error, n: u16) -> String {
    let mut current_error = e;
    let mut s = format!("{e}");
    for _ in 0..n {
        current_error = match current_error.source() {
            None => return s,
            Some(next_error) => next_error,
        };
        s += &format!(": {current_error}");
    }
    s
}



================================================
FILE: src/upload.rs
================================================
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

/*!
Upload Amazon EBS snapshots.
*/

use crate::block_device::get_block_device_size;
use aws_sdk_ebs::primitives::ByteStream;
use aws_sdk_ebs::types::{ChecksumAggregationMethod, ChecksumAlgorithm, Tag};
use aws_sdk_ebs::Client as EbsClient;
use base64::engine::general_purpose::STANDARD as base64_engine;
use base64::Engine as _;
use bytes::BytesMut;
use futures::stream::{self, StreamExt};
use indicatif::ProgressBar;
use log::{debug, info, warn};
use sha2::{Digest, Sha256};
use snafu::{ensure, OptionExt, ResultExt, Snafu};
use std::cmp;
use std::collections::BTreeMap;
use std::convert::TryFrom;
use std::ffi::OsStr;
use std::io::SeekFrom;
use std::os::unix::fs::FileTypeExt;
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicI32, AtomicU64, Ordering as AtomicOrdering};
use std::sync::{Arc, Mutex};
use std::time::Duration;
use tokio::fs::{self, File};
use tokio::io::{AsyncReadExt, AsyncSeekExt};
use tokio::time;

#[derive(Debug, Snafu)]
pub struct Error(error::Error);
type Result<T> = std::result::Result<T, Error>;

const GIBIBYTE: i64 = 1024 * 1024 * 1024;
const SNAPSHOT_BLOCK_WORKERS: usize = 64;

// How long to wait between attempts; this number * attempt number, in seconds.
const SNAPSHOT_BLOCK_RETRY_SCALE: u64 = 2;

// Five attempts (four retries) with scale 2 give us 20 seconds of total backoff. With SDK-level
// timeouts now in place, each attempt is bounded, so fewer retries are needed.
const SNAPSHOT_BLOCK_ATTEMPTS: u64 = 5;

const SNAPSHOT_TIMEOUT_MINUTES: i32 = 10;
const SHA256_ALGORITHM: ChecksumAlgorithm = ChecksumAlgorithm::ChecksumAlgorithmSha256;
const LINEAR_METHOD: ChecksumAggregationMethod =
    ChecksumAggregationMethod::ChecksumAggregationLinear;

/// Collects per-block upload latencies for a summary logged after the upload.
struct UploadStats {
    buckets: [AtomicU64; 6],
    errors: AtomicU64,
}

impl UploadStats {
    fn new() -> Self {
        Self {
            buckets: Default::default(),
            errors: AtomicU64::new(0),
        }
    }

    fn record_success(&self, elapsed: Duration) {
        let bucket = match elapsed.as_millis() {
            0..250 => 0,
            250..500 => 1,
            500..1000 => 2,
            1000..2000 => 3,
            2000..5000 => 4,
            _ => 5,
        };
        self.buckets[bucket].fetch_add(1, AtomicOrdering::Relaxed);
    }

    fn record_error(&self) {
        self.errors.fetch_add(1, AtomicOrdering::Relaxed);
    }

    fn report(&self) {
        let b: Vec<u64> = self
            .buckets
            .iter()
            .map(|a| a.load(AtomicOrdering::Relaxed))
            .collect();
        let e = self.errors.load(AtomicOrdering::Relaxed);
        info!(
            "Upload complete: <250ms={} 250-500ms={} 500ms-1s={} 1-2s={} 2-5s={} >5s={} errors={}",
            b[0], b[1], b[2], b[3], b[4], b[5], e
        );
    }
}

/// Specify how blocks of all zeroes should be handled.
#[derive(Copy, Clone)]
pub enum ZeroBlocks {
    /// Include blocks of all zeroes in the snapshot.
    Include,
    /// Omit blocks of all zeroes from the snapshot.
    /// This is incompatible with encrypted snapshots if the application expects to read zeroes
    /// from those blocks.
    Omit,
}

pub struct SnapshotUploader {
    ebs_clients: Vec<EbsClient>,
}

impl SnapshotUploader {
    pub fn new(ebs_client: EbsClient) -> Self {
        SnapshotUploader {
            ebs_clients: vec![ebs_client],
        }
    }

    /// Create an uploader with multiple independent EBS clients. Blocks are
    /// distributed across clients by index, giving each a separate HTTP
    /// connection pool. This can reduce head-of-line blocking when many
    /// workers share a single pool over high-latency paths.
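    ///
    /// A minimal construction sketch, with two shards built from one shared
    /// config (tuning each client's settings separately is left out here):
    /// ```
    /// use coldsnap::SnapshotUploader;
    /// use aws_sdk_ebs::Client as EbsClient;
    ///
    /// # async fn doc() {
    /// let config = aws_config::from_env().region("us-west-2").load().await;
    /// let uploader = SnapshotUploader::with_client_shards(vec![
    ///     EbsClient::new(&config),
    ///     EbsClient::new(&config),
    /// ]);
    /// # }
    /// ```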
    pub fn with_client_shards(ebs_clients: Vec<EbsClient>) -> Self {
        assert!(!ebs_clients.is_empty(), "need at least one EBS client");
        SnapshotUploader { ebs_clients }
    }

    fn client_for_block(&self, block_index: i32) -> &EbsClient {
        &self.ebs_clients[block_index as usize % self.ebs_clients.len()]
    }

    /// Upload a snapshot from the file at the specified path.
    /// * `path` is the source file for the snapshot.
    /// * `volume_size` is the desired size in GiB. If no size is provided (`None`), the source
    ///   file's size will be rounded up to the nearest GiB and used instead.
    /// * `description` is the snapshot description. If no description is provided (`None`), the
    ///   source file's name will be used instead.
    /// * `tags` are the tags to add to the snapshot. If no tags are provided (`None`), then no
    ///   tags are added.
    /// * `progress_bar` is optional, since output to the terminal may not be wanted.
    /// * `zero_blocks` specifies how zero blocks will be handled. If no value is provided
    ///   (`None`), then all blocks will be uploaded.
    /// * `kms_key_id` is the KMS key ARN to use for encryption.
    /// * `workers` is the number of concurrent upload workers. If no value is provided
    ///   (`None`), the default of 64 workers is used.
    #[allow(clippy::too_many_arguments)]
    pub async fn upload_from_file<P: AsRef<Path>>(
        &self,
        path: P,
        volume_size: Option<i64>,
        description: Option<&str>,
        tags: Option<Vec<Tag>>,
        progress_bar: Option<ProgressBar>,
        zero_blocks: Option<ZeroBlocks>,
        kms_key_id: Option<String>,
        workers: Option<usize>,
    ) -> Result<String> {
        let path = path.as_ref();
        let description = description.map(|s| s.to_string()).unwrap_or_else(|| {
            path.file_name()
                .unwrap_or_else(|| OsStr::new(""))
                .to_string_lossy()
                .to_string()
        });

        let file_meta = fs::metadata(path)
            .await
            .context(error::ReadFileMetadataSnafu { path })?;
        let file_size = if file_meta.file_type().is_block_device() {
            get_block_device_size(path).context(error::GetBlockDeviceSizeSnafu)?
        } else {
            self.file_size(&file_meta).await?
        };

        // EBS snapshots must be multiples of 1 GiB in size.
        let min_volume_size = cmp::max((file_size + GIBIBYTE - 1) / GIBIBYTE, 1);

        // If a volume size was provided, make sure it will be big enough.
        let volume_size = volume_size.unwrap_or(min_volume_size);
        ensure!(
            volume_size >= min_volume_size,
            error::BadVolumeSizeSnafu {
                requested: volume_size,
                needed: min_volume_size,
            }
        );

        // Start the snapshot, which gives us the ID and block size we need.
        debug!("Uploading {volume_size}G to snapshot...");
        let (snapshot_id, block_size) = self
            .start_snapshot(volume_size, description, tags, kms_key_id)
            .await?;

        let file_blocks = (file_size + i64::from(block_size - 1)) / i64::from(block_size);
        let file_blocks = i32::try_from(file_blocks).with_context(|_| error::ConvertNumberSnafu {
            what: "calculate file blocks",
            number: file_blocks.to_string(),
            target: "i32",
        })?;

        // We skip sparse blocks, so we need to keep track of how many we send.
        let changed_blocks_count = Arc::new(AtomicI32::new(0));

        // Track the hashes of uploaded blocks for the final hash.
        let block_digests = Arc::new(Mutex::new(BTreeMap::new()));

        // Collect errors encountered while uploading blocks, since we can't
        // return a result directly through `for_each_concurrent`.
        let block_errors = Arc::new(Mutex::new(BTreeMap::new()));

        // We may have a progress bar to update.
        let progress_bar = match progress_bar {
            Some(pb) => {
                let pb_length = file_blocks;
                let pb_length =
                    u64::try_from(pb_length).with_context(|_| error::ConvertNumberSnafu {
                        what: "progress bar length",
                        number: pb_length.to_string(),
                        target: "u64",
                    })?;
                pb.set_length(pb_length);
                Arc::new(Some(pb))
            }
            None => Arc::new(None),
        };

        let zero_blocks = zero_blocks.unwrap_or(ZeroBlocks::Include);

        // Create a context for each block that can be moved to another thread.
        let mut block_contexts = Vec::new();
        let mut remaining_data = file_size;
        for i in 0..file_blocks {
            // The file length may not be an exact multiple of the block size,
            // so we need to keep track of how many bytes are left for the call
            // to `read_exact` later.
            let data_length = cmp::min(i64::from(block_size), remaining_data);
            let data_length =
                usize::try_from(data_length).with_context(|_| error::ConvertNumberSnafu {
                    what: "data length",
                    number: data_length.to_string(),
                    target: "usize",
                })?;
            block_contexts.push(BlockContext {
                path: PathBuf::from(path),
                data_length,
                block_index: i,
                block_size,
                snapshot_id: snapshot_id.clone(),
                changed_blocks_count: Arc::clone(&changed_blocks_count),
                block_digests: Arc::clone(&block_digests),
                block_errors: Arc::clone(&block_errors),
                progress_bar: Arc::clone(&progress_bar),
                ebs_client: self.client_for_block(i).clone(),
                zero_blocks,
            });
            remaining_data -= i64::from(block_size);
        }

        // Distribute the work across a fixed number of concurrent workers.
        // New threads will be created by the runtime as needed, but we'll
        // only process this many blocks at once to limit resource usage.
        let worker_count = workers.unwrap_or(SNAPSHOT_BLOCK_WORKERS);
        assert!(worker_count > 0, "--workers must be greater than zero");
        debug!(
            "Using {} concurrent upload workers across {} client shards",
            worker_count,
            self.ebs_clients.len()
        );
        let stats = Arc::new(UploadStats::new());
        let upload = stream::iter(block_contexts).for_each_concurrent(worker_count, |context| {
            let stats = Arc::clone(&stats);
            async move {
                for attempt in 0..SNAPSHOT_BLOCK_ATTEMPTS {
                    if attempt > 0 {
                        let backoff = Duration::from_secs(attempt * SNAPSHOT_BLOCK_RETRY_SCALE);
                        debug!(
                            "block {}: retry {}/{}, backoff {}s",
                            context.block_index,
                            attempt,
                            SNAPSHOT_BLOCK_ATTEMPTS,
                            backoff.as_secs()
                        );
                        time::sleep(backoff).await;
                    }
                    let start = std::time::Instant::now();
                    let block_result = self.upload_block(&context).await;
                    let elapsed = start.elapsed();
                    let mut block_errors = context.block_errors.lock().expect("poisoned");
                    if let Err(e) = block_result {
                        stats.record_error();
                        warn!(
                            "block {}: attempt {}/{} failed after {:.1}s: {}",
                            context.block_index,
                            attempt + 1,
                            SNAPSHOT_BLOCK_ATTEMPTS,
                            elapsed.as_secs_f64(),
                            e
                        );
                        block_errors.insert(context.block_index, e);
                        continue;
                    }
                    stats.record_success(elapsed);
                    block_errors.remove(&context.block_index);
                    break;
                }
            }
        });
        upload.await;
        stats.report();

        // At this point, all the concurrent jobs have finished, so all of the Arcs we copied have
        // been dropped. Hence there's exactly one strong reference and it's safe to `try_unwrap`
        // and `unwrap` the result to recover the contents. Any of the Mutexes inside are safe to
        // unwrap unless they've been poisoned by a panic, in which case we also panic.

        // Summarize any fatal errors.
        let block_errors = Arc::try_unwrap(block_errors)
            .expect("referenced")
            .into_inner()
            .expect("poisoned");
        let block_errors_count = block_errors.keys().len();
        if block_errors_count != 0 {
            let error_report = block_errors
                .values()
                .map(|e| e.to_string())
                .collect::<Vec<String>>()
                .join("\n");
            error::PutSnapshotBlocksSnafu {
                error_count: block_errors_count,
                snapshot_id: snapshot_id.clone(),
                error_report,
            }
            .fail()?;
        }

        let changed_blocks_count = changed_blocks_count.load(AtomicOrdering::Relaxed);

        // Compute the "linear" hash - the hash of all hashes in block index order.
        let block_digests = Arc::try_unwrap(block_digests)
            .expect("referenced")
            .into_inner()
            .expect("poisoned");
        let mut full_digest = Sha256::new();
        for (_, hash_bytes) in block_digests {
            full_digest.update(&hash_bytes);
        }
        let full_hash = base64_engine.encode(full_digest.finalize());

        self.complete_snapshot(&snapshot_id, changed_blocks_count, &full_hash)
            .await?;
        Ok(snapshot_id)
    }

    /// Find the size of a file.
    async fn file_size(&self, file_meta: &std::fs::Metadata) -> Result<i64> {
        let file_len = file_meta.len();
        let file_len = i64::try_from(file_len).with_context(|_| error::ConvertNumberSnafu {
            what: "file length",
            number: file_len.to_string(),
            target: "i64",
        })?;
        Ok(file_len)
    }

    /// Start a new snapshot and return the ID and block size for subsequent puts.
    async fn start_snapshot(
        &self,
        volume_size: i64,
        description: String,
        tags: Option<Vec<Tag>>,
        kms_key_id: Option<String>,
    ) -> Result<(String, i32)> {
        let mut request = self.ebs_clients[0]
            .start_snapshot()
            .volume_size(volume_size)
            .set_description(Some(description))
            .set_tags(tags)
            .set_timeout(Some(SNAPSHOT_TIMEOUT_MINUTES));
        if let Some(kms_key_id) = kms_key_id {
            request = request
                .set_encrypted(Some(true))
                .set_kms_key_arn(Some(kms_key_id));
        }
        let start_response = request.send().await.context(error::StartSnapshotSnafu)?;
        let snapshot_id = start_response
            .snapshot_id
            .context(error::FindSnapshotIdSnafu)?;
        let block_size = start_response
            .block_size
            .context(error::FindSnapshotBlockSizeSnafu)?;
        Ok((snapshot_id, block_size))
    }

    /// Complete a snapshot.
    async fn complete_snapshot(
        &self,
        snapshot_id: &str,
        changed_blocks_count: i32,
        checksum: &str,
    ) -> Result<()> {
        self.ebs_clients[0]
            .complete_snapshot()
            .snapshot_id(snapshot_id)
            .changed_blocks_count(changed_blocks_count)
            .set_checksum(Some(checksum.to_string()))
            .set_checksum_algorithm(Some(SHA256_ALGORITHM))
            .set_checksum_aggregation_method(Some(LINEAR_METHOD))
            .send()
            .await
            .context(error::CompleteSnapshotSnafu { snapshot_id })?;
        Ok(())
    }

    /// Read from the file in context and upload a single block to the snapshot.
    async fn upload_block(&self, context: &BlockContext) -> Result<()> {
        let path: &Path = context.path.as_ref();
        let mut f = File::open(path)
            .await
            .context(error::OpenFileSnafu { path })?;
        let block_index_u64: u64 =
            u64::try_from(context.block_index).with_context(|_| error::ConvertNumberSnafu {
                what: "block_index",
                number: context.block_index.to_string(),
                target: "u64",
            })?;
        let block_size_u64: u64 =
            u64::try_from(context.block_size).with_context(|_| error::ConvertNumberSnafu {
                what: "block_size",
                number: context.block_size.to_string(),
                target: "u64",
            })?;
        let offset: u64 = block_index_u64.checked_mul(block_size_u64).with_context(|| {
            error::CheckedMultiplicationSnafu {
                right: "block_size",
                right_number: context.block_size.to_string(),
                left: "block_index",
                left_number: context.block_index.to_string(),
                target: "u64",
            }
        })?;
        f.seek(SeekFrom::Start(offset))
            .await
            .context(error::SeekFileOffsetSnafu { path, offset })?;

        let block_size = context.block_size;
        let block_size =
            usize::try_from(block_size).with_context(|_| error::ConvertNumberSnafu {
                what: "block size",
                number: block_size.to_string(),
                target: "usize",
            })?;
        let mut block = BytesMut::with_capacity(block_size);
        let count = context.data_length;
        block.resize(count, 0x0);
        f.read_exact(block.as_mut())
            .await
            .context(error::ReadFileBytesSnafu {
                path,
                count,
                offset,
            })?;

        if let ZeroBlocks::Omit = context.zero_blocks {
            let sparse = block.iter().all(|&byte| byte == 0u8);
            // Found a block of all zeroes, and told to omit those from the snapshot.
            if sparse {
                if let Some(ref progress_bar) = *context.progress_bar {
                    progress_bar.inc(1);
                }
                return Ok(());
            }
        }

        // Blocks must be padded to the expected block size.
        if block.len() < block_size {
            block.resize(block_size, 0x0);
        }

        let mut block_digest = Sha256::new();
        block_digest.update(&block);
        let hash_bytes = block_digest.finalize();
        let block_hash = base64_engine.encode(hash_bytes);

        let snapshot_id = &context.snapshot_id;
        let block_index = context.block_index;
        let data_length = block.len();
        let data_length =
            i32::try_from(data_length).with_context(|_| error::ConvertNumberSnafu {
                what: "data length",
                number: data_length.to_string(),
                target: "i32",
            })?;
        context
            .ebs_client
            .put_snapshot_block()
            .snapshot_id(snapshot_id.to_string())
            .block_index(block_index)
            .block_data(ByteStream::from(block.freeze()))
            .data_length(data_length)
            .checksum(block_hash)
            .checksum_algorithm(SHA256_ALGORITHM)
            .send()
            .await
            .context(error::PutSnapshotBlockSnafu {
                snapshot_id,
                block_index,
            })?;

        let mut block_digests = context.block_digests.lock().expect("poisoned");
        block_digests.insert(block_index, hash_bytes.to_vec());

        let changed_blocks_count = &context.changed_blocks_count;
        changed_blocks_count.fetch_add(1, AtomicOrdering::Relaxed);

        if let Some(ref progress_bar) = *context.progress_bar {
            progress_bar.inc(1);
        }

        Ok(())
    }
}

/// Stores the context needed to upload a snapshot block.
struct BlockContext {
    path: PathBuf,
    data_length: usize,
    block_index: i32,
    block_size: i32,
    snapshot_id: String,
    changed_blocks_count: Arc<AtomicI32>,
    block_digests: Arc<Mutex<BTreeMap<i32, Vec<u8>>>>,
    block_errors: Arc<Mutex<BTreeMap<i32, Error>>>,
    progress_bar: Arc<Option<ProgressBar>>,
    ebs_client: EbsClient,
    zero_blocks: ZeroBlocks,
}

/// Potential errors while reading a local file and uploading a snapshot.
mod error {
    use aws_sdk_ebs::operation::{
        complete_snapshot::CompleteSnapshotError, put_snapshot_block::PutSnapshotBlockError,
        start_snapshot::StartSnapshotError,
    };
    use snafu::Snafu;
    use std::path::PathBuf;

    #[derive(Debug, Snafu)]
    #[snafu(visibility(pub(super)))]
    pub(super) enum Error {
        #[snafu(display("Failed to read metadata for '{}': {}", path.display(), source))]
        ReadFileMetadata {
            path: PathBuf,
            source: std::io::Error,
        },

        #[snafu(display("{}", source))]
        GetBlockDeviceSize { source: crate::block_device::Error },

        #[snafu(display(
            "Bad volume size: requested {} GiB, needed at least {} GiB",
            requested,
            needed
        ))]
        BadVolumeSize { requested: i64, needed: i64 },

        #[snafu(display("Failed to open '{}': {}", path.display(), source))]
        OpenFile {
            path: PathBuf,
            source: std::io::Error,
        },

        #[snafu(display("Failed to seek to {} in '{}': {}", offset, path.display(), source))]
        SeekFileOffset {
            path: PathBuf,
            offset: u64,
            source: std::io::Error,
        },

        #[snafu(display("Failed to read {} bytes at offset {} from '{}': {}", count, offset, path.display(), source))]
        ReadFileBytes {
            path: PathBuf,
            count: usize,
            offset: u64,
            source: std::io::Error,
        },

        #[snafu(display("Failed to start snapshot: {source}", source = crate::error_stack(&source, 2)))]
        StartSnapshot {
            #[snafu(source(from(aws_sdk_ebs::error::SdkError<StartSnapshotError>, Box::new)))]
            source: Box<aws_sdk_ebs::error::SdkError<StartSnapshotError>>,
        },

        #[snafu(display(
            "Failed to put block {} for snapshot '{}': {}",
            block_index,
            snapshot_id,
            source
        ))]
        PutSnapshotBlock {
            snapshot_id: String,
            block_index: i64,
            #[snafu(source(from(aws_sdk_ebs::error::SdkError<PutSnapshotBlockError>, Box::new)))]
            source: Box<aws_sdk_ebs::error::SdkError<PutSnapshotBlockError>>,
        },

        #[snafu(display(
            "Failed to put {} blocks for snapshot '{}': {}",
            error_count,
            snapshot_id,
            error_report
        ))]
        PutSnapshotBlocks {
            error_count: usize,
            snapshot_id: String,
            error_report: String,
        },

        #[snafu(display("Failed to complete snapshot '{}': {}", snapshot_id, source))]
        CompleteSnapshot {
            snapshot_id: String,
            #[snafu(source(from(aws_sdk_ebs::error::SdkError<CompleteSnapshotError>, Box::new)))]
            source: Box<aws_sdk_ebs::error::SdkError<CompleteSnapshotError>>,
        },

        #[snafu(display("Failed to find snapshot ID"))]
        FindSnapshotId {},

        #[snafu(display("Failed to find snapshot block size"))]
        FindSnapshotBlockSize {},

        #[snafu(display("Failed to convert {} {} to {}: {}", what, number, target, source))]
        ConvertNumber {
            what: String,
            number: String,
            target: String,
            source: std::num::TryFromIntError,
        },

        #[snafu(display(
            "Overflowed multiplying {} ({}) and {} ({}) inside a {}",
            left,
            left_number,
            right,
            right_number,
            target
        ))]
        CheckedMultiplication {
            left: String,
            left_number: String,
            right: String,
            right_number: String,
            target: String,
        },
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn histogram_bucket_boundaries() {
        let stats = UploadStats::new();
        stats.record_success(Duration::from_millis(0));
        stats.record_success(Duration::from_millis(249));
        stats.record_success(Duration::from_millis(250));
        stats.record_success(Duration::from_millis(499));
        stats.record_success(Duration::from_millis(500));
        stats.record_success(Duration::from_millis(999));
        stats.record_success(Duration::from_millis(1000));
        stats.record_success(Duration::from_millis(1999));
        stats.record_success(Duration::from_millis(2000));
        stats.record_success(Duration::from_millis(4999));
        stats.record_success(Duration::from_millis(5000));
        stats.record_success(Duration::from_millis(60000));

        let b: Vec<u64> = stats
            .buckets
            .iter()
            .map(|a| a.load(AtomicOrdering::Relaxed))
            .collect();
        assert_eq!(b[0], 2); // <250ms: 0, 249
        assert_eq!(b[1], 2); // 250-500ms: 250, 499
        assert_eq!(b[2], 2); // 500ms-1s: 500, 999
        assert_eq!(b[3], 2); // 1-2s: 1000, 1999
        assert_eq!(b[4], 2); // 2-5s: 2000, 4999
        assert_eq!(b[5], 2); // >5s: 5000, 60000
    }
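
    // Checks the arithmetic promised in the comment on SNAPSHOT_BLOCK_ATTEMPTS:
    // sleeps happen before attempts 1..SNAPSHOT_BLOCK_ATTEMPTS, each lasting
    // attempt * SNAPSHOT_BLOCK_RETRY_SCALE seconds, so the current constants
    // allow 20 seconds of total backoff per block.
    #[test]
    fn retry_backoff_totals_twenty_seconds() {
        let total: u64 = (1..SNAPSHOT_BLOCK_ATTEMPTS)
            .map(|attempt| attempt * SNAPSHOT_BLOCK_RETRY_SCALE)
            .sum();
        assert_eq!(total, 20);
    }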

    #[test]
    fn error_counter() {
        let stats = UploadStats::new();
        stats.record_error();
        stats.record_error();
        stats.record_error();
        assert_eq!(stats.errors.load(AtomicOrdering::Relaxed), 3);
    }

    #[test]
    fn client_for_block_modulo_logic() {
        // Verify the shard selection formula: block_index % num_shards.
        let num_shards = 3usize;
        let expected = [0, 1, 2, 0, 1, 2, 0, 1, 2];
        for (i, &want) in expected.iter().enumerate() {
            assert_eq!(i % num_shards, want);
        }
    }

    #[test]
    #[should_panic(expected = "need at least one EBS client")]
    fn with_client_shards_rejects_empty() {
        SnapshotUploader::with_client_shards(vec![]);
    }

    #[test]
    #[should_panic(expected = "--workers must be greater than zero")]
    fn worker_count_zero_panics() {
        // Simulates what happens if workers=Some(0) gets past CLI validation.
        let count: usize = 0;
        assert!(count > 0, "--workers must be greater than zero");
    }
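
    // Demonstrates the "linear" checksum aggregation used in complete_snapshot:
    // the final hash is a SHA-256 over the per-block SHA-256 digests taken in
    // block index order, so reordering the blocks changes the result.
    #[test]
    fn linear_hash_depends_on_block_order() {
        let block_a = Sha256::digest(b"block a");
        let block_b = Sha256::digest(b"block b");

        let mut in_order = Sha256::new();
        in_order.update(block_a);
        in_order.update(block_b);

        let mut reversed = Sha256::new();
        reversed.update(block_b);
        reversed.update(block_a);

        assert_ne!(
            base64_engine.encode(in_order.finalize()),
            base64_engine.encode(reversed.finalize())
        );
    }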
}



================================================
FILE: src/wait.rs
================================================
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

/*!
Wait for Amazon EBS snapshots to be in the desired state.
*/

use aws_sdk_ec2::types::SnapshotState;
use aws_sdk_ec2::Client as Ec2Client;
use snafu::{ensure, ResultExt, Snafu};
use std::thread::sleep;
use std::time::Duration;

#[derive(Debug, Snafu)]
pub struct Error(error::Error);
type Result<T> = std::result::Result<T, Error>;

#[derive(Debug)]
pub struct WaitParams {
    pub state: String,
    pub successes_required: u8,
    pub max_attempts: u8,
    pub duration_between_attempts: Duration,
}

impl Default for WaitParams {
    fn default() -> Self {
        Self {
            state: "completed".to_string(),
            successes_required: 3,
            max_attempts: 90,
            duration_between_attempts: Duration::from_secs(2),
        }
    }
}

impl WaitParams {
    pub fn new(
        desired_status: Option<String>,
        successes_required: Option<u8>,
        max_attempts: Option<u8>,
        duration_between_attempts: Option<Duration>,
    ) -> Self {
        let mut wait_params = Self::default();
        if let Some(desired_status) = desired_status {
            wait_params.state = desired_status;
        }
        if let Some(successes_required) = successes_required {
            wait_params.successes_required = successes_required;
        }
        if let Some(max_attempts) = max_attempts {
            wait_params.max_attempts = max_attempts;
        }
        if let Some(duration_between_attempts) = duration_between_attempts {
            wait_params.duration_between_attempts = duration_between_attempts;
        }
        wait_params
    }
}

/// Allows you to wait for snapshots to come to a desired state in the region associated with the
/// given Ec2Client.
pub struct SnapshotWaiter {
    ec2_client: Ec2Client,
}

impl SnapshotWaiter {
    pub fn new(ec2_client: Ec2Client) -> Self {
        Self { ec2_client }
    }

    /// Waits for the given snapshot ID to be completed.
    pub async fn wait_for_completed<S>(&self, snapshot_id: S) -> Result<()>
    where
        S: AsRef<str>,
    {
        self.wait(snapshot_id, Default::default()).await
    }

    /// Waits for the given snapshot ID to move to the given state, with a configurable number of
    /// attempts and number of successful checks in a row.
    pub async fn wait<S>(&self, snapshot_id: S, wait_params: WaitParams) -> Result<()>
    where
        S: AsRef<str>,
    {
        let WaitParams {
            state,
            successes_required,
            max_attempts,
            duration_between_attempts,
        } = wait_params;

        let mut successes = 0;
        let mut attempts = 0;
        loop {
            attempts += 1;
            // Stop if we're over the max, unless we're on a success streak; then give it some
            // wiggle room.
            ensure!(
                (attempts - successes) <= max_attempts,
                error::MaxAttemptsSnafu { max_attempts }
            );
            let describe_response = self
                .ec2_client
                .describe_snapshots()
                .set_snapshot_ids(Some(vec![snapshot_id.as_ref().to_string()]))
                .send()
                .await
                .context(error::DescribeSnapshotsSnafu)?;

            // The response contains an Option<Vec<Snapshot>>, so we have to check that we got a
            // list at all, and then that the list contains the ID in question.
            if let Some(snapshots) = describe_response.snapshots {
                let mut saw_it = false;
                for snapshot in snapshots {
                    if let Some(ref found_id) = snapshot.snapshot_id {
                        if let Some(found_state) = snapshot.state {
                            if snapshot_id.as_ref() == found_id && state == found_state.as_str() {
                                // Success; check if we have enough to declare victory.
                                saw_it = true;
                                successes += 1;
                                if successes >= successes_required {
                                    return Ok(());
                                }
                                break;
                            }
                            // If the state was error, we know we'll never hit the desired state.
                            // (Unless "error" was the desired state, which is caught above.)
                            ensure!(found_state != SnapshotState::Error, error::StateSnafu);
                        }
                    }
                }
                if !saw_it {
                    // Did not find the snapshot in the list; reset the success count and try
                    // again (if we have spare attempts).
                    successes = 0;
                }
            } else {
                // Did not receive a list; reset the success count and try again (if we have
                // spare attempts).
                successes = 0;
            };

            sleep(duration_between_attempts);
        }
    }
}

/// Potential errors while waiting for the snapshot.
mod error {
    use aws_sdk_ec2::operation::describe_snapshots::DescribeSnapshotsError;
    use snafu::Snafu;

    #[derive(Debug, Snafu)]
    #[snafu(visibility(pub(super)))]
    pub(super) enum Error {
        #[snafu(display("Failed to describe snapshots: {}", source))]
        DescribeSnapshots {
            // Clippy gets upset if this isn't boxed: the size difference between this variant
            // and the others is too much.
            #[snafu(source(from(aws_sdk_ec2::error::SdkError<DescribeSnapshotsError>, Box::new)))]
            source: Box<aws_sdk_ec2::error::SdkError<DescribeSnapshotsError>>,
        },

        #[snafu(display("Snapshot went to 'error' state"))]
        State,

        #[snafu(display("Failed to reach desired state within {} attempts", max_attempts))]
        MaxAttempts { max_attempts: u8 },
    }
}
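
#[cfg(test)]
mod test {
    use super::*;

    // Illustrates the override behavior of WaitParams::new described above:
    // None keeps the default, Some replaces it.
    #[test]
    fn wait_params_overrides() {
        let params = WaitParams::new(Some("pending".to_string()), None, Some(10), None);
        assert_eq!(params.state, "pending");
        assert_eq!(params.successes_required, 3);
        assert_eq!(params.max_attempts, 10);
        assert_eq!(params.duration_between_attempts, Duration::from_secs(2));
    }
}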